Commit 333fbaa0 authored by Leon Romanovsky

net/mlx5: Move QP logic to mlx5_ib

The mlx5_core doesn't need any functionality coded in qp.c, so move
that file to drivers/infiniband/ to be under mlx5_ib responsibility.
Reviewed-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
parent 42f9bbd1
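
For context, a minimal before/after sketch of the calling-convention change this patch applies across mlx5_ib (illustration only, not part of the diff; the prototypes and call sites are lifted from the hunks below):

/* Before: the QP command helpers were exported by mlx5_core and took the
 * core device, so mlx5_ib call sites had to pass dev->mdev:
 *
 *	int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 *	err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
 */

/* After: the same helpers live inside mlx5_ib (qpc.c, declared in the new
 * qp.h), take the IB device, and reach the firmware command interface
 * through dev->mdev internally: */
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
/*	err = mlx5_core_destroy_qp(dev, &base->mqp); */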
@@ -13,6 +13,7 @@ mlx5_ib-y := ah.o \
 	mem.o \
 	mr.o \
 	qp.o \
+	qpc.o \
 	restrack.o \
 	srq.o \
 	srq_cmd.o
......
@@ -36,6 +36,7 @@
 #include <rdma/ib_cache.h>
 #include "mlx5_ib.h"
 #include "srq.h"
+#include "qp.h"
 static void mlx5_ib_cq_comp(struct mlx5_core_cq *cq, struct mlx5_eqe *eqe)
 {
@@ -484,7 +485,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
-		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
+		mqp = radix_tree_lookup(&dev->qp_table.tree, qpn);
 		*cur_qp = to_mibqp(mqp);
 	}
......
@@ -14,6 +14,7 @@
 #include <linux/mlx5/driver.h>
 #include <linux/mlx5/fs.h>
 #include "mlx5_ib.h"
+#include "qp.h"
 #include <linux/xarray.h>
 #define UVERBS_MODULE_NAME mlx5_ib
@@ -1356,7 +1357,7 @@ static int devx_obj_cleanup(struct ib_uobject *uobject,
 	}
 	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
-		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
 	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
 		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
 	else
@@ -1450,9 +1451,8 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
 		obj->flags |= DEVX_OBJ_FLAGS_DCT;
-		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
-					   cmd_in, cmd_in_len,
-					   cmd_out, cmd_out_len);
+		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
+					   cmd_in_len, cmd_out, cmd_out_len);
 	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
 		obj->flags |= DEVX_OBJ_FLAGS_CQ;
 		obj->core_cq.comp = devx_cq_comp;
@@ -1499,7 +1499,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 obj_destroy:
 	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
-		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
+		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
 	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
 		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
 	else
......
@@ -59,6 +59,7 @@
 #include "ib_rep.h"
 #include "cmd.h"
 #include "srq.h"
+#include "qp.h"
 #include <linux/mlx5/fs_helpers.h>
 #include <linux/mlx5/accel.h>
 #include <rdma/uverbs_std_types.h>
@@ -4632,8 +4633,7 @@ static void delay_drop_handler(struct work_struct *work)
 	atomic_inc(&delay_drop->events_cnt);
 	mutex_lock(&delay_drop->lock);
-	err = mlx5_core_set_delay_drop(delay_drop->dev->mdev,
-				       delay_drop->timeout);
+	err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout);
 	if (err) {
 		mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n",
 			     delay_drop->timeout);
@@ -7193,6 +7193,9 @@ static const struct mlx5_ib_profile pf_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 		     mlx5_ib_stage_roce_init,
 		     mlx5_ib_stage_roce_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_QP,
+		     mlx5_init_qp_table,
+		     mlx5_cleanup_qp_table),
 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
 		     mlx5_init_srq_table,
 		     mlx5_cleanup_srq_table),
@@ -7250,6 +7253,9 @@ const struct mlx5_ib_profile raw_eth_profile = {
 	STAGE_CREATE(MLX5_IB_STAGE_ROCE,
 		     mlx5_ib_stage_raw_eth_roce_init,
 		     mlx5_ib_stage_raw_eth_roce_cleanup),
+	STAGE_CREATE(MLX5_IB_STAGE_QP,
+		     mlx5_init_qp_table,
+		     mlx5_cleanup_qp_table),
 	STAGE_CREATE(MLX5_IB_STAGE_SRQ,
 		     mlx5_init_srq_table,
 		     mlx5_cleanup_srq_table),
......
@@ -869,6 +869,7 @@ enum mlx5_ib_stages {
 	MLX5_IB_STAGE_CAPS,
 	MLX5_IB_STAGE_NON_DEFAULT_CB,
 	MLX5_IB_STAGE_ROCE,
+	MLX5_IB_STAGE_QP,
 	MLX5_IB_STAGE_SRQ,
 	MLX5_IB_STAGE_DEVICE_RESOURCES,
 	MLX5_IB_STAGE_DEVICE_NOTIFIER,
@@ -1064,6 +1065,7 @@ struct mlx5_ib_dev {
 	struct mlx5_dm dm;
 	u16 devx_whitelist_uid;
 	struct mlx5_srq_table srq_table;
+	struct mlx5_qp_table qp_table;
 	struct mlx5_async_ctx async_ctx;
 	struct mlx5_devx_event_table devx_event_table;
 	struct mlx5_var_table var_table;
......
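
For reference, the mlx5_qp_table that moves into struct mlx5_ib_dev above is the same small radix-tree table that mlx5_core previously kept in mlx5_priv; roughly (reproduced from include/linux/mlx5/qp.h of this period from memory, so treat the exact layout as approximate):

struct mlx5_qp_table {
	struct notifier_block	nb;

	/* protect radix tree */
	spinlock_t		lock;
	struct radix_tree_root	tree;
};

This is why cq.c above can replace __mlx5_qp_lookup(dev->mdev, qpn) with a direct radix_tree_lookup(&dev->qp_table.tree, qpn).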
@@ -36,6 +36,7 @@
 #include "mlx5_ib.h"
 #include "cmd.h"
+#include "qp.h"
 #include <linux/mlx5/eq.h>
@@ -1219,7 +1220,7 @@ static inline struct mlx5_core_rsc_common *odp_get_rsc(struct mlx5_ib_dev *dev,
 	case MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE:
 	case MLX5_WQE_PF_TYPE_RESP:
 	case MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC:
-		common = mlx5_core_res_hold(dev->mdev, wq_num, MLX5_RES_QP);
+		common = mlx5_core_res_hold(dev, wq_num, MLX5_RES_QP);
 		break;
 	default:
 		break;
......
@@ -39,6 +39,7 @@
 #include "mlx5_ib.h"
 #include "ib_rep.h"
 #include "cmd.h"
+#include "qp.h"
 /* not supported currently */
 static int wq_signature;
@@ -1336,7 +1337,7 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 	pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
 	mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
-	err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
+	err = mlx5_core_create_sq_tracked(dev, in, inlen, &sq->base.mqp);
 	kvfree(in);
@@ -1356,7 +1357,7 @@ static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
 				     struct mlx5_ib_sq *sq)
 {
 	destroy_flow_rule_vport_sq(sq);
-	mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
+	mlx5_core_destroy_sq_tracked(dev, &sq->base.mqp);
 	ib_umem_release(sq->ubuffer.umem);
 }
@@ -1426,7 +1427,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 	qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
 	memcpy(pas, qp_pas, rq_pas_size);
-	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
+	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rq->base.mqp);
 	kvfree(in);
@@ -1436,7 +1437,7 @@ static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
 				     struct mlx5_ib_rq *rq)
 {
-	mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
+	mlx5_core_destroy_rq_tracked(dev, &rq->base.mqp);
 }
 static bool tunnel_offload_supported(struct mlx5_core_dev *dev)
@@ -2347,7 +2348,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		err = create_raw_packet_qp(dev, qp, in, inlen, pd, udata,
 					   &resp);
 	} else {
-		err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
+		err = mlx5_core_create_qp(dev, &base->mqp, in, inlen);
 	}
 	if (err) {
@@ -2513,8 +2514,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (qp->state != IB_QPS_RESET) {
 		if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
 		    !(qp->flags & MLX5_IB_QP_UNDERLAY)) {
-			err = mlx5_core_qp_modify(dev->mdev,
-						  MLX5_CMD_OP_2RST_QP, 0,
+			err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
 						  NULL, &base->mqp);
 		} else {
 			struct mlx5_modify_raw_qp_param raw_qp_param = {
@@ -2555,7 +2555,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	    qp->flags & MLX5_IB_QP_UNDERLAY) {
 		destroy_raw_packet_qp(dev, qp);
 	} else {
-		err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
+		err = mlx5_core_destroy_qp(dev, &base->mqp);
 		if (err)
 			mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
 				     base->mqp.qpn);
@@ -2818,7 +2818,7 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
 	if (mqp->state == IB_QPS_RTR) {
 		int err;
-		err = mlx5_core_destroy_dct(dev->mdev, &mqp->dct.mdct);
+		err = mlx5_core_destroy_dct(dev, &mqp->dct.mdct);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy DCT %d\n", err);
 			return err;
@@ -3462,10 +3462,9 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
 	base = &mqp->trans_qp.base;
 	context.qp_counter_set_usr_page &= cpu_to_be32(0xffffff);
 	context.qp_counter_set_usr_page |= cpu_to_be32(set_id << 24);
-	return mlx5_core_qp_modify(dev->mdev,
-				   MLX5_CMD_OP_RTS2RTS_QP,
-				   MLX5_QP_OPTPAR_COUNTER_SET_ID,
-				   &context, &base->mqp);
+	return mlx5_core_qp_modify(dev, MLX5_CMD_OP_RTS2RTS_QP,
+				   MLX5_QP_OPTPAR_COUNTER_SET_ID, &context,
+				   &base->mqp);
 }
 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
@@ -3752,8 +3751,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 		err = modify_raw_packet_qp(dev, qp, &raw_qp_param, tx_affinity);
 	} else {
-		err = mlx5_core_qp_modify(dev->mdev, op, optpar, context,
-					  &base->mqp);
+		err = mlx5_core_qp_modify(dev, op, optpar, context, &base->mqp);
 	}
 	if (err)
@@ -3927,7 +3925,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		MLX5_SET(dctc, dctc, my_addr_index, attr->ah_attr.grh.sgid_index);
 		MLX5_SET(dctc, dctc, hop_limit, attr->ah_attr.grh.hop_limit);
-		err = mlx5_core_create_dct(dev->mdev, &qp->dct.mdct, qp->dct.in,
+		err = mlx5_core_create_dct(dev, &qp->dct.mdct, qp->dct.in,
 					   MLX5_ST_SZ_BYTES(create_dct_in), out,
 					   sizeof(out));
 		if (err)
@@ -3935,7 +3933,7 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		resp.dctn = qp->dct.mdct.mqp.qpn;
 		err = ib_copy_to_udata(udata, &resp, resp.response_length);
 		if (err) {
-			mlx5_core_destroy_dct(dev->mdev, &qp->dct.mdct);
+			mlx5_core_destroy_dct(dev, &qp->dct.mdct);
 			return err;
 		}
 	} else {
@@ -5697,8 +5695,7 @@ static int query_qp_attr(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
 	if (!outb)
 		return -ENOMEM;
-	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
-				 outlen);
+	err = mlx5_core_qp_query(dev, &qp->trans_qp.base.mqp, outb, outlen);
 	if (err)
 		goto out;
@@ -5776,7 +5773,7 @@ static int mlx5_ib_dct_query_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *mqp,
 	if (!out)
 		return -ENOMEM;
-	err = mlx5_core_dct_query(dev->mdev, dct, out, outlen);
+	err = mlx5_core_dct_query(dev, dct, out, outlen);
 	if (err)
 		goto out;
@@ -5962,7 +5959,7 @@ static int set_delay_drop(struct mlx5_ib_dev *dev)
 	if (dev->delay_drop.activate)
 		goto out;
-	err = mlx5_core_set_delay_drop(dev->mdev, dev->delay_drop.timeout);
+	err = mlx5_core_set_delay_drop(dev, dev->delay_drop.timeout);
 	if (err)
 		goto out;
@@ -6068,13 +6065,13 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	}
 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
 	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
-	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
+	err = mlx5_core_create_rq_tracked(dev, in, inlen, &rwq->core_qp);
 	if (!err && init_attr->create_flags & IB_WQ_FLAGS_DELAY_DROP) {
 		err = set_delay_drop(dev);
 		if (err) {
 			mlx5_ib_warn(dev, "Failed to enable delay drop err=%d\n",
 				     err);
-			mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+			mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
 		} else {
 			rwq->create_flags |= MLX5_IB_WQ_FLAGS_DELAY_DROP;
 		}
@@ -6256,7 +6253,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 	return &rwq->ibwq;
 err_copy:
-	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
 err_user_rq:
 	destroy_user_rq(dev, pd, rwq, udata);
 err:
@@ -6269,7 +6266,7 @@ void mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
-	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
+	mlx5_core_destroy_rq_tracked(dev, &rwq->core_qp);
 	destroy_user_rq(dev, wq->pd, rwq, udata);
 	kfree(rwq);
 }
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
*/
#ifndef _MLX5_IB_QP_H
#define _MLX5_IB_QP_H
#include "mlx5_ib.h"
int mlx5_init_qp_table(struct mlx5_ib_dev *dev);
void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev);
int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *qp,
u32 *in, int inlen, u32 *out, int outlen);
int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
u32 *in, int inlen);
int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
void *qpc, struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp);
int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct);
int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
u32 *out, int outlen);
int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
u32 *out, int outlen);
int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, u32 timeout_usec);
void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *rq);
int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *sq);
void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
struct mlx5_core_qp *sq);
int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
struct mlx5_core_qp *rq);
struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
int res_num,
enum mlx5_res_type res_type);
void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn);
int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn);
#endif /* _MLX5_IB_QP_H */
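
A hypothetical mlx5_ib-side caller of the new header, only to show the intended usage pattern (the helper name and the follow-up setup step are made up for illustration):

#include "mlx5_ib.h"
#include "qp.h"

/* Hypothetical example: create a firmware-tracked RQ and undo it if a later
 * step fails, mirroring how qp.c consumes the new interface. */
static int example_create_tracked_rq(struct mlx5_ib_dev *dev, u32 *in,
				     int inlen, struct mlx5_core_qp *rq)
{
	int err;

	err = mlx5_core_create_rq_tracked(dev, in, inlen, rq);
	if (err)
		return err;

	err = example_further_setup(dev, rq);	/* hypothetical follow-up step */
	if (err) {
		mlx5_core_destroy_rq_tracked(dev, rq);
		return err;
	}

	return 0;
}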
+// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- *   copyright notice, this list of conditions and the following
- *   disclaimer in the documentation and/or other materials
- *   provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
  */
 #include <linux/gfp.h>
-#include <linux/export.h>
 #include <linux/mlx5/qp.h>
 #include <linux/mlx5/driver.h>
-#include <linux/mlx5/transobj.h>
-#include "mlx5_core.h"
-#include "lib/eq.h"
+#include "mlx5_ib.h"
+#include "qp.h"
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
 			       struct mlx5_core_dct *dct);
 static struct mlx5_core_rsc_common *
@@ -123,11 +93,9 @@ static int rsc_event_notifier(struct notifier_block *nb,
 {
 	struct mlx5_core_rsc_common *common;
 	struct mlx5_qp_table *table;
-	struct mlx5_core_dev *dev;
 	struct mlx5_core_dct *dct;
 	u8 event_type = (u8)type;
 	struct mlx5_core_qp *qp;
-	struct mlx5_priv *priv;
 	struct mlx5_eqe *eqe;
 	u32 rsn;
@@ -154,22 +122,12 @@ static int rsc_event_notifier(struct notifier_block *nb,
 	}
 	table = container_of(nb, struct mlx5_qp_table, nb);
-	priv = container_of(table, struct mlx5_priv, qp_table);
-	dev = container_of(priv, struct mlx5_core_dev, priv);
-	mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn);
 	common = mlx5_get_rsc(table, rsn);
-	if (!common) {
-		mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
+	if (!common)
 		return NOTIFY_OK;
-	}
-	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) {
-		mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n",
-			       event_type, rsn);
+	if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type))
 		goto out;
-	}
 	switch (common->res) {
 	case MLX5_RES_QP:
@@ -184,7 +142,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
 		complete(&dct->drained);
 		break;
 	default:
-		mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn);
+		break;
 	}
 out:
 	mlx5_core_put_rsc(common);
@@ -192,11 +150,10 @@ static int rsc_event_notifier(struct notifier_block *nb,
 	return NOTIFY_OK;
 }
-static int create_resource_common(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *qp,
-				  int rsc_type)
+static int create_resource_common(struct mlx5_ib_dev *dev,
+				  struct mlx5_core_qp *qp, int rsc_type)
 {
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	struct mlx5_qp_table *table = &dev->qp_table;
 	int err;
 	qp->common.res = rsc_type;
@@ -215,10 +172,10 @@ static int create_resource_common(struct mlx5_core_dev *dev,
 	return 0;
 }
-static void destroy_resource_common(struct mlx5_core_dev *dev,
+static void destroy_resource_common(struct mlx5_ib_dev *dev,
 				    struct mlx5_core_qp *qp)
 {
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	struct mlx5_qp_table *table = &dev->qp_table;
 	unsigned long flags;
 	spin_lock_irqsave(&table->lock, flags);
@@ -229,24 +186,19 @@ static void destroy_resource_common(struct mlx5_core_dev *dev,
 	wait_for_completion(&qp->common.free);
 }
-static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+static int _mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
 				  struct mlx5_core_dct *dct, bool need_cleanup)
 {
-	u32 out[MLX5_ST_SZ_DW(destroy_dct_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(destroy_dct_in)] = {};
 	struct mlx5_core_qp *qp = &dct->mqp;
 	int err;
 	err = mlx5_core_drain_dct(dev, dct);
 	if (err) {
-		if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+		if (dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
 			goto destroy;
-		} else {
-			mlx5_core_warn(
-				dev, "failed drain DCT 0x%x with error 0x%x\n",
-				qp->qpn, err);
-			return err;
-		}
+		return err;
 	}
 	wait_for_completion(&dct->drained);
 destroy:
@@ -255,15 +207,12 @@ static int _mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
 	MLX5_SET(destroy_dct_in, in, opcode, MLX5_CMD_OP_DESTROY_DCT);
 	MLX5_SET(destroy_dct_in, in, dctn, qp->qpn);
 	MLX5_SET(destroy_dct_in, in, uid, qp->uid);
-	err = mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
-			    (void *)&out, sizeof(out));
+	err = mlx5_cmd_exec_in(dev->mdev, destroy_dct, in);
 	return err;
 }
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
-			 struct mlx5_core_dct *dct,
-			 u32 *in, int inlen,
-			 u32 *out, int outlen)
+int mlx5_core_create_dct(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
+			 u32 *in, int inlen, u32 *out, int outlen)
 {
 	struct mlx5_core_qp *qp = &dct->mqp;
 	int err;
@@ -271,11 +220,9 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
 	init_completion(&dct->drained);
 	MLX5_SET(create_dct_in, in, opcode, MLX5_CMD_OP_CREATE_DCT);
-	err = mlx5_cmd_exec(dev, in, inlen, out, outlen);
-	if (err) {
-		mlx5_core_warn(dev, "create DCT failed, ret %d\n", err);
+	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, outlen);
+	if (err)
 		return err;
-	}
 	qp->qpn = MLX5_GET(create_dct_out, out, dctn);
 	qp->uid = MLX5_GET(create_dct_in, in, uid);
@@ -288,108 +235,83 @@ int mlx5_core_create_dct(struct mlx5_core_dev *dev,
 	_mlx5_core_destroy_dct(dev, dct, false);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx5_core_create_dct);
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
-			struct mlx5_core_qp *qp,
+int mlx5_core_create_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
 			u32 *in, int inlen)
 {
-	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {0};
-	u32 dout[MLX5_ST_SZ_DW(destroy_qp_out)];
-	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)];
+	u32 out[MLX5_ST_SZ_DW(create_qp_out)] = {};
+	u32 din[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
 	int err;
 	MLX5_SET(create_qp_in, in, opcode, MLX5_CMD_OP_CREATE_QP);
-	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
+	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
 	if (err)
 		return err;
 	qp->uid = MLX5_GET(create_qp_in, in, uid);
 	qp->qpn = MLX5_GET(create_qp_out, out, qpn);
-	mlx5_core_dbg(dev, "qpn = 0x%x\n", qp->qpn);
 	err = create_resource_common(dev, qp, MLX5_RES_QP);
 	if (err)
 		goto err_cmd;
-	err = mlx5_debug_qp_add(dev, qp);
-	if (err)
-		mlx5_core_dbg(dev, "failed adding QP 0x%x to debug file system\n",
-			      qp->qpn);
-	atomic_inc(&dev->num_qps);
+	mlx5_debug_qp_add(dev->mdev, qp);
 	return 0;
 err_cmd:
-	memset(din, 0, sizeof(din));
-	memset(dout, 0, sizeof(dout));
 	MLX5_SET(destroy_qp_in, din, opcode, MLX5_CMD_OP_DESTROY_QP);
 	MLX5_SET(destroy_qp_in, din, qpn, qp->qpn);
 	MLX5_SET(destroy_qp_in, din, uid, qp->uid);
-	mlx5_cmd_exec(dev, din, sizeof(din), dout, sizeof(dout));
+	mlx5_cmd_exec_in(dev->mdev, destroy_qp, din);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx5_core_create_qp);
-static int mlx5_core_drain_dct(struct mlx5_core_dev *dev,
+static int mlx5_core_drain_dct(struct mlx5_ib_dev *dev,
 			       struct mlx5_core_dct *dct)
 {
-	u32 out[MLX5_ST_SZ_DW(drain_dct_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(drain_dct_in)] = {};
 	struct mlx5_core_qp *qp = &dct->mqp;
 	MLX5_SET(drain_dct_in, in, opcode, MLX5_CMD_OP_DRAIN_DCT);
 	MLX5_SET(drain_dct_in, in, dctn, qp->qpn);
 	MLX5_SET(drain_dct_in, in, uid, qp->uid);
-	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
-			     (void *)&out, sizeof(out));
+	return mlx5_cmd_exec_in(dev->mdev, drain_dct, in);
 }
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
+int mlx5_core_destroy_dct(struct mlx5_ib_dev *dev,
 			  struct mlx5_core_dct *dct)
 {
 	return _mlx5_core_destroy_dct(dev, dct, true);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_dct);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
-			 struct mlx5_core_qp *qp)
+int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp)
 {
-	u32 out[MLX5_ST_SZ_DW(destroy_qp_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {0};
-	int err;
+	u32 in[MLX5_ST_SZ_DW(destroy_qp_in)] = {};
-	mlx5_debug_qp_remove(dev, qp);
+	mlx5_debug_qp_remove(dev->mdev, qp);
 	destroy_resource_common(dev, qp);
 	MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP);
 	MLX5_SET(destroy_qp_in, in, qpn, qp->qpn);
 	MLX5_SET(destroy_qp_in, in, uid, qp->uid);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
-	if (err)
-		return err;
-	atomic_dec(&dev->num_qps);
+	mlx5_cmd_exec_in(dev->mdev, destroy_qp, in);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mlx5_core_destroy_qp);
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
+int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev,
 			     u32 timeout_usec)
 {
-	u32 out[MLX5_ST_SZ_DW(set_delay_drop_params_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(set_delay_drop_params_in)] = {};
 	MLX5_SET(set_delay_drop_params_in, in, opcode,
 		 MLX5_CMD_OP_SET_DELAY_DROP_PARAMS);
 	MLX5_SET(set_delay_drop_params_in, in, delay_drop_timeout,
 		 timeout_usec / 100);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_in(dev->mdev, set_delay_drop_params, in);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_set_delay_drop);
 struct mbox_info {
 	u32 *in;
@@ -495,120 +417,112 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
 					opt_param_mask, qpc, uid);
 		break;
 	default:
-		mlx5_core_err(dev, "Unknown transition for modify QP: OP(0x%x) QPN(0x%x)\n",
-			      opcode, qpn);
 		return -EINVAL;
 	}
 	return 0;
 }
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
-			u32 opt_param_mask, void *qpc,
-			struct mlx5_core_qp *qp)
+int mlx5_core_qp_modify(struct mlx5_ib_dev *dev, u16 opcode, u32 opt_param_mask,
			void *qpc, struct mlx5_core_qp *qp)
 {
 	struct mbox_info mbox;
 	int err;
-	err = modify_qp_mbox_alloc(dev, opcode, qp->qpn,
+	err = modify_qp_mbox_alloc(dev->mdev, opcode, qp->qpn,
 				   opt_param_mask, qpc, &mbox, qp->uid);
 	if (err)
 		return err;
-	err = mlx5_cmd_exec(dev, mbox.in, mbox.inlen, mbox.out, mbox.outlen);
+	err = mlx5_cmd_exec(dev->mdev, mbox.in, mbox.inlen, mbox.out,
+			    mbox.outlen);
 	mbox_free(&mbox);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx5_core_qp_modify);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev)
+int mlx5_init_qp_table(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	struct mlx5_qp_table *table = &dev->qp_table;
-	memset(table, 0, sizeof(*table));
 	spin_lock_init(&table->lock);
 	INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
-	mlx5_qp_debugfs_init(dev);
+	mlx5_qp_debugfs_init(dev->mdev);
 	table->nb.notifier_call = rsc_event_notifier;
-	mlx5_notifier_register(dev, &table->nb);
+	mlx5_notifier_register(dev->mdev, &table->nb);
+	return 0;
 }
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev)
+void mlx5_cleanup_qp_table(struct mlx5_ib_dev *dev)
 {
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	struct mlx5_qp_table *table = &dev->qp_table;
-	mlx5_notifier_unregister(dev, &table->nb);
-	mlx5_qp_debugfs_cleanup(dev);
+	mlx5_notifier_unregister(dev->mdev, &table->nb);
+	mlx5_qp_debugfs_cleanup(dev->mdev);
 }
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
+int mlx5_core_qp_query(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp,
 		       u32 *out, int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_qp_in)] = {};
 	MLX5_SET(query_qp_in, in, opcode, MLX5_CMD_OP_QUERY_QP);
 	MLX5_SET(query_qp_in, in, qpn, qp->qpn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
+	return mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, outlen);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_qp_query);
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
+int mlx5_core_dct_query(struct mlx5_ib_dev *dev, struct mlx5_core_dct *dct,
 			u32 *out, int outlen)
 {
-	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(query_dct_in)] = {};
 	struct mlx5_core_qp *qp = &dct->mqp;
 	MLX5_SET(query_dct_in, in, opcode, MLX5_CMD_OP_QUERY_DCT);
 	MLX5_SET(query_dct_in, in, dctn, qp->qpn);
-	return mlx5_cmd_exec(dev, (void *)&in, sizeof(in),
-			     (void *)out, outlen);
+	return mlx5_cmd_exec(dev->mdev, (void *)&in, sizeof(in), (void *)out,
			     outlen);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_dct_query);
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn)
+int mlx5_core_xrcd_alloc(struct mlx5_ib_dev *dev, u32 *xrcdn)
 {
-	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {0};
+	u32 out[MLX5_ST_SZ_DW(alloc_xrcd_out)] = {};
+	u32 in[MLX5_ST_SZ_DW(alloc_xrcd_in)] = {};
 	int err;
 	MLX5_SET(alloc_xrcd_in, in, opcode, MLX5_CMD_OP_ALLOC_XRCD);
-	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	err = mlx5_cmd_exec_inout(dev->mdev, alloc_xrcd, in, out);
 	if (!err)
 		*xrcdn = MLX5_GET(alloc_xrcd_out, out, xrcd);
 	return err;
 }
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_alloc);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn)
+int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn)
 {
-	u32 out[MLX5_ST_SZ_DW(dealloc_xrcd_out)] = {0};
-	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {0};
+	u32 in[MLX5_ST_SZ_DW(dealloc_xrcd_in)] = {};
 	MLX5_SET(dealloc_xrcd_in, in, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
 	MLX5_SET(dealloc_xrcd_in, in, xrcd, xrcdn);
-	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_xrcd_dealloc);
-static void destroy_rq_tracked(struct mlx5_core_dev *dev, u32 rqn, u16 uid)
+static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid)
 {
 	u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {};
-	u32 out[MLX5_ST_SZ_DW(destroy_rq_out)] = {};
 	MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
 	MLX5_SET(destroy_rq_in, in, rqn, rqn);
 	MLX5_SET(destroy_rq_in, in, uid, uid);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev->mdev, destroy_rq, in);
 }
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
 				struct mlx5_core_qp *rq)
 {
 	int err;
 	u32 rqn;
-	err = mlx5_core_create_rq(dev, in, inlen, &rqn);
+	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rqn);
 	if (err)
 		return err;
@@ -625,39 +539,37 @@ int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
 	return err;
 }
-EXPORT_SYMBOL(mlx5_core_create_rq_tracked);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev,
 				  struct mlx5_core_qp *rq)
 {
 	destroy_resource_common(dev, rq);
 	destroy_rq_tracked(dev, rq->qpn, rq->uid);
 }
-EXPORT_SYMBOL(mlx5_core_destroy_rq_tracked);
-static void destroy_sq_tracked(struct mlx5_core_dev *dev, u32 sqn, u16 uid)
+static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid)
 {
 	u32 in[MLX5_ST_SZ_DW(destroy_sq_in)] = {};
-	u32 out[MLX5_ST_SZ_DW(destroy_sq_out)] = {};
 	MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
 	MLX5_SET(destroy_sq_in, in, sqn, sqn);
 	MLX5_SET(destroy_sq_in, in, uid, uid);
-	mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
+	mlx5_cmd_exec_in(dev->mdev, destroy_sq, in);
 }
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+int mlx5_core_create_sq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen,
 				struct mlx5_core_qp *sq)
 {
+	u32 out[MLX5_ST_SZ_DW(create_sq_out)] = {};
 	int err;
-	u32 sqn;
-	err = mlx5_core_create_sq(dev, in, inlen, &sqn);
+	MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+	err = mlx5_cmd_exec(dev->mdev, in, inlen, out, sizeof(out));
 	if (err)
 		return err;
+	sq->qpn = MLX5_GET(create_sq_out, out, sqn);
 	sq->uid = MLX5_GET(create_sq_in, in, uid);
-	sq->qpn = sqn;
 	err = create_resource_common(dev, sq, MLX5_RES_SQ);
 	if (err)
 		goto err_destroy_sq;
@@ -669,29 +581,25 @@ int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
 	return err;
 }
-EXPORT_SYMBOL(mlx5_core_create_sq_tracked);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+void mlx5_core_destroy_sq_tracked(struct mlx5_ib_dev *dev,
 				  struct mlx5_core_qp *sq)
 {
 	destroy_resource_common(dev, sq);
 	destroy_sq_tracked(dev, sq->qpn, sq->uid);
 }
-EXPORT_SYMBOL(mlx5_core_destroy_sq_tracked);
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
+struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_ib_dev *dev,
 						int res_num,
 						enum mlx5_res_type res_type)
 {
 	u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN);
-	struct mlx5_qp_table *table = &dev->priv.qp_table;
+	struct mlx5_qp_table *table = &dev->qp_table;
 	return mlx5_get_rsc(table, rsn);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_res_hold);
 void mlx5_core_res_put(struct mlx5_core_rsc_common *res)
 {
 	mlx5_core_put_rsc(res);
 }
-EXPORT_SYMBOL_GPL(mlx5_core_res_put);
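
Note on the command calls in the rewritten file above: the explicit out[] buffers disappear because the code now uses the mlx5_cmd_exec_in()/mlx5_cmd_exec_inout() wrappers from include/linux/mlx5/driver.h. As a rough approximation of what those wrappers do (paraphrased sketch, not the literal kernel definition):

/* Approximate behaviour: buffer sizes are derived from the ifc command name,
 * and the _in variant supplies a throwaway, correctly sized out buffer. */
#define mlx5_cmd_exec_inout(dev, ifc_cmd, in, out)                        \
	mlx5_cmd_exec(dev, in, MLX5_ST_SZ_BYTES(ifc_cmd##_in), out,       \
		      MLX5_ST_SZ_BYTES(ifc_cmd##_out))

#define mlx5_cmd_exec_in(dev, ifc_cmd, in)                                \
	({                                                                \
		u32 _out[MLX5_ST_SZ_DW(ifc_cmd##_out)] = {};              \
		mlx5_cmd_exec_inout(dev, ifc_cmd, in, _out);              \
	})

So, for example, mlx5_cmd_exec_in(dev->mdev, destroy_qp, in) stands in for the old explicit mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)) with a destroy_qp_out sized scratch buffer.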
@@ -7,6 +7,7 @@
 #include <linux/mlx5/driver.h>
 #include "mlx5_ib.h"
 #include "srq.h"
+#include "qp.h"
 static int get_pas_size(struct mlx5_srq_attr *in)
 {
......
@@ -12,7 +12,7 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
 # mlx5 core basic
 #
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
-		health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \
+		health.o mcg.o cq.o alloc.o port.o mr.o pd.o \
 		transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \
 		fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \
 		lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \
......
@@ -101,15 +101,15 @@ void mlx5_unregister_debugfs(void)
 void mlx5_qp_debugfs_init(struct mlx5_core_dev *dev)
 {
-	atomic_set(&dev->num_qps, 0);
 	dev->priv.qp_debugfs = debugfs_create_dir("QPs", dev->priv.dbg_root);
 }
+EXPORT_SYMBOL(mlx5_qp_debugfs_init);
 void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev)
 {
 	debugfs_remove_recursive(dev->priv.qp_debugfs);
 }
+EXPORT_SYMBOL(mlx5_qp_debugfs_cleanup);
 void mlx5_eq_debugfs_init(struct mlx5_core_dev *dev)
 {
@@ -450,6 +450,7 @@ int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
 	return err;
 }
+EXPORT_SYMBOL(mlx5_debug_qp_add);
 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
 {
@@ -459,6 +460,7 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp)
 	if (qp->dbg)
 		rem_res_tree(qp->dbg);
 }
+EXPORT_SYMBOL(mlx5_debug_qp_remove);
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
 {
......
@@ -836,8 +836,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 	mlx5_cq_debugfs_init(dev);
-	mlx5_init_qp_table(dev);
 	mlx5_init_reserved_gids(dev);
 	mlx5_init_clock(dev);
@@ -896,7 +894,6 @@ static int mlx5_init_once(struct mlx5_core_dev *dev)
 err_tables_cleanup:
 	mlx5_geneve_destroy(dev->geneve);
 	mlx5_vxlan_destroy(dev->vxlan);
-	mlx5_cleanup_qp_table(dev);
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_events_cleanup(dev);
 err_eq_cleanup:
@@ -924,7 +921,6 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
 	mlx5_vxlan_destroy(dev->vxlan);
 	mlx5_cleanup_clock(dev);
 	mlx5_cleanup_reserved_gids(dev);
-	mlx5_cleanup_qp_table(dev);
 	mlx5_cq_debugfs_cleanup(dev);
 	mlx5_events_cleanup(dev);
 	mlx5_eq_table_cleanup(dev);
......
/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef MLX5_CMD_H
#define MLX5_CMD_H
#include <linux/types.h>
struct manage_pages_layout {
u64 ptr;
u32 reserved;
u16 num_entries;
u16 func_id;
};
struct mlx5_cmd_alloc_uar_imm_out {
u32 rsvd[3];
u32 uarn;
};
#endif /* MLX5_CMD_H */
@@ -541,7 +541,6 @@ struct mlx5_priv {
 	struct mlx5_core_health health;
 	/* start: qp staff */
-	struct mlx5_qp_table	qp_table;
 	struct dentry	       *qp_debugfs;
 	struct dentry	       *eq_debugfs;
 	struct dentry	       *cq_debugfs;
@@ -687,7 +686,6 @@ struct mlx5_core_dev {
 	unsigned long		intf_state;
 	struct mlx5_priv	priv;
 	struct mlx5_profile	*profile;
-	atomic_t		num_qps;
 	u32			issi;
 	struct mlx5e_resources	mlx5e_res;
 	struct mlx5_dm		*dm;
......
@@ -553,53 +553,8 @@ struct mlx5_qp_context {
 	u8			rsvd1[24];
 };
-static inline struct mlx5_core_qp *__mlx5_qp_lookup(struct mlx5_core_dev *dev, u32 qpn)
-{
-	return radix_tree_lookup(&dev->priv.qp_table.tree, qpn);
-}
-int mlx5_core_create_dct(struct mlx5_core_dev *dev,
-			 struct mlx5_core_dct *qp,
-			 u32 *in, int inlen,
-			 u32 *out, int outlen);
-int mlx5_core_create_qp(struct mlx5_core_dev *dev,
-			struct mlx5_core_qp *qp,
-			u32 *in,
-			int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode,
-			u32 opt_param_mask, void *qpc,
-			struct mlx5_core_qp *qp);
-int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
-			 struct mlx5_core_qp *qp);
-int mlx5_core_destroy_dct(struct mlx5_core_dev *dev,
-			  struct mlx5_core_dct *dct);
-int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp,
-		       u32 *out, int outlen);
-int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct,
-			u32 *out, int outlen);
-int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev,
-			     u32 timeout_usec);
-int mlx5_core_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn);
-int mlx5_core_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn);
-void mlx5_init_qp_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev);
 int mlx5_debug_qp_add(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
 void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
-int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
-				struct mlx5_core_qp *rq);
-void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *rq);
-int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
-				struct mlx5_core_qp *sq);
-void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
-				  struct mlx5_core_qp *sq);
-struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev,
-						int res_num,
-						enum mlx5_res_type res_type);
-void mlx5_core_res_put(struct mlx5_core_rsc_common *res);
 static inline const char *mlx5_qp_type_str(int type)
 {
......