Commit 19098df2 authored by Majd Dibbiny <majd@mellanox.com>, committed by Doug Ledford

IB/mlx5: Refactor mlx5_ib_qp to accommodate other QP types

Extract the fields specific to IB transport QPs into a new
mlx5_ib_qp_trans structure. The mlx5_core QP object now resides in
mlx5_ib_qp_base, which all QP types embed. When we need to find the
mlx5_ib_qp from an mlx5_core QP (event handling and the like), we
follow a back-pointer that resides in mlx5_ib_qp_base.

In addition, we delete all redundant fields that weren't used anywhere
in the code:
-doorbell_qpn
-sq_max_wqes_per_wr
-sq_spare_wqes
Signed-off-by: Majd Dibbiny <majd@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 146d2f1a
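
The core of the refactor is easiest to see outside the diff. Below is a
condensed, illustrative sketch (not part of the patch; field lists
trimmed) of the new layering, and of why the QP lookup changes: once
mqp is buried inside the embedded base, container_of() can only recover
the mlx5_ib_qp_base, and the container_mibqp back-pointer, set at
creation time, finishes the walk to the mlx5_ib_qp.

	struct mlx5_ib_qp_base {
		struct mlx5_ib_qp	*container_mibqp; /* back-pointer, set in create_qp_common() */
		struct mlx5_core_qp	mqp;              /* what mlx5_core hands to callbacks */
		struct mlx5_ib_ubuffer	ubuffer;          /* umem, buf_size, buf_addr */
	};

	struct mlx5_ib_qp_trans {
		struct mlx5_ib_qp_base	base;             /* common part, embedded first */
		u8			alt_port;         /* transport-only state */
		/* ... xrcdn, atomic_rd_en, resp_depth ... */
	};

	struct mlx5_ib_qp {
		struct ib_qp		ibqp;
		struct mlx5_ib_qp_trans	trans_qp;
		/* ... */
	};

	/* Old: container_of(mqp, struct mlx5_ib_qp, mqp) worked because mqp
	 * was a direct member of mlx5_ib_qp.  New: hop through the base. */
	static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
	{
		return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
	}
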
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -191,35 +191,44 @@ struct mlx5_ib_pfault {
 	struct mlx5_pagefault	mpfault;
 };
 
+struct mlx5_ib_ubuffer {
+	struct ib_umem	       *umem;
+	int			buf_size;
+	u64			buf_addr;
+};
+
+struct mlx5_ib_qp_base {
+	struct mlx5_ib_qp	*container_mibqp;
+	struct mlx5_core_qp	mqp;
+	struct mlx5_ib_ubuffer	ubuffer;
+};
+
+struct mlx5_ib_qp_trans {
+	struct mlx5_ib_qp_base	base;
+	u16			xrcdn;
+	u8			alt_port;
+	u8			atomic_rd_en;
+	u8			resp_depth;
+};
+
 struct mlx5_ib_qp {
 	struct ib_qp		ibqp;
-	struct mlx5_core_qp	mqp;
+	struct mlx5_ib_qp_trans	trans_qp;
 	struct mlx5_buf		buf;
 
 	struct mlx5_db		db;
 	struct mlx5_ib_wq	rq;
 
-	u32			doorbell_qpn;
 	u8			sq_signal_bits;
 	u8			fm_cache;
-	int			sq_max_wqes_per_wr;
-	int			sq_spare_wqes;
 	struct mlx5_ib_wq	sq;
 
-	struct ib_umem	       *umem;
-	int			buf_size;
-
 	/* serialize qp state modifications
 	 */
 	struct mutex		mutex;
-	u16			xrcdn;
 	u32			flags;
 	u8			port;
-	u8			alt_port;
-	u8			atomic_rd_en;
-	u8			resp_depth;
 	u8			state;
-	int			mlx_type;
 	int			wq_sig;
 	int			scat_cqe;
 	int			max_inline_data;
@@ -489,7 +498,7 @@ static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
 
 static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
 {
-	return container_of(mqp, struct mlx5_ib_qp, mqp);
+	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
 }
 
 static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mr *mmr)
@@ -567,7 +576,8 @@ int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		      struct ib_recv_wr **bad_wr);
 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n);
 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
-			  void *buffer, u32 length);
+			  void *buffer, u32 length,
+			  struct mlx5_ib_qp_base *base);
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev,
 				const struct ib_cq_init_attr *attr,
 				struct ib_ucontext *context,
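
To see why the back-pointer is wired the way it is, consider the event
path: mlx5_core invokes the callback with only the mlx5_core_qp it
owns, and the handler has to climb back up to the driver's QP. A
minimal sketch of that round trip, using names from the patch (the
wiring lines are quoted from create_qp_common() in qp.c further down;
qp_event_sketch itself is a hypothetical stand-in for
mlx5_ib_qp_event):

	/* Done once at creation, in create_qp_common():
	 *	base->container_mibqp = qp;
	 *	base->mqp.event = mlx5_ib_qp_event;
	 */

	static void qp_event_sketch(struct mlx5_core_qp *mqp, int type)
	{
		struct mlx5_ib_qp *qp = to_mibqp(mqp);	/* base + back-pointer */

		if (type == MLX5_EVENT_TYPE_PATH_MIG) {
			/* This event is only valid for trans_qps */
			qp->port = qp->trans_qp.alt_port;
		}
	}
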
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -153,14 +153,16 @@ static struct mlx5_ib_mr *mlx5_ib_odp_find_mr_lkey(struct mlx5_ib_dev *dev,
 
 static void mlx5_ib_page_fault_resume(struct mlx5_ib_qp *qp,
 				      struct mlx5_ib_pfault *pfault,
-				      int error) {
+				      int error)
+{
 	struct mlx5_ib_dev *dev = to_mdev(qp->ibqp.pd->device);
-	int ret = mlx5_core_page_fault_resume(dev->mdev, qp->mqp.qpn,
+	u32 qpn = qp->trans_qp.base.mqp.qpn;
+	int ret = mlx5_core_page_fault_resume(dev->mdev,
+					      qpn,
 					      pfault->mpfault.flags,
 					      error);
 	if (ret)
-		pr_err("Failed to resolve the page fault on QP 0x%x\n",
-		       qp->mqp.qpn);
+		pr_err("Failed to resolve the page fault on QP 0x%x\n", qpn);
 }
 
 /*
@@ -391,6 +393,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 #if defined(DEBUG)
 	u32 ctrl_wqe_index, ctrl_qpn;
 #endif
+	u32 qpn = qp->trans_qp.base.mqp.qpn;
 
 	ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
 	if (ds * MLX5_WQE_DS_UNITS > wqe_length) {
@@ -401,7 +404,7 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 
 	if (ds == 0) {
 		mlx5_ib_err(dev, "Got WQE with zero DS. wqe_index=%x, qpn=%x\n",
-			    wqe_index, qp->mqp.qpn);
+			    wqe_index, qpn);
 		return -EFAULT;
 	}
 
@@ -411,16 +414,16 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 			 MLX5_WQE_CTRL_WQE_INDEX_SHIFT;
 	if (wqe_index != ctrl_wqe_index) {
 		mlx5_ib_err(dev, "Got WQE with invalid wqe_index. wqe_index=0x%x, qpn=0x%x ctrl->wqe_index=0x%x\n",
-			    wqe_index, qp->mqp.qpn,
+			    wqe_index, qpn,
 			    ctrl_wqe_index);
 		return -EFAULT;
 	}
 
 	ctrl_qpn = (be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_QPN_MASK) >>
 		MLX5_WQE_CTRL_QPN_SHIFT;
-	if (qp->mqp.qpn != ctrl_qpn) {
+	if (qpn != ctrl_qpn) {
 		mlx5_ib_err(dev, "Got WQE with incorrect QP number. wqe_index=0x%x, qpn=0x%x ctrl->qpn=0x%x\n",
-			    wqe_index, qp->mqp.qpn,
+			    wqe_index, qpn,
 			    ctrl_qpn);
 		return -EFAULT;
 	}
@@ -537,6 +540,7 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
 	int resume_with_error = 0;
 	u16 wqe_index = pfault->mpfault.wqe.wqe_index;
 	int requestor = pfault->mpfault.flags & MLX5_PFAULT_REQUESTOR;
+	u32 qpn = qp->trans_qp.base.mqp.qpn;
 
 	buffer = (char *)__get_free_page(GFP_KERNEL);
 	if (!buffer) {
@@ -546,10 +550,10 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
 	}
 
 	ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
-				    PAGE_SIZE);
+				    PAGE_SIZE, &qp->trans_qp.base);
 	if (ret < 0) {
 		mlx5_ib_err(dev, "Failed reading a WQE following page fault, error=%x, wqe_index=%x, qpn=%x\n",
-			    -ret, wqe_index, qp->mqp.qpn);
+			    -ret, wqe_index, qpn);
 		resume_with_error = 1;
 		goto resolve_page_fault;
 	}
@@ -586,7 +590,8 @@ static void mlx5_ib_mr_wqe_pfault_handler(struct mlx5_ib_qp *qp,
 resolve_page_fault:
 	mlx5_ib_page_fault_resume(qp, pfault, resume_with_error);
 	mlx5_ib_dbg(dev, "PAGE FAULT completed. QP 0x%x resume_with_error=%d, flags: 0x%x\n",
-		    qp->mqp.qpn, resume_with_error, pfault->mpfault.flags);
+		    qpn, resume_with_error,
+		    pfault->mpfault.flags);
 
 	free_page((unsigned long)buffer);
 }
@@ -753,7 +758,7 @@ void mlx5_ib_odp_create_qp(struct mlx5_ib_qp *qp)
 	qp->disable_page_faults = 1;
 	spin_lock_init(&qp->disable_page_faults_lock);
 
-	qp->mqp.pfault_handler = mlx5_ib_pfault_handler;
+	qp->trans_qp.base.mqp.pfault_handler = mlx5_ib_pfault_handler;
 
 	for (i = 0; i < MLX5_IB_PAGEFAULT_CONTEXTS; ++i)
 		INIT_WORK(&qp->pagefaults[i].work, mlx5_ib_qp_pfault_action);
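
The odp.c changes follow two patterns: each handler hoists the
now-longer QPN chain into a local once (u32 qpn =
qp->trans_qp.base.mqp.qpn;) instead of repeating it in every log line,
and mlx5_ib_read_user_wqe() is told explicitly which base (and hence
which ubuffer) to read from. A hedged sketch of the resulting call
shape (read_faulting_wqe is a hypothetical wrapper; the real code sits
inline in mlx5_ib_mr_wqe_pfault_handler()):

	static int read_faulting_wqe(struct mlx5_ib_qp *qp, int requestor,
				     u16 wqe_index, void *buffer)
	{
		u32 qpn = qp->trans_qp.base.mqp.qpn;	/* hoisted once */
		int ret;

		/* The reader no longer digs the umem out of the QP itself;
		 * the caller says which base (and its ubuffer) to use. */
		ret = mlx5_ib_read_user_wqe(qp, requestor, wqe_index, buffer,
					    PAGE_SIZE, &qp->trans_qp.base);
		if (ret < 0)
			pr_err("WQE read failed, qpn=0x%x\n", qpn);
		return ret;
	}
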
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -116,14 +116,15 @@ void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
  * Return: the number of bytes copied, or an error code.
  */
 int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
-			  void *buffer, u32 length)
+			  void *buffer, u32 length,
+			  struct mlx5_ib_qp_base *base)
 {
 	struct ib_device *ibdev = qp->ibqp.device;
 	struct mlx5_ib_dev *dev = to_mdev(ibdev);
 	struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
 	size_t offset;
 	size_t wq_end;
-	struct ib_umem *umem = qp->umem;
+	struct ib_umem *umem = base->ubuffer.umem;
 	u32 first_copy_length;
 	int wqe_length;
 	int ret;
@@ -174,8 +175,10 @@ static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
 	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
 	struct ib_event event;
 
-	if (type == MLX5_EVENT_TYPE_PATH_MIG)
-		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
+	if (type == MLX5_EVENT_TYPE_PATH_MIG) {
+		/* This event is only valid for trans_qps */
+		to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
+	}
 
 	if (ibqp->event_handler) {
 		event.device = ibqp->device;
@@ -368,7 +371,8 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
 static int set_user_buf_size(struct mlx5_ib_dev *dev,
 			    struct mlx5_ib_qp *qp,
-			    struct mlx5_ib_create_qp *ucmd)
+			    struct mlx5_ib_create_qp *ucmd,
+			    struct mlx5_ib_qp_base *base)
 {
 	int desc_sz = 1 << qp->sq.wqe_shift;
 
@@ -393,8 +397,8 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 		return -EINVAL;
 	}
 
-	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
-		       (qp->sq.wqe_cnt << 6);
+	base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
+				 (qp->sq.wqe_cnt << 6);
 
 	return 0;
 }
@@ -592,13 +596,50 @@ static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
 	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
 }
 
+static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
+			    struct ib_pd *pd,
+			    unsigned long addr, size_t size,
+			    struct ib_umem **umem,
+			    int *npages, int *page_shift, int *ncont,
+			    u32 *offset)
+{
+	int err;
+
+	*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
+	if (IS_ERR(*umem)) {
+		mlx5_ib_dbg(dev, "umem_get failed\n");
+		return PTR_ERR(*umem);
+	}
+
+	mlx5_ib_cont_pages(*umem, addr, npages, page_shift, ncont, NULL);
+
+	err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
+	if (err) {
+		mlx5_ib_warn(dev, "bad offset\n");
+		goto err_umem;
+	}
+
+	mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
+		    addr, size, *npages, *page_shift, *ncont, *offset);
+
+	return 0;
+
+err_umem:
+	ib_umem_release(*umem);
+	*umem = NULL;
+
+	return err;
+}
+
 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
 			  struct mlx5_create_qp_mbox_in **in,
-			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
+			  struct mlx5_ib_create_qp_resp *resp, int *inlen,
+			  struct mlx5_ib_qp_base *base)
 {
 	struct mlx5_ib_ucontext *context;
 	struct mlx5_ib_create_qp ucmd;
+	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
 	int page_shift = 0;
 	int uar_index;
 	int npages;
@@ -645,32 +686,20 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
 
-	err = set_user_buf_size(dev, qp, &ucmd);
+	err = set_user_buf_size(dev, qp, &ucmd, base);
 	if (err)
 		goto err_uuar;
 
-	if (ucmd.buf_addr && qp->buf_size) {
-		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
-				       qp->buf_size, 0, 0);
-		if (IS_ERR(qp->umem)) {
-			mlx5_ib_dbg(dev, "umem_get failed\n");
-			err = PTR_ERR(qp->umem);
+	if (ucmd.buf_addr && ubuffer->buf_size) {
+		ubuffer->buf_addr = ucmd.buf_addr;
+		err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
+				       ubuffer->buf_size,
+				       &ubuffer->umem, &npages, &page_shift,
+				       &ncont, &offset);
+		if (err)
 			goto err_uuar;
-		}
 	} else {
-		qp->umem = NULL;
-	}
-
-	if (qp->umem) {
-		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
-				   &ncont, NULL);
-		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
-		if (err) {
-			mlx5_ib_warn(dev, "bad offset\n");
-			goto err_umem;
-		}
-		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
-			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
+		ubuffer->umem = NULL;
 	}
 
 	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
@@ -679,8 +708,9 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		err = -ENOMEM;
 		goto err_umem;
 	}
-	if (qp->umem)
-		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
+	if (ubuffer->umem)
+		mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift,
+				     (*in)->pas, 0);
 	(*in)->ctx.log_pg_sz_remote_qpn =
 		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
 	(*in)->ctx.params2 = cpu_to_be32(offset << 6);
@@ -711,29 +741,31 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 	kvfree(*in);
 
 err_umem:
-	if (qp->umem)
-		ib_umem_release(qp->umem);
+	if (ubuffer->umem)
+		ib_umem_release(ubuffer->umem);
 
 err_uuar:
 	free_uuar(&context->uuari, uuarn);
 	return err;
 }
 
-static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
+static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp,
+			    struct mlx5_ib_qp_base *base)
 {
 	struct mlx5_ib_ucontext *context;
 
 	context = to_mucontext(pd->uobject->context);
 	mlx5_ib_db_unmap_user(context, &qp->db);
-	if (qp->umem)
-		ib_umem_release(qp->umem);
+	if (base->ubuffer.umem)
+		ib_umem_release(base->ubuffer.umem);
 	free_uuar(&context->uuari, qp->uuarn);
 }
 
 static int create_kernel_qp(struct mlx5_ib_dev *dev,
 			    struct ib_qp_init_attr *init_attr,
 			    struct mlx5_ib_qp *qp,
-			    struct mlx5_create_qp_mbox_in **in, int *inlen)
+			    struct mlx5_create_qp_mbox_in **in, int *inlen,
+			    struct mlx5_ib_qp_base *base)
 {
 	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
 	struct mlx5_uuar_info *uuari;
@@ -765,9 +797,9 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 
 	qp->rq.offset = 0;
 	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
-	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
+	base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
-	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, &qp->buf);
+	err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
 	if (err) {
 		mlx5_ib_dbg(dev, "err %d\n", err);
 		goto err_uuar;
@@ -866,6 +898,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 {
 	struct mlx5_ib_resources *devr = &dev->devr;
 	struct mlx5_core_dev *mdev = dev->mdev;
+	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
 	struct mlx5_ib_create_qp_resp resp;
 	struct mlx5_create_qp_mbox_in *in;
 	struct mlx5_ib_create_qp ucmd;
@@ -947,11 +980,13 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 				    ucmd.sq_wqe_count, max_wqes);
 			return -EINVAL;
 		}
-		err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
+		err = create_user_qp(dev, pd, qp, udata, &in, &resp,
+				     &inlen, base);
 		if (err)
 			mlx5_ib_dbg(dev, "err %d\n", err);
 	} else {
-		err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
+		err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
+				       base);
 		if (err)
 			mlx5_ib_dbg(dev, "err %d\n", err);
 	}
@@ -1060,26 +1095,22 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 		MLX5_SET(qpc, qpc, user_index, uidx);
 	}
 
-	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
+	err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
 	if (err) {
 		mlx5_ib_dbg(dev, "create qp failed\n");
 		goto err_create;
 	}
 
 	kvfree(in);
-	/* Hardware wants QPN written in big-endian order (after
-	 * shifting) for send doorbell.  Precompute this value to save
-	 * a little bit when posting sends.
-	 */
-	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
-	qp->mqp.event = mlx5_ib_qp_event;
+	base->container_mibqp = qp;
+	base->mqp.event = mlx5_ib_qp_event;
 
 	return 0;
 
 err_create:
 	if (qp->create_type == MLX5_QP_USER)
-		destroy_qp_user(pd, qp);
+		destroy_qp_user(pd, qp, base);
 	else if (qp->create_type == MLX5_QP_KERNEL)
 		destroy_qp_kernel(dev, qp);
@@ -1188,6 +1219,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_modify_qp_mbox_in *in;
+	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
 	int err;
 
 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -1197,32 +1229,34 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 	if (qp->state != IB_QPS_RESET) {
 		mlx5_ib_qp_disable_pagefaults(qp);
 		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
-					MLX5_QP_STATE_RST, in, 0, &qp->mqp))
+					MLX5_QP_STATE_RST, in, 0,
+					&base->mqp))
 			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
-				     qp->mqp.qpn);
+				     base->mqp.qpn);
 	}
 
 	get_cqs(qp, &send_cq, &recv_cq);
 
 	if (qp->create_type == MLX5_QP_KERNEL) {
 		mlx5_ib_lock_cqs(send_cq, recv_cq);
-		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
+		__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
 				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
 		if (send_cq != recv_cq)
-			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+			__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
+					   NULL);
 		mlx5_ib_unlock_cqs(send_cq, recv_cq);
 	}
 
-	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
+	err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
 	if (err)
-		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
+		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", base->mqp.qpn);
 	kfree(in);
 
 	if (qp->create_type == MLX5_QP_KERNEL)
 		destroy_qp_kernel(dev, qp);
 	else if (qp->create_type == MLX5_QP_USER)
-		destroy_qp_user(&get_pd(qp)->ibpd, qp);
+		destroy_qp_user(&get_pd(qp)->ibpd, qp, base);
 }
 
 static const char *ib_qp_type_str(enum ib_qp_type type)
@@ -1314,13 +1348,14 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
 		else if (is_qp1(init_attr->qp_type))
 			qp->ibqp.qp_num = 1;
 		else
-			qp->ibqp.qp_num = qp->mqp.qpn;
+			qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
 
 		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
-			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
+			    qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
+			    to_mcq(init_attr->recv_cq)->mcq.cqn,
 			    to_mcq(init_attr->send_cq)->mcq.cqn);
 
-		qp->xrcdn = xrcdn;
+		qp->trans_qp.xrcdn = xrcdn;
 
 		break;
@@ -1360,12 +1395,12 @@ static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_att
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
 		dest_rd_atomic = attr->max_dest_rd_atomic;
 	else
-		dest_rd_atomic = qp->resp_depth;
+		dest_rd_atomic = qp->trans_qp.resp_depth;
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS)
 		access_flags = attr->qp_access_flags;
 	else
-		access_flags = qp->atomic_rd_en;
+		access_flags = qp->trans_qp.atomic_rd_en;
 
 	if (!dest_rd_atomic)
 		access_flags &= IB_ACCESS_REMOTE_WRITE;
@@ -1610,6 +1645,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 {
 	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
 	struct mlx5_ib_qp *qp = to_mqp(ibqp);
+	struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
 	struct mlx5_ib_cq *send_cq, *recv_cq;
 	struct mlx5_qp_context *context;
 	struct mlx5_modify_qp_mbox_in *in;
@@ -1769,7 +1805,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	in->optparam = cpu_to_be32(optpar);
 	err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
 				  to_mlx5_state(new_state), in, sqd_event,
-				  &qp->mqp);
+				  &base->mqp);
 	if (err)
 		goto out;
 
@@ -1779,23 +1815,23 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
 	qp->state = new_state;
 
 	if (attr_mask & IB_QP_ACCESS_FLAGS)
-		qp->atomic_rd_en = attr->qp_access_flags;
+		qp->trans_qp.atomic_rd_en = attr->qp_access_flags;
 	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
-		qp->resp_depth = attr->max_dest_rd_atomic;
+		qp->trans_qp.resp_depth = attr->max_dest_rd_atomic;
 	if (attr_mask & IB_QP_PORT)
 		qp->port = attr->port_num;
 	if (attr_mask & IB_QP_ALT_PATH)
-		qp->alt_port = attr->alt_port_num;
+		qp->trans_qp.alt_port = attr->alt_port_num;
 
 	/*
 	 * If we moved a kernel QP to RESET, clean up all old CQ
 	 * entries and reinitialize the QP.
 	 */
 	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
-		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
+		mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
 				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
 		if (send_cq != recv_cq)
-			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
+			mlx5_ib_cq_clean(send_cq, base->mqp.qpn, NULL);
 
 		qp->rq.head = 0;
 		qp->rq.tail = 0;
@@ -2631,7 +2667,7 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 
 	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
 					     mlx5_opcode | ((u32)opmod << 24));
-	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
+	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
 	ctrl->fm_ce_se |= fence;
 	qp->fm_cache = next_fence;
 	if (unlikely(qp->wq_sig))
@@ -3107,7 +3143,8 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
 		goto out;
 	}
 	context = &outb->ctx;
-	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
+	err = mlx5_core_qp_query(dev->mdev, &qp->trans_qp.base.mqp, outb,
+				 sizeof(*outb));
 	if (err)
 		goto out_free;
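
Beyond the base plumbing, qp.c gains one real extraction: the
pin-pages/compute-layout/offset sequence that create_user_qp() used to
open-code now lives in mlx5_ib_umem_get(). A caller fills the ubuffer
and collects the layout outputs. Condensed from the new call site in
the patch (surrounding locals and logic trimmed):

	struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
	int npages, page_shift, ncont;
	u32 offset;
	int err;

	if (ucmd.buf_addr && ubuffer->buf_size) {
		ubuffer->buf_addr = ucmd.buf_addr;
		/* Pins the user memory, computes npages/page_shift/ncont
		 * and the offset of buf_addr within the first page;
		 * releases the umem itself on failure. */
		err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
				       ubuffer->buf_size,
				       &ubuffer->umem, &npages, &page_shift,
				       &ncont, &offset);
		if (err)
			goto err_uuar;
	} else {
		ubuffer->umem = NULL;
	}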