Commit 9603b61d authored by Jack Morgenstein, committed by David S. Miller

mlx5: Move pci device handling from mlx5_ib to mlx5_core

In preparation for a new mlx5 device which is VPI (i.e., ports can be
either IB or ETH), move the pci device functionality from mlx5_ib
to mlx5_core.

This involves the following changes:
1. Move mlx5_core_dev struct out of mlx5_ib_dev. mlx5_core_dev
   is now an independent structure maintained by mlx5_core.
   mlx5_ib_dev now has a pointer to that struct.
   This requires changing a lot of places where the core_dev
   struct was accessed via mlx5_ib_dev (now, this needs to
   be a pointer dereference).
2. All PCI initializations are now done in mlx5_core. Thus,
   it is now mlx5_core which calls pci_register_driver() (and
   not mlx5_ib, as was previously the case).
3. mlx5_ib now registers itself with mlx5_core as an "interface"
   driver. This is very similar to the mechanism employed for
   the mlx4 (ConnectX) driver. Once the HCA is initialized
   (by mlx5_core), it invokes the interface drivers to do
   their initializations (a minimal sketch of such an
   interface client follows this list).
4. There is a new event handler which the core registers:
   mlx5_core_event(). This event handler invokes the
   event handlers registered by the interfaces.
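
As a rough illustration of the mechanism described in points 3 and 4, a
minimal interface client might look like the sketch below. This is a
sketch only: the example_* names and struct my_ctx are hypothetical,
while struct mlx5_interface and the mlx5_register_interface() /
mlx5_unregister_interface() entry points are the real API added by this
patch (see the mlx5_interface hunk at the end of the diff).

	/* Sketch of an mlx5_core "interface" client; all example_*
	 * names and struct my_ctx are hypothetical. */
	struct my_ctx {
		struct mlx5_core_dev *mdev;
	};

	static void *example_add(struct mlx5_core_dev *mdev)
	{
		struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

		if (ctx)
			ctx->mdev = mdev;
		/* Returning NULL tells the core not to track this
		 * device/interface pair (see mlx5_add_device() below). */
		return ctx;
	}

	static void example_remove(struct mlx5_core_dev *mdev, void *context)
	{
		kfree(context);
	}

	static void example_event(struct mlx5_core_dev *mdev, void *context,
				  enum mlx5_dev_event event, void *data)
	{
		/* Fanned out from the core's mlx5_core_event() handler. */
	}

	static struct mlx5_interface example_interface = {
		.add    = example_add,		/* required */
		.remove = example_remove,	/* required */
		.event  = example_event,	/* optional */
	};

	static int __init example_init(void)
	{
		/* Invokes ->add() for every mlx5 device already probed,
		 * and later for each newly probed device. */
		return mlx5_register_interface(&example_interface);
	}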

Based on a patch by Eli Cohen <eli@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
Signed-off-by: Eli Cohen <eli@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4ada97ab
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
 	struct mlx5_core_srq *msrq = NULL;

 	if (qp->ibqp.xrcd) {
-		msrq = mlx5_core_get_srq(&dev->mdev,
+		msrq = mlx5_core_get_srq(dev->mdev,
					 be32_to_cpu(cqe->srqn));
 		srq = to_mibsrq(msrq);
 	} else {
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-	mlx5_buf_free(&dev->mdev, &buf->buf);
+	mlx5_buf_free(dev->mdev, &buf->buf);
 }

 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 		 * because CQs will be locked while QPs are removed
 		 * from the table.
 		 */
-		mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+		mqp = __mlx5_qp_lookup(dev->mdev, qpn);
 		if (unlikely(!mqp)) {
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
 				     cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 	case MLX5_CQE_SIG_ERR:
 		sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;

-		read_lock(&dev->mdev.priv.mr_table.lock);
-		mmr = __mlx5_mr_lookup(&dev->mdev,
+		read_lock(&dev->mdev->priv.mr_table.lock);
+		mmr = __mlx5_mr_lookup(dev->mdev,
				       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
 		if (unlikely(!mmr)) {
-			read_unlock(&dev->mdev.priv.mr_table.lock);
+			read_unlock(&dev->mdev->priv.mr_table.lock);
 			mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
 				     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
 			return -EINVAL;
@@ -536,7 +536,7 @@ static int mlx5_poll_one(struct mlx5_ib_cq *cq,
 			     mr->sig->err_item.expected,
 			     mr->sig->err_item.actual);

-		read_unlock(&dev->mdev.priv.mr_table.lock);
+		read_unlock(&dev->mdev->priv.mr_table.lock);
 		goto repoll;
 	}
@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	mlx5_cq_arm(&to_mcq(ibcq)->mcq,
 		    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
 		    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-		    to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+		    to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+		    MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));

 	return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
 	int err;

-	err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+	err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
			     PAGE_SIZE * 2, &buf->buf);
 	if (err)
 		return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
 	int err;

-	err = mlx5_db_alloc(&dev->mdev, &cq->db);
+	err = mlx5_db_alloc(dev->mdev, &cq->db);
 	if (err)
 		return err;
@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);

 	(*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-	*index = dev->mdev.priv.uuari.uars[0].index;
+	*index = dev->mdev->priv.uuari.uars[0].index;

 	return 0;
@@ -724,14 +724,14 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 	free_cq_buf(dev, &cq->buf);

 err_db:
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 	return err;
 }

 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
 	free_cq_buf(dev, &cq->buf);
-	mlx5_db_free(&dev->mdev, &cq->db);
+	mlx5_db_free(dev->mdev, &cq->db);
 }

 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 		return ERR_PTR(-EINVAL);

 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes)
+	if (entries > dev->mdev->caps.max_cqes)
 		return ERR_PTR(-EINVAL);

 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	cqb->ctx.c_eqn = cpu_to_be16(eqn);
 	cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);

-	err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+	err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
 	if (err)
 		goto err_cqb;
@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
 	return &cq->ibcq;

 err_cmd:
-	mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &cq->mcq);

 err_cqb:
 	mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
 	if (cq->uobject)
 		context = cq->uobject->context;

-	mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+	mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
 	if (context)
 		destroy_cq_user(mcq, context);
 	else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	int err;
 	u32 fsel;

-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
 		return -ENOSYS;

 	in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
 	in->ctx.cq_period = cpu_to_be16(cq_period);
 	in->ctx.cq_max_count = cpu_to_be16(cq_count);
 	in->field_select = cpu_to_be32(fsel);
-	err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+	err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
 	kfree(in);

 	if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	int uninitialized_var(cqe_size);
 	unsigned long flags;

-	if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
 		pr_info("Firmware does not support resize CQ\n");
 		return -ENOSYS;
 	}
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 		return -EINVAL;

 	entries = roundup_pow_of_two(entries + 1);
-	if (entries > dev->mdev.caps.max_cqes + 1)
+	if (entries > dev->mdev->caps.max_cqes + 1)
 		return -EINVAL;

 	if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
 	in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
 	in->cqn = cpu_to_be32(cq->mcq.cqn);

-	err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+	err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
 	if (err)
 		goto ex_alloc;
......
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
 	if (ignore_bkey || !in_wc)
 		op_modifier |= 0x2;

-	return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+	return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }

 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)

 	packet_error = be16_to_cpu(out_mad->status);

-	dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+	dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
 		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;

 out:
......
[diff collapsed - not shown]
@@ -360,7 +360,7 @@ struct mlx5_ib_resources {

 struct mlx5_ib_dev {
 	struct ib_device		ib_dev;
-	struct mlx5_core_dev		mdev;
+	struct mlx5_core_dev	       *mdev;
 	MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
 	struct list_head		eqs_list;
 	int				num_ports;
@@ -454,16 +454,6 @@ static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
 	return container_of(ibah, struct mlx5_ib_ah, ibah);
 }

-static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
-{
-	return container_of(dev, struct mlx5_ib_dev, mdev);
-}
-
-static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
-{
-	return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
-}
-
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
......
@@ -73,7 +73,7 @@ static void reg_mr_callback(int status, void *context)
 	struct mlx5_cache_ent *ent = &cache->ent[c];
 	u8 key;
 	unsigned long flags;
-	struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+	struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
 	int err;

 	spin_lock_irqsave(&ent->lock, flags);
@@ -97,9 +97,9 @@ static void reg_mr_callback(int status, void *context)
 		return;
 	}

-	spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
-	key = dev->mdev.priv.mkey_key++;
-	spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+	spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
+	key = dev->mdev->priv.mkey_key++;
+	spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
 	mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;

 	cache->last_add = jiffies;
@@ -155,7 +155,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
 		spin_lock_irq(&ent->lock);
 		ent->pending++;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
+		err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
					    sizeof(*in), reg_mr_callback,
					    mr, &mr->out);
 		if (err) {
@@ -188,7 +188,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
 		ent->cur--;
 		ent->size--;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
 		else
@@ -479,7 +479,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
 		ent->cur--;
 		ent->size--;
 		spin_unlock_irq(&ent->lock);
-		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 		if (err)
 			mlx5_ib_warn(dev, "failed destroy mkey\n");
 		else
@@ -496,7 +496,7 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
 	if (!mlx5_debugfs_root)
 		return 0;

-	cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
+	cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
 	if (!cache->root)
 		return -ENOMEM;
@@ -571,8 +571,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
 		ent->order = i + 2;
 		ent->dev = dev;

-		if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
-			limit = dev->mdev.profile->mr_cache[i].limit;
+		if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+			limit = dev->mdev->profile->mr_cache[i].limit;
 		else
 			limit = 0;
@@ -610,7 +610,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	struct mlx5_ib_dev *dev = to_mdev(pd->device);
-	struct mlx5_core_dev *mdev = &dev->mdev;
+	struct mlx5_core_dev *mdev = dev->mdev;
 	struct mlx5_create_mkey_mbox_in *in;
 	struct mlx5_mkey_seg *seg;
 	struct mlx5_ib_mr *mr;
@@ -846,7 +846,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
 	in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
 	in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
							 1 << page_shift));
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
				    NULL, NULL);
 	if (err) {
 		mlx5_ib_warn(dev, "create mkey failed\n");
@@ -923,7 +923,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	mr->umem = umem;
 	mr->npages = npages;
 	spin_lock(&dev->mr_lock);
-	dev->mdev.priv.reg_pages += npages;
+	dev->mdev->priv.reg_pages += npages;
 	spin_unlock(&dev->mr_lock);
 	mr->ibmr.lkey = mr->mmr.key;
 	mr->ibmr.rkey = mr->mmr.key;
@@ -978,7 +978,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
 	int err;

 	if (!umred) {
-		err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+		err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 		if (err) {
 			mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
				     mr->mmr.key, err);
@@ -996,7 +996,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
 	if (umem) {
 		ib_umem_release(umem);
 		spin_lock(&dev->mr_lock);
-		dev->mdev.priv.reg_pages -= npages;
+		dev->mdev->priv.reg_pages -= npages;
 		spin_unlock(&dev->mr_lock);
 	}
@@ -1044,7 +1044,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
 	}

 	/* create mem & wire PSVs */
-	err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+	err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
				   2, psv_index);
 	if (err)
 		goto err_free_sig;
@@ -1060,7 +1060,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
 	}

 	in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
				    NULL, NULL, NULL);
 	if (err)
 		goto err_destroy_psv;
@@ -1074,11 +1074,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,

 err_destroy_psv:
 	if (mr->sig) {
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
@@ -1099,18 +1099,18 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
 	int err;

 	if (mr->sig) {
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_memory.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
				     mr->sig->psv_memory.psv_idx);
-		if (mlx5_core_destroy_psv(&dev->mdev,
+		if (mlx5_core_destroy_psv(dev->mdev,
					  mr->sig->psv_wire.psv_idx))
 			mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
				     mr->sig->psv_wire.psv_idx);
 		kfree(mr->sig);
 	}

-	err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+	err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
 	if (err) {
 		mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
			     mr->mmr.key, err);
@@ -1149,7 +1149,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
 	 * TBD not needed - issue 197292 */
 	in->seg.log2_page_size = PAGE_SHIFT;

-	err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+	err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
				    NULL, NULL);
 	kfree(in);
 	if (err)
@@ -1202,7 +1202,7 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
 	struct mlx5_ib_dev *dev = to_mdev(page_list->device);
 	int size = page_list->max_page_list_len * sizeof(u64);

-	dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
+	dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
			  mfrpl->map);
 	kfree(mfrpl->ibfrpl.page_list);
 	kfree(mfrpl);
......
[diff collapsed - not shown]
@@ -159,7 +159,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 	int page_shift;
 	int npages;

-	err = mlx5_db_alloc(&dev->mdev, &srq->db);
+	err = mlx5_db_alloc(dev->mdev, &srq->db);
 	if (err) {
 		mlx5_ib_warn(dev, "alloc dbell rec failed\n");
 		return err;
@@ -167,7 +167,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,

 	*srq->db.db = 0;

-	if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+	if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
 		mlx5_ib_dbg(dev, "buf alloc failed\n");
 		err = -ENOMEM;
 		goto err_db;
@@ -212,10 +212,10 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 	mlx5_vfree(*in);

 err_buf:
-	mlx5_buf_free(&dev->mdev, &srq->buf);
+	mlx5_buf_free(dev->mdev, &srq->buf);

 err_db:
-	mlx5_db_free(&dev->mdev, &srq->db);
+	mlx5_db_free(dev->mdev, &srq->db);
 	return err;
 }
@@ -229,8 +229,8 @@ static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
 {
 	kfree(srq->wrid);
-	mlx5_buf_free(&dev->mdev, &srq->buf);
-	mlx5_db_free(&dev->mdev, &srq->db);
+	mlx5_buf_free(dev->mdev, &srq->buf);
+	mlx5_db_free(dev->mdev, &srq->db);
 }

 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -248,10 +248,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	u32 flgs, xrcdn;

 	/* Sanity check SRQ size before proceeding */
-	if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
+	if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
 		mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
			    init_attr->attr.max_wr,
-			    dev->mdev.caps.max_srq_wqes);
+			    dev->mdev->caps.max_srq_wqes);
 		return ERR_PTR(-EINVAL);
 	}
@@ -303,7 +303,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
 	in->ctx.db_record = cpu_to_be64(srq->db.dma);

-	err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
+	err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
 	mlx5_vfree(in);
 	if (err) {
 		mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -327,7 +327,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 	return &srq->ibsrq;

 err_core:
-	mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+	mlx5_core_destroy_srq(dev->mdev, &srq->msrq);

 err_usr_kern_srq:
 	if (pd->uobject)
@@ -357,7 +357,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
 		return -EINVAL;

 	mutex_lock(&srq->mutex);
-	ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
+	ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
 	mutex_unlock(&srq->mutex);

 	if (ret)
@@ -378,7 +378,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
 	if (!out)
 		return -ENOMEM;

-	ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
+	ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
 	if (ret)
 		goto out_box;
@@ -396,7 +396,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
 	struct mlx5_ib_dev *dev = to_mdev(srq->device);
 	struct mlx5_ib_srq *msrq = to_msrq(srq);

-	mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);
+	mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);

 	if (srq->uobject) {
 		mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
......
@@ -58,7 +58,100 @@ int mlx5_core_debug_mask;
 module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
 MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

+#define MLX5_DEFAULT_PROF	2
+static int prof_sel = MLX5_DEFAULT_PROF;
+module_param_named(prof_sel, prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+
 struct workqueue_struct *mlx5_core_wq;
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
+struct mlx5_device_context {
+	struct list_head	list;
+	struct mlx5_interface  *intf;
+	void		       *context;
+};
+
+static struct mlx5_profile profile[] = {
+	[0] = {
+		.mask		= 0,
+	},
+	[1] = {
+		.mask		= MLX5_PROF_MASK_QP_SIZE,
+		.log_max_qp	= 12,
+	},
+	[2] = {
+		.mask		= MLX5_PROF_MASK_QP_SIZE |
+				  MLX5_PROF_MASK_MR_CACHE,
+		.log_max_qp	= 17,
+		.mr_cache[0]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[1]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[2]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[3]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[4]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[5]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[6]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[7]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[8]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[9]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[10]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[11]	= {
+			.size	= 500,
+			.limit	= 250
+		},
+		.mr_cache[12]	= {
+			.size	= 64,
+			.limit	= 32
+		},
+		.mr_cache[13]	= {
+			.size	= 32,
+			.limit	= 16
+		},
+		.mr_cache[14]	= {
+			.size	= 16,
+			.limit	= 8
+		},
+		.mr_cache[15]	= {
+			.size	= 8,
+			.limit	= 4
+		},
+	},
+};
+
 static int set_dma_caps(struct pci_dev *pdev)
 {
@@ -299,7 +392,7 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
 	return 0;
 }

-int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
 	struct mlx5_priv *priv = &dev->priv;
 	int err;
@@ -489,7 +582,7 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 }
-EXPORT_SYMBOL(mlx5_dev_init);

-void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 {
 	struct mlx5_priv *priv = &dev->priv;
@@ -516,7 +609,190 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 	pci_disable_device(dev->pdev);
 	debugfs_remove(priv->dbg_root);
 }
-EXPORT_SYMBOL(mlx5_dev_cleanup);
+
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+	if (!dev_ctx) {
+		pr_warn("mlx5_add_device: alloc context failed\n");
+		return;
+	}
+
+	dev_ctx->intf = intf;
+	dev_ctx->context = intf->add(dev);
+
+	if (dev_ctx->context) {
+		spin_lock_irq(&priv->ctx_lock);
+		list_add_tail(&dev_ctx->list, &priv->ctx_list);
+		spin_unlock_irq(&priv->ctx_lock);
+	} else {
+		kfree(dev_ctx);
+	}
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+	struct mlx5_device_context *dev_ctx;
+	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf == intf) {
+			spin_lock_irq(&priv->ctx_lock);
+			list_del(&dev_ctx->list);
+			spin_unlock_irq(&priv->ctx_lock);
+
+			intf->remove(dev, dev_ctx->context);
+			kfree(dev_ctx);
+			return;
+		}
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&intf_mutex);
+	list_add_tail(&priv->dev_list, &dev_list);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&intf_mutex);
+
+	return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_interface *intf;
+
+	mutex_lock(&intf_mutex);
+	list_for_each_entry(intf, &intf_list, list)
+		mlx5_remove_device(intf, priv);
+	list_del(&priv->dev_list);
+	mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	if (!intf->add || !intf->remove)
+		return -EINVAL;
+
+	mutex_lock(&intf_mutex);
+	list_add_tail(&intf->list, &intf_list);
+	list_for_each_entry(priv, &dev_list, dev_list)
+		mlx5_add_device(intf, priv);
+	mutex_unlock(&intf_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+	struct mlx5_priv *priv;
+
+	mutex_lock(&intf_mutex);
+	list_for_each_entry(priv, &dev_list, dev_list)
+		mlx5_remove_device(intf, priv);
+	list_del(&intf->list);
+	mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+			    void *data)
+{
+	struct mlx5_priv *priv = &dev->priv;
+	struct mlx5_device_context *dev_ctx;
+	unsigned long flags;
+
+	spin_lock_irqsave(&priv->ctx_lock, flags);
+
+	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+		if (dev_ctx->intf->event)
+			dev_ctx->intf->event(dev, dev_ctx->context, event, data);
+
+	spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+struct mlx5_core_event_handler {
+	void (*event)(struct mlx5_core_dev *dev,
+		      enum mlx5_dev_event event,
+		      void *data);
+};
+
+static int init_one(struct pci_dev *pdev,
+		    const struct pci_device_id *id)
+{
+	struct mlx5_core_dev *dev;
+	struct mlx5_priv *priv;
+	int err;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&pdev->dev, "kzalloc failed\n");
+		return -ENOMEM;
+	}
+	priv = &dev->priv;
+
+	pci_set_drvdata(pdev, dev);
+	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
+		pr_warn("selected profile out of range, selecting default (%d)\n",
+			MLX5_DEFAULT_PROF);
+		prof_sel = MLX5_DEFAULT_PROF;
+	}
+	dev->profile = &profile[prof_sel];
+	dev->event = mlx5_core_event;
+
+	err = mlx5_dev_init(dev, pdev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&priv->ctx_list);
+	spin_lock_init(&priv->ctx_lock);
+	err = mlx5_register_device(dev);
+	if (err) {
+		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+		goto out_init;
+	}
+
+	return 0;
+
+out_init:
+	mlx5_dev_cleanup(dev);
+out:
+	kfree(dev);
+	return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+
+	mlx5_unregister_device(dev);
+	mlx5_dev_cleanup(dev);
+	kfree(dev);
+}
+
+static const struct pci_device_id mlx5_core_pci_table[] = {
+	{ PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
+	{ 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+
+static struct pci_driver mlx5_core_driver = {
+	.name		= DRIVER_NAME,
+	.id_table	= mlx5_core_pci_table,
+	.probe		= init_one,
+	.remove		= remove_one
+};

 static int __init init(void)
 {
@@ -530,8 +806,15 @@ static int __init init(void)
 	}
 	mlx5_health_init();

+	err = pci_register_driver(&mlx5_core_driver);
+	if (err)
+		goto err_health;
+
 	return 0;

+err_health:
+	mlx5_health_cleanup();
+	destroy_workqueue(mlx5_core_wq);
 err_debug:
 	mlx5_unregister_debugfs();
 	return err;
@@ -539,6 +822,7 @@ static int __init init(void)
 static void __exit cleanup(void)
 {
+	pci_unregister_driver(&mlx5_core_driver);
 	mlx5_health_cleanup();
 	destroy_workqueue(mlx5_core_wq);
 	mlx5_unregister_debugfs();
......
@@ -543,6 +543,10 @@ struct mlx5_priv {
 	/* protect mkey key part */
 	spinlock_t		mkey_lock;
 	u8			mkey_key;
+
+	struct list_head	dev_list;
+	struct list_head	ctx_list;
+	spinlock_t		ctx_lock;
 };

 struct mlx5_core_dev {
@@ -686,8 +690,6 @@ static inline u32 mlx5_base_mkey(const u32 key)
 	return key & 0xffffff00u;
 }

-int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
-void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -811,6 +813,17 @@ enum {
 	MAX_MR_CACHE_ENTRIES	= 16,
 };

+struct mlx5_interface {
+	void *			(*add)(struct mlx5_core_dev *dev);
+	void			(*remove)(struct mlx5_core_dev *dev, void *context);
+	void			(*event)(struct mlx5_core_dev *dev, void *context,
+					 enum mlx5_dev_event event, void *data);
+	struct list_head	list;
+};
+
+int mlx5_register_interface(struct mlx5_interface *intf);
+void mlx5_unregister_interface(struct mlx5_interface *intf);
+
 struct mlx5_profile {
 	u64	mask;
 	u32	log_max_qp;
......