Commit 7f645a58 authored by Kamal Heib's avatar Kamal Heib Committed by Jason Gunthorpe

RDMA/hns: Initialize ib_device_ops struct

Initialize ib_device_ops with the supported operations using
ib_set_device_ops().
Signed-off-by: Kamal Heib <kamalheib1@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent e3c320ca
...@@ -883,6 +883,8 @@ struct hns_roce_hw { ...@@ -883,6 +883,8 @@ struct hns_roce_hw {
int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr); int (*query_srq)(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, int (*post_srq_recv)(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
const struct ib_recv_wr **bad_wr); const struct ib_recv_wr **bad_wr);
const struct ib_device_ops *hns_roce_dev_ops;
const struct ib_device_ops *hns_roce_dev_srq_ops;
}; };
struct hns_roce_dev { struct hns_roce_dev {
......
...@@ -4793,6 +4793,16 @@ static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev) ...@@ -4793,6 +4793,16 @@ static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev)
kfree(eq_table->eq); kfree(eq_table->eq);
} }
/*
 * hip06 (v1) hardware-specific verb callbacks, merged into the ib_device
 * via ib_set_device_ops() at registration time.  Grouped by object: QP
 * verbs first, then CQ verbs.  Designated initializers, so ordering is
 * purely cosmetic.
 */
static const struct ib_device_ops hns_roce_v1_dev_ops = {
	/* QP */
	.query_qp = hns_roce_v1_query_qp,
	.destroy_qp = hns_roce_v1_destroy_qp,
	.post_send = hns_roce_v1_post_send,
	.post_recv = hns_roce_v1_post_recv,
	/* CQ */
	.modify_cq = hns_roce_v1_modify_cq,
	.req_notify_cq = hns_roce_v1_req_notify_cq,
	.poll_cq = hns_roce_v1_poll_cq,
};
static const struct hns_roce_hw hns_roce_hw_v1 = { static const struct hns_roce_hw hns_roce_hw_v1 = {
.reset = hns_roce_v1_reset, .reset = hns_roce_v1_reset,
.hw_profile = hns_roce_v1_profile, .hw_profile = hns_roce_v1_profile,
...@@ -4818,6 +4828,7 @@ static const struct hns_roce_hw hns_roce_hw_v1 = { ...@@ -4818,6 +4828,7 @@ static const struct hns_roce_hw hns_roce_hw_v1 = {
.destroy_cq = hns_roce_v1_destroy_cq, .destroy_cq = hns_roce_v1_destroy_cq,
.init_eq = hns_roce_v1_init_eq_table, .init_eq = hns_roce_v1_init_eq_table,
.cleanup_eq = hns_roce_v1_cleanup_eq_table, .cleanup_eq = hns_roce_v1_cleanup_eq_table,
.hns_roce_dev_ops = &hns_roce_v1_dev_ops,
}; };
static const struct of_device_id hns_roce_of_match[] = { static const struct of_device_id hns_roce_of_match[] = {
......
...@@ -5734,6 +5734,22 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -5734,6 +5734,22 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
return ret; return ret;
} }
/*
 * hip08 (v2) hardware-specific verb callbacks, installed on the ib_device
 * with ib_set_device_ops().  QP verbs first, CQ verbs second; designated
 * initializers make the order irrelevant at runtime.
 */
static const struct ib_device_ops hns_roce_v2_dev_ops = {
	/* QP */
	.query_qp = hns_roce_v2_query_qp,
	.destroy_qp = hns_roce_v2_destroy_qp,
	.post_send = hns_roce_v2_post_send,
	.post_recv = hns_roce_v2_post_recv,
	/* CQ */
	.modify_cq = hns_roce_v2_modify_cq,
	.req_notify_cq = hns_roce_v2_req_notify_cq,
	.poll_cq = hns_roce_v2_poll_cq,
};
/*
 * hip08 (v2) SRQ verb callbacks.  Kept in a separate ops table so they are
 * only installed when the HNS_ROCE_CAP_FLAG_SRQ capability is present.
 */
static const struct ib_device_ops hns_roce_v2_dev_srq_ops = {
	.modify_srq = hns_roce_v2_modify_srq,
	.query_srq = hns_roce_v2_query_srq,
	.post_srq_recv = hns_roce_v2_post_srq_recv,
};
static const struct hns_roce_hw hns_roce_hw_v2 = { static const struct hns_roce_hw hns_roce_hw_v2 = {
.cmq_init = hns_roce_v2_cmq_init, .cmq_init = hns_roce_v2_cmq_init,
.cmq_exit = hns_roce_v2_cmq_exit, .cmq_exit = hns_roce_v2_cmq_exit,
...@@ -5765,6 +5781,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = { ...@@ -5765,6 +5781,8 @@ static const struct hns_roce_hw hns_roce_hw_v2 = {
.modify_srq = hns_roce_v2_modify_srq, .modify_srq = hns_roce_v2_modify_srq,
.query_srq = hns_roce_v2_query_srq, .query_srq = hns_roce_v2_query_srq,
.post_srq_recv = hns_roce_v2_post_srq_recv, .post_srq_recv = hns_roce_v2_post_srq_recv,
.hns_roce_dev_ops = &hns_roce_v2_dev_ops,
.hns_roce_dev_srq_ops = &hns_roce_v2_dev_srq_ops,
}; };
static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = { static const struct pci_device_id hns_roce_hw_v2_pci_tbl[] = {
......
...@@ -445,6 +445,54 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev) ...@@ -445,6 +445,54 @@ static void hns_roce_unregister_device(struct hns_roce_dev *hr_dev)
ib_unregister_device(&hr_dev->ib_dev); ib_unregister_device(&hr_dev->ib_dev);
} }
static const struct ib_device_ops hns_roce_dev_ops = {
.add_gid = hns_roce_add_gid,
.alloc_pd = hns_roce_alloc_pd,
.alloc_ucontext = hns_roce_alloc_ucontext,
.create_ah = hns_roce_create_ah,
.create_cq = hns_roce_ib_create_cq,
.create_qp = hns_roce_create_qp,
.dealloc_pd = hns_roce_dealloc_pd,
.dealloc_ucontext = hns_roce_dealloc_ucontext,
.del_gid = hns_roce_del_gid,
.dereg_mr = hns_roce_dereg_mr,
.destroy_ah = hns_roce_destroy_ah,
.destroy_cq = hns_roce_ib_destroy_cq,
.disassociate_ucontext = hns_roce_disassociate_ucontext,
.get_dma_mr = hns_roce_get_dma_mr,
.get_link_layer = hns_roce_get_link_layer,
.get_netdev = hns_roce_get_netdev,
.get_port_immutable = hns_roce_port_immutable,
.mmap = hns_roce_mmap,
.modify_device = hns_roce_modify_device,
.modify_port = hns_roce_modify_port,
.modify_qp = hns_roce_modify_qp,
.query_ah = hns_roce_query_ah,
.query_device = hns_roce_query_device,
.query_pkey = hns_roce_query_pkey,
.query_port = hns_roce_query_port,
.reg_user_mr = hns_roce_reg_user_mr,
};
/* MR re-registration verb; installed only with HNS_ROCE_CAP_FLAG_REREG_MR. */
static const struct ib_device_ops hns_roce_dev_mr_ops = {
	.rereg_user_mr = hns_roce_rereg_user_mr,
};
/* Memory-window verbs; installed only with HNS_ROCE_CAP_FLAG_MW. */
static const struct ib_device_ops hns_roce_dev_mw_ops = {
	.alloc_mw = hns_roce_alloc_mw,
	.dealloc_mw = hns_roce_dealloc_mw,
};
/* Fast-registration MR verbs; installed only with HNS_ROCE_CAP_FLAG_FRMR. */
static const struct ib_device_ops hns_roce_dev_frmr_ops = {
	.alloc_mr = hns_roce_alloc_mr,
	.map_mr_sg = hns_roce_map_mr_sg,
};
/*
 * Generation-independent SRQ verbs; installed only with
 * HNS_ROCE_CAP_FLAG_SRQ (hardware-specific SRQ verbs come from
 * hr_dev->hw->hns_roce_dev_srq_ops).
 */
static const struct ib_device_ops hns_roce_dev_srq_ops = {
	.create_srq = hns_roce_create_srq,
	.destroy_srq = hns_roce_destroy_srq,
};
static int hns_roce_register_device(struct hns_roce_dev *hr_dev) static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
{ {
int ret; int ret;
...@@ -484,88 +532,38 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev) ...@@ -484,88 +532,38 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
ib_dev->uverbs_ex_cmd_mask |= ib_dev->uverbs_ex_cmd_mask |=
(1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ); (1ULL << IB_USER_VERBS_EX_CMD_MODIFY_CQ);
/* HCA||device||port */
ib_dev->modify_device = hns_roce_modify_device;
ib_dev->query_device = hns_roce_query_device;
ib_dev->query_port = hns_roce_query_port;
ib_dev->modify_port = hns_roce_modify_port;
ib_dev->get_link_layer = hns_roce_get_link_layer;
ib_dev->get_netdev = hns_roce_get_netdev;
ib_dev->add_gid = hns_roce_add_gid;
ib_dev->del_gid = hns_roce_del_gid;
ib_dev->query_pkey = hns_roce_query_pkey;
ib_dev->alloc_ucontext = hns_roce_alloc_ucontext;
ib_dev->dealloc_ucontext = hns_roce_dealloc_ucontext;
ib_dev->mmap = hns_roce_mmap;
/* PD */
ib_dev->alloc_pd = hns_roce_alloc_pd;
ib_dev->dealloc_pd = hns_roce_dealloc_pd;
/* AH */
ib_dev->create_ah = hns_roce_create_ah;
ib_dev->query_ah = hns_roce_query_ah;
ib_dev->destroy_ah = hns_roce_destroy_ah;
/* QP */
ib_dev->create_qp = hns_roce_create_qp;
ib_dev->modify_qp = hns_roce_modify_qp;
ib_dev->query_qp = hr_dev->hw->query_qp;
ib_dev->destroy_qp = hr_dev->hw->destroy_qp;
ib_dev->post_send = hr_dev->hw->post_send;
ib_dev->post_recv = hr_dev->hw->post_recv;
/* CQ */
ib_dev->create_cq = hns_roce_ib_create_cq;
ib_dev->modify_cq = hr_dev->hw->modify_cq;
ib_dev->destroy_cq = hns_roce_ib_destroy_cq;
ib_dev->req_notify_cq = hr_dev->hw->req_notify_cq;
ib_dev->poll_cq = hr_dev->hw->poll_cq;
/* MR */
ib_dev->get_dma_mr = hns_roce_get_dma_mr;
ib_dev->reg_user_mr = hns_roce_reg_user_mr;
ib_dev->dereg_mr = hns_roce_dereg_mr;
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_REREG_MR) {
ib_dev->rereg_user_mr = hns_roce_rereg_user_mr;
ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR); ib_dev->uverbs_cmd_mask |= (1ULL << IB_USER_VERBS_CMD_REREG_MR);
ib_set_device_ops(ib_dev, &hns_roce_dev_mr_ops);
} }
/* MW */ /* MW */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_MW) {
ib_dev->alloc_mw = hns_roce_alloc_mw;
ib_dev->dealloc_mw = hns_roce_dealloc_mw;
ib_dev->uverbs_cmd_mask |= ib_dev->uverbs_cmd_mask |=
(1ULL << IB_USER_VERBS_CMD_ALLOC_MW) | (1ULL << IB_USER_VERBS_CMD_ALLOC_MW) |
(1ULL << IB_USER_VERBS_CMD_DEALLOC_MW); (1ULL << IB_USER_VERBS_CMD_DEALLOC_MW);
ib_set_device_ops(ib_dev, &hns_roce_dev_mw_ops);
} }
/* FRMR */ /* FRMR */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_FRMR)
ib_dev->alloc_mr = hns_roce_alloc_mr; ib_set_device_ops(ib_dev, &hns_roce_dev_frmr_ops);
ib_dev->map_mr_sg = hns_roce_map_mr_sg;
}
/* SRQ */ /* SRQ */
if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) { if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_SRQ) {
ib_dev->create_srq = hns_roce_create_srq;
ib_dev->modify_srq = hr_dev->hw->modify_srq;
ib_dev->query_srq = hr_dev->hw->query_srq;
ib_dev->destroy_srq = hns_roce_destroy_srq;
ib_dev->post_srq_recv = hr_dev->hw->post_srq_recv;
ib_dev->uverbs_cmd_mask |= ib_dev->uverbs_cmd_mask |=
(1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ULL << IB_USER_VERBS_CMD_CREATE_SRQ) |
(1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ULL << IB_USER_VERBS_CMD_MODIFY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ULL << IB_USER_VERBS_CMD_QUERY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ULL << IB_USER_VERBS_CMD_DESTROY_SRQ) |
(1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV); (1ULL << IB_USER_VERBS_CMD_POST_SRQ_RECV);
ib_set_device_ops(ib_dev, &hns_roce_dev_srq_ops);
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_srq_ops);
} }
/* OTHERS */
ib_dev->get_port_immutable = hns_roce_port_immutable;
ib_dev->disassociate_ucontext = hns_roce_disassociate_ucontext;
ib_dev->driver_id = RDMA_DRIVER_HNS; ib_dev->driver_id = RDMA_DRIVER_HNS;
ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
ret = ib_register_device(ib_dev, "hns_%d", NULL); ret = ib_register_device(ib_dev, "hns_%d", NULL);
if (ret) { if (ret) {
dev_err(dev, "ib_register_device failed!\n"); dev_err(dev, "ib_register_device failed!\n");
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment