Commit 4b38da75 authored by Jason Gunthorpe

RDMA/drivers: Convert easy drivers to use ib_device_set_netdev()

Drivers whose ndev never changes at runtime do not need to implement
the get_netdev callback: they can bind the net_device once with
ib_device_set_netdev() before calling ib_register_device().
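
The conversion is the same in every driver touched here: delete the
driver's get_netdev implementation and its .get_netdev entry in the
ib_device_ops table, and instead call ib_device_set_netdev() for each
port before ib_register_device(). A minimal sketch of the resulting
registration flow, using a hypothetical "foo" driver (the foo_* names
are illustrative only, not taken from any converted driver):

    static int foo_register_ib(struct foo_dev *fdev)
    {
            struct ib_device *ibdev = &fdev->ibdev;
            int ret;

            /* no .get_netdev op in foo_dev_ops any more */
            ib_set_device_ops(ibdev, &foo_dev_ops);

            /* the ndev never changes for this device, so bind it to port 1 once */
            ret = ib_device_set_netdev(ibdev, fdev->netdev, 1);
            if (ret)
                    return ret;

            return ib_register_device(ibdev, "foo%d");
    }

pvrdma is the one driver below whose net_device can be torn down under
it, so it additionally clears and re-establishes the binding from its
netdevice event handler (see the last two hunks).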
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
Acked-by: Selvin Xavier <selvin.xavier@broadcom.com>
Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
Acked-by: Adit Ranadive <aditr@vmware.com>
parent 2b277dae
@@ -119,21 +119,6 @@ static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
 }
 /* Device */
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
-        struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev);
-        struct net_device *netdev = NULL;
-        rcu_read_lock();
-        if (rdev)
-                netdev = rdev->netdev;
-        if (netdev)
-                dev_hold(netdev);
-        rcu_read_unlock();
-        return netdev;
-}
 int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata)
...
@@ -142,8 +142,6 @@ struct bnxt_re_ucontext {
        spinlock_t sh_lock; /* protect shpg */
 };
-struct net_device *bnxt_re_get_netdev(struct ib_device *ibdev, u8 port_num);
 int bnxt_re_query_device(struct ib_device *ibdev,
                         struct ib_device_attr *ib_attr,
                         struct ib_udata *udata);
...
@@ -617,7 +617,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
        .get_dma_mr = bnxt_re_get_dma_mr,
        .get_hw_stats = bnxt_re_ib_get_hw_stats,
        .get_link_layer = bnxt_re_get_link_layer,
-       .get_netdev = bnxt_re_get_netdev,
        .get_port_immutable = bnxt_re_get_port_immutable,
        .map_mr_sg = bnxt_re_map_mr_sg,
        .mmap = bnxt_re_mmap,
@@ -646,6 +645,7 @@ static const struct ib_device_ops bnxt_re_dev_ops = {
 static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 {
        struct ib_device *ibdev = &rdev->ibdev;
+       int ret;
        /* ib device init */
        ibdev->owner = THIS_MODULE;
@@ -693,6 +693,10 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
        rdma_set_device_sysfs_group(ibdev, &bnxt_re_dev_attr_group);
        ibdev->driver_id = RDMA_DRIVER_BNXT_RE;
        ib_set_device_ops(ibdev, &bnxt_re_dev_ops);
+       ret = ib_device_set_netdev(&rdev->ibdev, rdev->netdev, 1);
+       if (ret)
+               return ret;
        return ib_register_device(ibdev, "bnxt_re%d");
 }
...
@@ -234,25 +234,6 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
        return 0;
 }
-static struct net_device *hns_roce_get_netdev(struct ib_device *ib_dev,
-                                              u8 port_num)
-{
-        struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
-        struct net_device *ndev;
-        if (port_num < 1 || port_num > hr_dev->caps.num_ports)
-                return NULL;
-        rcu_read_lock();
-        ndev = hr_dev->iboe.netdevs[port_num - 1];
-        if (ndev)
-                dev_hold(ndev);
-        rcu_read_unlock();
-        return ndev;
-}
 static int hns_roce_query_port(struct ib_device *ib_dev, u8 port_num,
                               struct ib_port_attr *props)
 {
@@ -458,7 +439,6 @@ static const struct ib_device_ops hns_roce_dev_ops = {
        .fill_res_entry = hns_roce_fill_res_entry,
        .get_dma_mr = hns_roce_get_dma_mr,
        .get_link_layer = hns_roce_get_link_layer,
-       .get_netdev = hns_roce_get_netdev,
        .get_port_immutable = hns_roce_port_immutable,
        .mmap = hns_roce_mmap,
        .modify_device = hns_roce_modify_device,
@@ -502,6 +482,7 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
        struct hns_roce_ib_iboe *iboe = NULL;
        struct ib_device *ib_dev = NULL;
        struct device *dev = hr_dev->dev;
+       unsigned int i;
        iboe = &hr_dev->iboe;
        spin_lock_init(&iboe->lock);
@@ -567,6 +548,15 @@ static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
        ib_dev->driver_id = RDMA_DRIVER_HNS;
        ib_set_device_ops(ib_dev, hr_dev->hw->hns_roce_dev_ops);
        ib_set_device_ops(ib_dev, &hns_roce_dev_ops);
+       for (i = 0; i < hr_dev->caps.num_ports; i++) {
+               if (!hr_dev->iboe.netdevs[i])
+                       continue;
+               ret = ib_device_set_netdev(ib_dev, hr_dev->iboe.netdevs[i],
+                                          i + 1);
+               if (ret)
+                       return ret;
+       }
        ret = ib_register_device(ib_dev, "hns_%d");
        if (ret) {
                dev_err(dev, "ib_register_device failed!\n");
...
@@ -161,7 +161,6 @@ static const struct ib_device_ops ocrdma_dev_ops = {
        .get_dev_fw_str = get_dev_fw_str,
        .get_dma_mr = ocrdma_get_dma_mr,
        .get_link_layer = ocrdma_link_layer,
-       .get_netdev = ocrdma_get_netdev,
        .get_port_immutable = ocrdma_port_immutable,
        .map_mr_sg = ocrdma_map_mr_sg,
        .mmap = ocrdma_mmap,
@@ -197,6 +196,8 @@ static const struct ib_device_ops ocrdma_dev_srq_ops = {
 static int ocrdma_register_device(struct ocrdma_dev *dev)
 {
+       int ret;
        ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid);
        BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX);
        memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC,
@@ -251,6 +252,10 @@ static int ocrdma_register_device(struct ocrdma_dev *dev)
        }
        rdma_set_device_sysfs_group(&dev->ibdev, &ocrdma_attr_group);
        dev->ibdev.driver_id = RDMA_DRIVER_OCRDMA;
+       ret = ib_device_set_netdev(&dev->ibdev, dev->nic_info.netdev, 1);
+       if (ret)
+               return ret;
        return ib_register_device(&dev->ibdev, "ocrdma%d");
 }
@@ -308,6 +313,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)
                pr_err("Unable to allocate ib device\n");
                return NULL;
        }
        dev->mbx_cmd = kzalloc(sizeof(struct ocrdma_mqe_emb_cmd), GFP_KERNEL);
        if (!dev->mbx_cmd)
                goto idr_err;
...
@@ -113,24 +113,6 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
        return 0;
 }
-struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
-{
-        struct ocrdma_dev *dev;
-        struct net_device *ndev = NULL;
-        rcu_read_lock();
-        dev = get_ocrdma_dev(ibdev);
-        if (dev)
-                ndev = dev->nic_info.netdev;
-        if (ndev)
-                dev_hold(ndev);
-        rcu_read_unlock();
-        return ndev;
-}
 static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
                                             u8 *ib_speed, u8 *ib_width)
 {
...
@@ -61,7 +61,6 @@ enum rdma_protocol_type
 ocrdma_query_protocol(struct ib_device *device, u8 port_num);
 void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid);
-struct net_device *ocrdma_get_netdev(struct ib_device *device, u8 port_num);
 int ocrdma_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
 int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata);
...
@@ -81,20 +81,6 @@ static void qedr_get_dev_fw_str(struct ib_device *ibdev, char *str)
                 (fw_ver >> 8) & 0xFF, fw_ver & 0xFF);
 }
-static struct net_device *qedr_get_netdev(struct ib_device *dev, u8 port_num)
-{
-        struct qedr_dev *qdev;
-        qdev = get_qedr_dev(dev);
-        dev_hold(qdev->ndev);
-        /* The HW vendor's device driver must guarantee
-         * that this function returns NULL before the net device has finished
-         * NETDEV_UNREGISTER state.
-         */
-        return qdev->ndev;
-}
 static int qedr_roce_port_immutable(struct ib_device *ibdev, u8 port_num,
                                    struct ib_port_immutable *immutable)
 {
@@ -219,7 +205,6 @@ static const struct ib_device_ops qedr_dev_ops = {
        .get_dev_fw_str = qedr_get_dev_fw_str,
        .get_dma_mr = qedr_get_dma_mr,
        .get_link_layer = qedr_link_layer,
-       .get_netdev = qedr_get_netdev,
        .map_mr_sg = qedr_map_mr_sg,
        .mmap = qedr_mmap,
        .modify_port = qedr_modify_port,
@@ -295,6 +280,10 @@ static int qedr_register_device(struct qedr_dev *dev)
        ib_set_device_ops(&dev->ibdev, &qedr_dev_ops);
        dev->ibdev.driver_id = RDMA_DRIVER_QEDR;
+       rc = ib_device_set_netdev(&dev->ibdev, dev->ndev, 1);
+       if (rc)
+               return rc;
        return ib_register_device(&dev->ibdev, "qedr%d");
 }
...
@@ -143,24 +143,6 @@ static int pvrdma_port_immutable(struct ib_device *ibdev, u8 port_num,
        return 0;
 }
-static struct net_device *pvrdma_get_netdev(struct ib_device *ibdev,
-                                            u8 port_num)
-{
-        struct net_device *netdev;
-        struct pvrdma_dev *dev = to_vdev(ibdev);
-        if (port_num != 1)
-                return NULL;
-        rcu_read_lock();
-        netdev = dev->netdev;
-        if (netdev)
-                dev_hold(netdev);
-        rcu_read_unlock();
-        return netdev;
-}
 static const struct ib_device_ops pvrdma_dev_ops = {
        .add_gid = pvrdma_add_gid,
        .alloc_mr = pvrdma_alloc_mr,
@@ -179,7 +161,6 @@ static const struct ib_device_ops pvrdma_dev_ops = {
        .get_dev_fw_str = pvrdma_get_fw_ver_str,
        .get_dma_mr = pvrdma_get_dma_mr,
        .get_link_layer = pvrdma_port_link_layer,
-       .get_netdev = pvrdma_get_netdev,
        .get_port_immutable = pvrdma_port_immutable,
        .map_mr_sg = pvrdma_map_mr_sg,
        .mmap = pvrdma_mmap,
@@ -281,6 +262,9 @@ static int pvrdma_register_device(struct pvrdma_dev *dev)
                goto err_qp_free;
        }
        dev->ib_dev.driver_id = RDMA_DRIVER_VMW_PVRDMA;
+       ret = ib_device_set_netdev(&dev->ib_dev, dev->netdev, 1);
+       if (ret)
+               return ret;
        spin_lock_init(&dev->srq_tbl_lock);
        rdma_set_device_sysfs_group(&dev->ib_dev, &pvrdma_attr_group);
@@ -724,6 +708,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
                pvrdma_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
                break;
        case NETDEV_UNREGISTER:
+               ib_device_set_netdev(&dev->ib_dev, NULL, 1);
                dev_put(dev->netdev);
                dev->netdev = NULL;
                break;
@@ -735,6 +720,7 @@ static void pvrdma_netdevice_event_handle(struct pvrdma_dev *dev,
                if ((dev->netdev == NULL) &&
                    (pci_get_drvdata(pdev_net) == ndev)) {
                        /* this is our netdev */
+                       ib_device_set_netdev(&dev->ib_dev, ndev, 1);
                        dev->netdev = ndev;
                        dev_hold(ndev);
                }
...