Commit ea1075ed authored by Jason Gunthorpe

RDMA: Add and use rdma_for_each_port

We have many loops iterating over all of the end port numbers on a struct
ib_device, simplify them with a for_each helper.
Reviewed-by: Parav Pandit <parav@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f2a0e45f
...@@ -361,6 +361,7 @@ ForEachMacros: ...@@ -361,6 +361,7 @@ ForEachMacros:
- 'radix_tree_for_each_slot' - 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged' - 'radix_tree_for_each_tagged'
- 'rbtree_postorder_for_each_entry_safe' - 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_port'
- 'resource_list_for_each_entry' - 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe' - 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu' - 'rhl_for_each_entry_rcu'
......
...@@ -1428,7 +1428,7 @@ static void ib_cache_event(struct ib_event_handler *handler, ...@@ -1428,7 +1428,7 @@ static void ib_cache_event(struct ib_event_handler *handler,
int ib_cache_setup_one(struct ib_device *device) int ib_cache_setup_one(struct ib_device *device)
{ {
int p; unsigned int p;
int err; int err;
rwlock_init(&device->cache.lock); rwlock_init(&device->cache.lock);
...@@ -1447,8 +1447,8 @@ int ib_cache_setup_one(struct ib_device *device) ...@@ -1447,8 +1447,8 @@ int ib_cache_setup_one(struct ib_device *device)
return err; return err;
} }
for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) rdma_for_each_port (device, p)
ib_cache_update(device, p + rdma_start_port(device), true); ib_cache_update(device, p, true);
INIT_IB_EVENT_HANDLER(&device->cache.event_handler, INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
device, ib_cache_event); device, ib_cache_event);
......
...@@ -659,7 +659,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) ...@@ -659,7 +659,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
struct cma_device *cma_dev; struct cma_device *cma_dev;
enum ib_gid_type gid_type; enum ib_gid_type gid_type;
int ret = -ENODEV; int ret = -ENODEV;
u8 port; unsigned int port;
if (dev_addr->dev_type != ARPHRD_INFINIBAND && if (dev_addr->dev_type != ARPHRD_INFINIBAND &&
id_priv->id.ps == RDMA_PS_IPOIB) id_priv->id.ps == RDMA_PS_IPOIB)
...@@ -673,8 +673,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv) ...@@ -673,8 +673,7 @@ static int cma_acquire_dev_by_src_ip(struct rdma_id_private *id_priv)
mutex_lock(&lock); mutex_lock(&lock);
list_for_each_entry(cma_dev, &dev_list, list) { list_for_each_entry(cma_dev, &dev_list, list) {
for (port = rdma_start_port(cma_dev->device); rdma_for_each_port (cma_dev->device, port) {
port <= rdma_end_port(cma_dev->device); port++) {
gidp = rdma_protocol_roce(cma_dev->device, port) ? gidp = rdma_protocol_roce(cma_dev->device, port) ?
&iboe_gid : &gid; &iboe_gid : &gid;
gid_type = cma_dev->default_gid_type[port - 1]; gid_type = cma_dev->default_gid_type[port - 1];
...@@ -4548,7 +4547,7 @@ static void cma_add_one(struct ib_device *device) ...@@ -4548,7 +4547,7 @@ static void cma_add_one(struct ib_device *device)
if (!cma_dev->default_roce_tos) if (!cma_dev->default_roce_tos)
goto free_gid_type; goto free_gid_type;
for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { rdma_for_each_port (device, i) {
supported_gids = roce_gid_type_mask_support(device, i); supported_gids = roce_gid_type_mask_support(device, i);
WARN_ON(!supported_gids); WARN_ON(!supported_gids);
if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE)) if (supported_gids & (1 << CMA_PREFERRED_ROCE_GID_TYPE))
......
...@@ -470,10 +470,8 @@ static int verify_immutable(const struct ib_device *dev, u8 port) ...@@ -470,10 +470,8 @@ static int verify_immutable(const struct ib_device *dev, u8 port)
static int read_port_immutable(struct ib_device *device) static int read_port_immutable(struct ib_device *device)
{ {
unsigned int port;
int ret; int ret;
u8 start_port = rdma_start_port(device);
u8 end_port = rdma_end_port(device);
u8 port;
/** /**
* device->port_immutable is indexed directly by the port number to make * device->port_immutable is indexed directly by the port number to make
...@@ -482,13 +480,13 @@ static int read_port_immutable(struct ib_device *device) ...@@ -482,13 +480,13 @@ static int read_port_immutable(struct ib_device *device)
* Therefore port_immutable is declared as a 1 based array with * Therefore port_immutable is declared as a 1 based array with
* potential empty slots at the beginning. * potential empty slots at the beginning.
*/ */
device->port_immutable = kcalloc(end_port + 1, device->port_immutable =
sizeof(*device->port_immutable), kcalloc(rdma_end_port(device) + 1,
GFP_KERNEL); sizeof(*device->port_immutable), GFP_KERNEL);
if (!device->port_immutable) if (!device->port_immutable)
return -ENOMEM; return -ENOMEM;
for (port = start_port; port <= end_port; ++port) { rdma_for_each_port (device, port) {
ret = device->ops.get_port_immutable( ret = device->ops.get_port_immutable(
device, port, &device->port_immutable[port]); device, port, &device->port_immutable[port]);
if (ret) if (ret)
...@@ -540,9 +538,9 @@ static void ib_policy_change_task(struct work_struct *work) ...@@ -540,9 +538,9 @@ static void ib_policy_change_task(struct work_struct *work)
down_read(&devices_rwsem); down_read(&devices_rwsem);
xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) { xa_for_each_marked (&devices, index, dev, DEVICE_REGISTERED) {
int i; unsigned int i;
for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) { rdma_for_each_port (dev, i) {
u64 sp; u64 sp;
int ret = ib_get_cached_subnet_prefix(dev, int ret = ib_get_cached_subnet_prefix(dev,
i, i,
...@@ -1060,10 +1058,9 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev, ...@@ -1060,10 +1058,9 @@ void ib_enum_roce_netdev(struct ib_device *ib_dev,
roce_netdev_callback cb, roce_netdev_callback cb,
void *cookie) void *cookie)
{ {
u8 port; unsigned int port;
for (port = rdma_start_port(ib_dev); port <= rdma_end_port(ib_dev); rdma_for_each_port (ib_dev, port)
port++)
if (rdma_protocol_roce(ib_dev, port)) { if (rdma_protocol_roce(ib_dev, port)) {
struct net_device *idev = NULL; struct net_device *idev = NULL;
...@@ -1217,9 +1214,10 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, ...@@ -1217,9 +1214,10 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid,
u8 *port_num, u16 *index) u8 *port_num, u16 *index)
{ {
union ib_gid tmp_gid; union ib_gid tmp_gid;
int ret, port, i; unsigned int port;
int ret, i;
for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { rdma_for_each_port (device, port) {
if (!rdma_protocol_ib(device, port)) if (!rdma_protocol_ib(device, port))
continue; continue;
......
...@@ -3326,9 +3326,9 @@ static void ib_mad_init_device(struct ib_device *device) ...@@ -3326,9 +3326,9 @@ static void ib_mad_init_device(struct ib_device *device)
static void ib_mad_remove_device(struct ib_device *device, void *client_data) static void ib_mad_remove_device(struct ib_device *device, void *client_data)
{ {
int i; unsigned int i;
for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { rdma_for_each_port (device, i) {
if (!rdma_cap_ib_mad(device, i)) if (!rdma_cap_ib_mad(device, i))
continue; continue;
......
...@@ -774,7 +774,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, ...@@ -774,7 +774,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
u32 idx = 0; u32 idx = 0;
u32 ifindex; u32 ifindex;
int err; int err;
u32 p; unsigned int p;
err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, err = nlmsg_parse(cb->nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1,
nldev_policy, NULL); nldev_policy, NULL);
...@@ -786,7 +786,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb, ...@@ -786,7 +786,7 @@ static int nldev_port_get_dumpit(struct sk_buff *skb,
if (!device) if (!device)
return -EINVAL; return -EINVAL;
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { rdma_for_each_port (device, p) {
/* /*
* The dumpit function returns all information from specific * The dumpit function returns all information from specific
* index. This specific index is taken from the netlink * index. This specific index is taken from the netlink
......
...@@ -422,12 +422,15 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec) ...@@ -422,12 +422,15 @@ void ib_close_shared_qp_security(struct ib_qp_security *sec)
int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev)
{ {
u8 i = rdma_start_port(dev); unsigned int i;
bool is_ib = false; bool is_ib = false;
int ret; int ret;
while (i <= rdma_end_port(dev) && !is_ib) rdma_for_each_port (dev, i) {
is_ib = rdma_protocol_ib(dev, i++); is_ib = rdma_protocol_ib(dev, i);
if (is_ib)
break;
}
/* If this isn't an IB device don't create the security context */ /* If this isn't an IB device don't create the security context */
if (!is_ib) if (!is_ib)
...@@ -561,9 +564,9 @@ void ib_security_cache_change(struct ib_device *device, ...@@ -561,9 +564,9 @@ void ib_security_cache_change(struct ib_device *device,
void ib_security_release_port_pkey_list(struct ib_device *device) void ib_security_release_port_pkey_list(struct ib_device *device)
{ {
struct pkey_index_qp_list *pkey, *tmp_pkey; struct pkey_index_qp_list *pkey, *tmp_pkey;
int i; unsigned int i;
for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { rdma_for_each_port (device, i) {
list_for_each_entry_safe(pkey, list_for_each_entry_safe(pkey,
tmp_pkey, tmp_pkey,
&device->port_pkey_list[i].pkey_list, &device->port_pkey_list[i].pkey_list,
......
...@@ -1308,23 +1308,17 @@ static void ib_free_port_attrs(struct ib_device *device) ...@@ -1308,23 +1308,17 @@ static void ib_free_port_attrs(struct ib_device *device)
static int ib_setup_port_attrs(struct ib_device *device) static int ib_setup_port_attrs(struct ib_device *device)
{ {
unsigned int port;
int ret; int ret;
int i;
device->ports_kobj = kobject_create_and_add("ports", &device->dev.kobj); device->ports_kobj = kobject_create_and_add("ports", &device->dev.kobj);
if (!device->ports_kobj) if (!device->ports_kobj)
return -ENOMEM; return -ENOMEM;
if (rdma_cap_ib_switch(device)) { rdma_for_each_port (device, port) {
ret = add_port(device, 0); ret = add_port(device, port);
if (ret) if (ret)
goto err_put; goto err_put;
} else {
for (i = 1; i <= device->phys_port_cnt; ++i) {
ret = add_port(device, i);
if (ret)
goto err_put;
}
} }
return 0; return 0;
......
...@@ -1323,14 +1323,15 @@ static void ib_umad_add_one(struct ib_device *device) ...@@ -1323,14 +1323,15 @@ static void ib_umad_add_one(struct ib_device *device)
static void ib_umad_remove_one(struct ib_device *device, void *client_data) static void ib_umad_remove_one(struct ib_device *device, void *client_data)
{ {
struct ib_umad_device *umad_dev = client_data; struct ib_umad_device *umad_dev = client_data;
int i; unsigned int i;
if (!umad_dev) if (!umad_dev)
return; return;
for (i = 0; i <= rdma_end_port(device) - rdma_start_port(device); ++i) { rdma_for_each_port (device, i) {
if (rdma_cap_ib_mad(device, i + rdma_start_port(device))) if (rdma_cap_ib_mad(device, i))
ib_umad_kill_port(&umad_dev->ports[i]); ib_umad_kill_port(
&umad_dev->ports[i - rdma_start_port(device)]);
} }
/* balances kref_init() */ /* balances kref_init() */
ib_umad_dev_put(umad_dev); ib_umad_dev_put(umad_dev);
......
...@@ -2495,7 +2495,7 @@ static void ipoib_add_one(struct ib_device *device) ...@@ -2495,7 +2495,7 @@ static void ipoib_add_one(struct ib_device *device)
struct list_head *dev_list; struct list_head *dev_list;
struct net_device *dev; struct net_device *dev;
struct ipoib_dev_priv *priv; struct ipoib_dev_priv *priv;
int p; unsigned int p;
int count = 0; int count = 0;
dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL); dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL);
...@@ -2504,7 +2504,7 @@ static void ipoib_add_one(struct ib_device *device) ...@@ -2504,7 +2504,7 @@ static void ipoib_add_one(struct ib_device *device)
INIT_LIST_HEAD(dev_list); INIT_LIST_HEAD(dev_list);
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { rdma_for_each_port (device, p) {
if (!rdma_protocol_ib(device, p)) if (!rdma_protocol_ib(device, p))
continue; continue;
dev = ipoib_add_port("ib%d", device, p); dev = ipoib_add_port("ib%d", device, p);
......
...@@ -4127,7 +4127,8 @@ static void srp_add_one(struct ib_device *device) ...@@ -4127,7 +4127,8 @@ static void srp_add_one(struct ib_device *device)
struct srp_device *srp_dev; struct srp_device *srp_dev;
struct ib_device_attr *attr = &device->attrs; struct ib_device_attr *attr = &device->attrs;
struct srp_host *host; struct srp_host *host;
int mr_page_shift, p; int mr_page_shift;
unsigned int p;
u64 max_pages_per_mr; u64 max_pages_per_mr;
unsigned int flags = 0; unsigned int flags = 0;
...@@ -4194,7 +4195,7 @@ static void srp_add_one(struct ib_device *device) ...@@ -4194,7 +4195,7 @@ static void srp_add_one(struct ib_device *device)
WARN_ON_ONCE(srp_dev->global_rkey == 0); WARN_ON_ONCE(srp_dev->global_rkey == 0);
} }
for (p = rdma_start_port(device); p <= rdma_end_port(device); ++p) { rdma_for_each_port (device, p) {
host = srp_add_port(srp_dev, p); host = srp_add_port(srp_dev, p);
if (host) if (host)
list_add_tail(&host->list, &srp_dev->dev_list); list_add_tail(&host->list, &srp_dev->dev_list);
......
...@@ -2827,6 +2827,16 @@ static inline u8 rdma_start_port(const struct ib_device *device) ...@@ -2827,6 +2827,16 @@ static inline u8 rdma_start_port(const struct ib_device *device)
return rdma_cap_ib_switch(device) ? 0 : 1; return rdma_cap_ib_switch(device) ? 0 : 1;
} }
/**
 * rdma_for_each_port - Iterate over all valid port numbers of the IB device
 * @device: The struct ib_device * to iterate over
 * @iter: The unsigned int to store the port number
 *
 * Iterates from rdma_start_port() to rdma_end_port() inclusive. The
 * BUILD_BUG_ON_ZERO()/__same_type() check forces @iter to be declared as
 * an unsigned int; it expands to 0 and so does not change the start port.
 */
#define rdma_for_each_port(device, iter) \
for (iter = rdma_start_port(device + BUILD_BUG_ON_ZERO(!__same_type( \
unsigned int, iter))); \
iter <= rdma_end_port(device); (iter)++)
/** /**
* rdma_end_port - Return the last valid port number for the device * rdma_end_port - Return the last valid port number for the device
* specified * specified
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment