Commit 949a2370 authored by Matthew Wilcox, committed by Jason Gunthorpe

IB/mad: Convert ib_mad_clients to XArray

Pull the allocation function out into its own function to reduce the
length of ib_register_mad_agent() a little and keep all the allocation
logic together.
Signed-off-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent f1430536
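Before the diff, a quick sketch of the API shape this conversion targets: xa_alloc_cyclic() stores the entry and writes the chosen ID through a pointer in a single call, with locking handled internally by the XArray. The sketch below is illustrative only and not code from this patch; struct demo_client and demo_register() are hypothetical names.

#include <linux/xarray.h>

/* Hypothetical stand-in for ib_mad_agent_private. */
struct demo_client {
	u32 id;
};

static DEFINE_XARRAY_ALLOC1(demo_clients);	/* index 0 never allocated */
static u32 demo_clients_next;			/* cyclic allocation cursor */

static int demo_register(struct demo_client *c)
{
	/* One call replaces the idr_preload()/idr_lock()/idr_alloc_cyclic()/
	 * idr_unlock()/idr_preload_end() sequence: the XArray takes its own
	 * internal spinlock and copes with GFP_KERNEL allocation itself. */
	int err = xa_alloc_cyclic(&demo_clients, &c->id, c,
				  XA_LIMIT(0, (1 << 24) - 1),
				  &demo_clients_next, GFP_KERNEL);

	return err < 0 ? err : 0;
}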
@@ -38,10 +38,10 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/dma-mapping.h>
-#include <linux/idr.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/security.h>
+#include <linux/xarray.h>
 #include <rdma/ib_cache.h>
 
 #include "mad_priv.h"
@@ -59,12 +59,9 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests");
 module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
 MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests");
 
-/*
- * The mlx4 driver uses the top byte to distinguish which virtual function
- * generated the MAD, so we must avoid using it.
- */
-#define AGENT_ID_LIMIT		(1 << 24)
-static DEFINE_IDR(ib_mad_clients);
+/* Client ID 0 is used for snoop-only clients */
+static DEFINE_XARRAY_ALLOC1(ib_mad_clients);
+static u32 ib_mad_client_next;
 static struct list_head ib_mad_port_list;
 
 /* Port list lock */
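A note on the two statics introduced above: ib_mad_client_next is the cursor that xa_alloc_cyclic() advances past each granted ID, so a just-freed ID is not handed straight back to the next registrant. That matters here because hi_tid ends up in the high bits of the transaction ID of MADs on the wire. A minimal demo-only sketch of the cursor behaviour, under the usual XArray semantics:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC1(demo);
static u32 demo_next;
static int demo_entry;

static void demo_cyclic(void)
{
	u32 id;

	/* First allocation with ALLOC1 yields id == 1. */
	xa_alloc_cyclic(&demo, &id, &demo_entry, XA_LIMIT(0, 15),
			&demo_next, GFP_KERNEL);
	xa_erase(&demo, id);

	/* The cursor has moved on, so this yields id == 2, not 1 again. */
	xa_alloc_cyclic(&demo, &id, &demo_entry, XA_LIMIT(0, 15),
			&demo_next, GFP_KERNEL);
}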
@@ -389,18 +386,17 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 		goto error4;
 	}
 
-	idr_preload(GFP_KERNEL);
-	idr_lock(&ib_mad_clients);
-	ret2 = idr_alloc_cyclic(&ib_mad_clients, mad_agent_priv, 0,
-			AGENT_ID_LIMIT, GFP_ATOMIC);
-	idr_unlock(&ib_mad_clients);
-	idr_preload_end();
-
+	/*
+	 * The mlx4 driver uses the top byte to distinguish which virtual
+	 * function generated the MAD, so we must avoid using it.
+	 */
+	ret2 = xa_alloc_cyclic(&ib_mad_clients, &mad_agent_priv->agent.hi_tid,
+			mad_agent_priv, XA_LIMIT(0, (1 << 24) - 1),
+			&ib_mad_client_next, GFP_KERNEL);
 	if (ret2 < 0) {
 		ret = ERR_PTR(ret2);
 		goto error5;
 	}
-	mad_agent_priv->agent.hi_tid = ret2;
 
 	/*
 	 * Make sure MAD registration (if supplied)
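On the `ret2 < 0` check above: xa_alloc_cyclic() returns 0 on a plain success, 1 if the allocation succeeded but the cursor wrapped back to the bottom of the range, and a negative errno (such as -EBUSY when the range is full, or -ENOMEM) on failure, so testing only for negative values keeps a wrap counting as success. GFP_KERNEL also replaces the old idr_preload() + GFP_ATOMIC combination, since the XArray can drop its internal lock to allocate. A small sketch of a caller that reacts to all three outcomes, demo names only:

#include <linux/xarray.h>

static int demo_alloc(struct xarray *xa, u32 *id, void *entry, u32 *next)
{
	int err = xa_alloc_cyclic(xa, id, entry,
				  XA_LIMIT(0, (1 << 24) - 1),
				  next, GFP_KERNEL);

	if (err < 0)		/* -EBUSY (range full) or -ENOMEM */
		return err;
	if (err == 1)		/* wrapped: old IDs may now be reused */
		pr_debug("demo: ID space wrapped\n");

	return 0;
}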
@@ -448,9 +444,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
 	return &mad_agent_priv->agent;
 error6:
 	spin_unlock_irq(&port_priv->reg_lock);
-	idr_lock(&ib_mad_clients);
-	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
-	idr_unlock(&ib_mad_clients);
+	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
error5:
 	ib_mad_agent_security_cleanup(&mad_agent_priv->agent);
error4:
@@ -614,9 +608,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
 	spin_lock_irq(&port_priv->reg_lock);
 	remove_mad_reg_req(mad_agent_priv);
 	spin_unlock_irq(&port_priv->reg_lock);
-	idr_lock(&ib_mad_clients);
-	idr_remove(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
-	idr_unlock(&ib_mad_clients);
+	xa_erase(&ib_mad_clients, mad_agent_priv->agent.hi_tid);
 
 	flush_workqueue(port_priv->wq);
 	ib_cancel_rmpp_recvs(mad_agent_priv);
@@ -1756,7 +1748,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv,
 	 */
 	hi_tid = be64_to_cpu(mad_hdr->tid) >> 32;
 	rcu_read_lock();
-	mad_agent = idr_find(&ib_mad_clients, hi_tid);
+	mad_agent = xa_load(&ib_mad_clients, hi_tid);
 	if (mad_agent && !atomic_inc_not_zero(&mad_agent->refcount))
 		mad_agent = NULL;
 	rcu_read_unlock();
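This hunk is why the conversion needs no lock on the read side: xa_load() is documented as safe to call under rcu_read_lock(), and atomic_inc_not_zero() keeps a racing unregister (which does xa_erase() and drops the final reference) from handing back a dead agent. The same pattern in a self-contained sketch; struct demo_agent and demo_lookup() are hypothetical:

#include <linux/atomic.h>
#include <linux/rcupdate.h>
#include <linux/xarray.h>

struct demo_agent {
	atomic_t refcount;
};

static struct demo_agent *demo_lookup(struct xarray *xa, u32 hi_tid)
{
	struct demo_agent *agent;

	rcu_read_lock();
	agent = xa_load(xa, hi_tid);		/* RCU-safe lookup */
	if (agent && !atomic_inc_not_zero(&agent->refcount))
		agent = NULL;			/* lost the race with teardown */
	rcu_read_unlock();

	return agent;	/* caller owns a reference if non-NULL */
}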
@@ -3356,9 +3348,6 @@ int ib_mad_init(void)
 
 	INIT_LIST_HEAD(&ib_mad_port_list);
 
-	/* Client ID 0 is used for snoop-only clients */
-	idr_alloc(&ib_mad_clients, NULL, 0, 0, GFP_KERNEL);
-
 	if (ib_register_client(&mad_client)) {
 		pr_err("Couldn't register ib_mad client\n");
 		return -EINVAL;
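The deleted init-time code above is the other payoff of DEFINE_XARRAY_ALLOC1: the IDR needed a dummy idr_alloc(..., NULL, 0, 0, ...) at module init to keep ID 0 reserved for snoop-only clients, whereas the ALLOC1 variant bakes that reservation into the structure itself. A demo-only sketch of the difference between the two DEFINE variants, as commonly understood:

#include <linux/xarray.h>

static DEFINE_XARRAY_ALLOC(demo_base0);		/* IDs start at 0 */
static DEFINE_XARRAY_ALLOC1(demo_base1);	/* IDs start at 1 */
static int demo_entry;

static void demo_first_ids(void)
{
	u32 id;

	if (!xa_alloc(&demo_base0, &id, &demo_entry, xa_limit_32b,
		      GFP_KERNEL))
		WARN_ON(id != 0);	/* first ID handed out is 0 */

	if (!xa_alloc(&demo_base1, &id, &demo_entry, xa_limit_32b,
		      GFP_KERNEL))
		WARN_ON(id != 1);	/* index 0 stays reserved */
}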