Commit b82cab6b authored by Hal Rosenstock, committed by Linus Torvalds

[PATCH] IB: Update MAD client API

Automatically allocate a MR when registering a MAD agent.
MAD clients are modified to use this updated API.
Signed-off-by: Sean Hefty <sean.hefty@intel.com>
Signed-off-by: Hal Rosenstock <halr@voltaire.com>
Cc: Roland Dreier <rolandd@cisco.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent c183a4c3
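
For MAD clients, the change boils down to dropping their private DMA MR and using the one the MAD layer now attaches to the agent. Below is a minimal sketch of the converted send-path setup, assuming hypothetical placeholder names my_agent, my_mad and my_device; it mirrors the gather-list code in the hunks that follow and is not part of the commit itself.

    struct ib_sge gather_list;

    /* Before: each client allocated and freed its own MR, e.g.
     *   my_mr = ib_get_dma_mr(my_agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
     *   ...
     *   gather_list.lkey = my_mr->lkey;
     *   ...
     *   ib_dereg_mr(my_mr);
     */

    /* After: ib_register_mad_agent() allocates the MR and
     * ib_unregister_mad_agent() releases it; clients just reference it.
     */
    gather_list.addr   = dma_map_single(my_device->dma_device, my_mad,
                                        sizeof *my_mad, DMA_TO_DEVICE);
    gather_list.length = sizeof *my_mad;
    gather_list.lkey   = my_agent->mr->lkey;

The hunks below apply exactly this conversion to the SM/PerfMgmt agents and the SA query code, move MR setup and teardown into ib_register_mad_agent() / unregister_mad_agent(), and add the mr field to struct ib_mad_agent.
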
@@ -134,7 +134,7 @@ static int agent_mad_send(struct ib_mad_agent *mad_agent,
sizeof(mad_priv->mad),
DMA_TO_DEVICE);
gather_list.length = sizeof(mad_priv->mad);
- gather_list.lkey = (*port_priv->mr).lkey;
+ gather_list.lkey = mad_agent->mr->lkey;
send_wr.next = NULL;
send_wr.opcode = IB_WR_SEND;
@@ -322,22 +322,12 @@ int ib_agent_port_open(struct ib_device *device, int port_num)
goto error3;
}
- port_priv->mr = ib_get_dma_mr(port_priv->smp_agent->qp->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(port_priv->mr)) {
- printk(KERN_ERR SPFX "Couldn't get DMA MR\n");
- ret = PTR_ERR(port_priv->mr);
- goto error4;
- }
spin_lock_irqsave(&ib_agent_port_list_lock, flags);
list_add_tail(&port_priv->port_list, &ib_agent_port_list);
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
return 0;
- error4:
- ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
error3:
ib_unregister_mad_agent(port_priv->smp_agent);
error2:
@@ -361,8 +351,6 @@ int ib_agent_port_close(struct ib_device *device, int port_num)
list_del(&port_priv->port_list);
spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
- ib_dereg_mr(port_priv->mr);
ib_unregister_mad_agent(port_priv->perf_mgmt_agent);
ib_unregister_mad_agent(port_priv->smp_agent);
kfree(port_priv);
@@ -33,7 +33,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * $Id: agent_priv.h 1389 2004-12-27 22:56:47Z roland $
+ * $Id: agent_priv.h 1640 2005-01-24 22:39:02Z halr $
*/
#ifndef __IB_AGENT_PRIV_H__
@@ -57,7 +57,6 @@ struct ib_agent_port_private {
int port_num;
struct ib_mad_agent *smp_agent; /* SM class */
struct ib_mad_agent *perf_mgmt_agent; /* PerfMgmt class */
- struct ib_mr *mr;
};
#endif /* __IB_AGENT_PRIV_H__ */
@@ -261,19 +261,26 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
ret = ERR_PTR(-ENOMEM);
goto error1;
}
+ memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
+ mad_agent_priv->agent.mr = ib_get_dma_mr(port_priv->qp_info[qpn].qp->pd,
+ IB_ACCESS_LOCAL_WRITE);
+ if (IS_ERR(mad_agent_priv->agent.mr)) {
+ ret = ERR_PTR(-ENOMEM);
+ goto error2;
+ }
if (mad_reg_req) {
reg_req = kmalloc(sizeof *reg_req, GFP_KERNEL);
if (!reg_req) {
ret = ERR_PTR(-ENOMEM);
- goto error2;
+ goto error3;
}
/* Make a copy of the MAD registration request */
memcpy(reg_req, mad_reg_req, sizeof *reg_req);
}
/* Now, fill in the various structures */
- memset(mad_agent_priv, 0, sizeof *mad_agent_priv);
mad_agent_priv->qp_info = &port_priv->qp_info[qpn];
mad_agent_priv->reg_req = reg_req;
mad_agent_priv->rmpp_version = rmpp_version;
@@ -301,7 +308,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (method) {
if (method_in_use(&method,
mad_reg_req))
- goto error3;
+ goto error4;
}
}
ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv,
@@ -317,14 +324,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
if (is_vendor_method_in_use(
vendor_class,
mad_reg_req))
- goto error3;
+ goto error4;
}
}
ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv);
}
if (ret2) {
ret = ERR_PTR(ret2);
- goto error3;
+ goto error4;
}
}
@@ -346,11 +353,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
return &mad_agent_priv->agent;
- error3:
+ error4:
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
kfree(reg_req);
- error2:
+ error3:
kfree(mad_agent_priv);
+ error2:
+ ib_dereg_mr(mad_agent_priv->agent.mr);
error1:
return ret;
}
@@ -487,18 +496,15 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
* MADs, preventing us from queuing additional work
*/
cancel_mads(mad_agent_priv);
port_priv = mad_agent_priv->qp_info->port_priv;
cancel_delayed_work(&mad_agent_priv->timed_work);
- flush_workqueue(port_priv->wq);
spin_lock_irqsave(&port_priv->reg_lock, flags);
remove_mad_reg_req(mad_agent_priv);
list_del(&mad_agent_priv->agent_list);
spin_unlock_irqrestore(&port_priv->reg_lock, flags);
/* XXX: Cleanup pending RMPP receives for this agent */
+ flush_workqueue(port_priv->wq);
atomic_dec(&mad_agent_priv->refcount);
wait_event(mad_agent_priv->wait,
@@ -506,6 +512,7 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv)
if (mad_agent_priv->reg_req)
kfree(mad_agent_priv->reg_req);
+ ib_dereg_mr(mad_agent_priv->agent.mr);
kfree(mad_agent_priv);
}
@@ -750,7 +757,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
list_add_tail(&local->completion_list, &mad_agent_priv->local_list);
spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
queue_work(mad_agent_priv->qp_info->port_priv->wq,
- &mad_agent_priv->local_work);
+ &mad_agent_priv->local_work);
ret = 1;
out:
return ret;
@@ -77,7 +77,6 @@ struct ib_sa_sm_ah {
struct ib_sa_port {
struct ib_mad_agent *agent;
- struct ib_mr *mr;
struct ib_sa_sm_ah *sm_ah;
struct work_struct update_task;
spinlock_t ah_lock;
@@ -492,7 +491,7 @@ static int send_mad(struct ib_sa_query *query, int timeout_ms)
sizeof (struct ib_sa_mad),
DMA_TO_DEVICE);
gather_list.length = sizeof (struct ib_sa_mad);
- gather_list.lkey = port->mr->lkey;
+ gather_list.lkey = port->agent->mr->lkey;
pci_unmap_addr_set(query, mapping, gather_list.addr);
ret = ib_post_send_mad(port->agent, &wr, &bad_wr);
@@ -780,7 +779,6 @@ static void ib_sa_add_one(struct ib_device *device)
sa_dev->end_port = e;
for (i = 0; i <= e - s; ++i) {
- sa_dev->port[i].mr = NULL;
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
spin_lock_init(&sa_dev->port[i].ah_lock);
@@ -792,13 +790,6 @@ static void ib_sa_add_one(struct ib_device *device)
if (IS_ERR(sa_dev->port[i].agent))
goto err;
- sa_dev->port[i].mr = ib_get_dma_mr(sa_dev->port[i].agent->qp->pd,
- IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(sa_dev->port[i].mr)) {
- ib_unregister_mad_agent(sa_dev->port[i].agent);
- goto err;
- }
INIT_WORK(&sa_dev->port[i].update_task,
update_sm_ah, &sa_dev->port[i]);
}
@@ -822,10 +813,8 @@ static void ib_sa_add_one(struct ib_device *device)
return;
err:
- while (--i >= 0) {
- ib_dereg_mr(sa_dev->port[i].mr);
+ while (--i >= 0)
ib_unregister_mad_agent(sa_dev->port[i].agent);
- }
kfree(sa_dev);
@@ -180,6 +180,7 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_agent {
struct ib_device *device;
struct ib_qp *qp;
+ struct ib_mr *mr;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
ib_mad_snoop_handler snoop_handler;