Commit c9082e51 authored by Ira Weiny, committed by Doug Ledford

IB/mad: Convert allocations from kmem_cache to kzalloc

This patch implements allocation of alternate receive MAD buffers within the MAD
stack.  Support for OPA devices to send/recv variable-sized MADs is added in a later patch.

    1) Convert MAD allocations from kmem_cache to kzalloc

       kzalloc is more flexible in supporting devices with different-sized MADs,
       and research and testing showed that the current use of kmem_cache
       provides no performance benefit over kzalloc.  (A hedged sketch of the
       resulting allocation path follows this list.)

    2) Change struct ib_mad_private to use a flex array for the mad data
    3) Allocate ib_mad_private based on the size specified by devices in
       rdma_max_mad_size.
    4) Carry the allocated size in ib_mad_private to be used when processing
       ib_mad_private objects.
    5) Alter DMA mappings based on the mad_size of ib_mad_private.
    6) Replace the use of sizeof and static defines as appropriate
    7) Add appropriate casts for the MAD data when calling processing
       functions.
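A minimal sketch of how points 1-5 could fit together in the receive path. The
helper alloc_mad_private(), the port_mad_size() accessor, and the caller shape
below follow the commit message but are illustrative assumptions, not a quote
of the patch:

/*
 * Illustrative sketch only -- names follow the commit message; the helper
 * and its caller are assumptions rather than the literal patch.
 */
static struct ib_mad_private *alloc_mad_private(size_t mad_size, gfp_t flags)
{
	/* Point 2: a flex array lets one kzalloc cover header, GRH and data. */
	struct ib_mad_private *ret;

	ret = kzalloc(sizeof(struct ib_mad_private) + mad_size, flags);
	if (ret)
		ret->mad_size = mad_size;	/* point 4: carry the size */
	return ret;
}

static int post_receive_mad_sketch(struct ib_mad_qp_info *qp_info)
{
	/* Point 3: the size comes from the device (rdma_max_mad_size). */
	struct ib_mad_private *mad_priv =
		alloc_mad_private(port_mad_size(qp_info->port_priv), GFP_ATOMIC);

	if (!mad_priv)
		return -ENOMEM;

	/*
	 * Point 5: the receive DMA mapping would now cover
	 * sizeof(mad_priv->grh) + mad_priv->mad_size rather than a fixed
	 * sizeof(struct ib_mad).
	 */
	return 0;
}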
Signed-off-by: Ira Weiny <ira.weiny@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 337877a4
@@ -78,9 +78,9 @@ ib_get_agent_port(const struct ib_device *device, int port_num)
 	return entry;
 }
 
-void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
+void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
 			 const struct ib_wc *wc, const struct ib_device *device,
-			 int port_num, int qpn)
+			 int port_num, int qpn, size_t resp_mad_len)
 {
 	struct ib_agent_port_private *port_priv;
 	struct ib_mad_agent *agent;
@@ -107,7 +107,8 @@ void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
 	}
 
 	send_buf = ib_create_send_mad(agent, wc->src_qp, wc->pkey_index, 0,
-				      IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
+				      IB_MGMT_MAD_HDR,
+				      resp_mad_len - IB_MGMT_MAD_HDR,
 				      GFP_KERNEL,
 				      IB_MGMT_BASE_VERSION);
 	if (IS_ERR(send_buf)) {
@@ -115,7 +116,7 @@ void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
 		goto err1;
 	}
 
-	memcpy(send_buf->mad, mad, sizeof *mad);
+	memcpy(send_buf->mad, mad_hdr, resp_mad_len);
 	send_buf->ah = ah;
 
 	if (device->node_type == RDMA_NODE_IB_SWITCH) {
@@ -44,8 +44,8 @@ extern int ib_agent_port_open(struct ib_device *device, int port_num);
 
 extern int ib_agent_port_close(struct ib_device *device, int port_num);
 
-extern void agent_send_response(const struct ib_mad *mad, const struct ib_grh *grh,
+extern void agent_send_response(const struct ib_mad_hdr *mad_hdr, const struct ib_grh *grh,
 				const struct ib_wc *wc, const struct ib_device *device,
-				int port_num, int qpn);
+				int port_num, int qpn, size_t resp_mad_len);
 
 #endif /* __AGENT_H_ */
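With the widened prototype, callers hand over the response as a bare MAD header
pointer plus its full length instead of a fixed-size struct ib_mad. A hedged
call-site sketch (the local names response, port_priv, qp_info and wc are
assumptions for illustration, not the patch's exact caller):

	/* Hedged call-site sketch; variable names are illustrative. */
	agent_send_response((const struct ib_mad_hdr *)response->mad,
			    &response->grh, wc,
			    port_priv->device, port_num,
			    qp_info->qp->qp_num,
			    response->mad_size);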
@@ -75,12 +75,9 @@ struct ib_mad_private_header {
 
 struct ib_mad_private {
 	struct ib_mad_private_header header;
+	size_t mad_size;
 	struct ib_grh grh;
-	union {
-		struct ib_mad mad;
-		struct ib_rmpp_mad rmpp_mad;
-		struct ib_smp smp;
-	} mad;
+	u8 mad[0];
 } __attribute__ ((packed));
 
 struct ib_rmpp_segment {
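With the union removed, code that used to pick a member (mad, rmpp_mad, smp) now
casts the flex array to the view it needs, which is what point 7 of the commit
message refers to. A hedged sketch, where process_dr_smp() is a hypothetical
processing function, not part of the patch:

/* Hedged sketch: dispatching on the flex array via explicit casts. */
static void handle_recv_sketch(struct ib_mad_private *recv)
{
	const struct ib_mad_hdr *mad_hdr = (const struct ib_mad_hdr *)recv->mad;

	/* recv->mad_size, not sizeof(struct ib_mad), bounds the payload now. */
	if (mad_hdr->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		process_dr_smp((struct ib_smp *)recv->mad, recv->mad_size);
}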