Commit 5992fd19 authored by Linus Torvalds

Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband:
  IPoIB: Correct debugging output when path record lookup fails
  RDMA/cxgb3: Stop the EP Timer on BAD CLOSE
  RDMA/cxgb3: cleanups
  RDMA/cma: Remove unused node_guid from cma_device structure
  IB/cm: Remove ca_guid from cm_device structure
  RDMA/cma: Request reversible paths only
  IB/core: Set hop limit in ib_init_ah_from_wc correctly
  IB/uverbs: Return correct error for invalid PD in register MR
  IPoIB: Remove unused local_rate tracking
  IPoIB/cm: Improve small message bandwidth
  IB/mthca: Make 2 functions static
parents eafb4f18 843613b0
@@ -88,7 +88,6 @@ struct cm_port {
 struct cm_device {
 	struct list_head list;
 	struct ib_device *device;
-	__be64 ca_guid;
 	struct cm_port port[0];
 };
@@ -739,8 +738,8 @@ static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
 		ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
 		spin_unlock_irqrestore(&cm_id_priv->lock, flags);
 		ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
-			       &cm_id_priv->av.port->cm_dev->ca_guid,
-			       sizeof cm_id_priv->av.port->cm_dev->ca_guid,
+			       &cm_id_priv->id.device->node_guid,
+			       sizeof cm_id_priv->id.device->node_guid,
 			       NULL, 0);
 		break;
 	case IB_CM_REQ_RCVD:
@@ -883,7 +882,7 @@ static void cm_format_req(struct cm_req_msg *req_msg,
 	req_msg->local_comm_id = cm_id_priv->id.local_id;
 	req_msg->service_id = param->service_id;
-	req_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+	req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 	cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
 	cm_req_set_resp_res(req_msg, param->responder_resources);
 	cm_req_set_init_depth(req_msg, param->initiator_depth);
@@ -1442,7 +1441,7 @@ static void cm_format_rep(struct cm_rep_msg *rep_msg,
 	cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
 	cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
 	cm_rep_set_srq(rep_msg, param->srq);
-	rep_msg->local_ca_guid = cm_id_priv->av.port->cm_dev->ca_guid;
+	rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
 	if (param->private_data && param->private_data_len)
 		memcpy(rep_msg->private_data, param->private_data,
@@ -3385,7 +3384,6 @@ static void cm_add_one(struct ib_device *device)
 		return;
 	cm_dev->device = device;
-	cm_dev->ca_guid = device->node_guid;
 	set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
 	for (i = 1; i <= device->phys_port_cnt; i++) {
...
@@ -77,7 +77,6 @@ static int next_port;
 struct cma_device {
 	struct list_head list;
 	struct ib_device *device;
-	__be64 node_guid;
 	struct completion comp;
 	atomic_t refcount;
 	struct list_head id_list;
@@ -1492,11 +1491,13 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	ib_addr_get_dgid(addr, &path_rec.dgid);
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(addr));
 	path_rec.numb_path = 1;
+	path_rec.reversible = 1;
 	id_priv->query_id = ib_sa_path_rec_get(&sa_client, id_priv->id.device,
 				id_priv->id.port_num, &path_rec,
 				IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
-				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH,
+				IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |
+				IB_SA_PATH_REC_REVERSIBLE,
 				timeout_ms, GFP_KERNEL,
 				cma_query_handler, work, &id_priv->query);
@@ -2672,7 +2673,6 @@ static void cma_add_one(struct ib_device *device)
 		return;
 	cma_dev->device = device;
-	cma_dev->node_guid = device->node_guid;
 	init_completion(&cma_dev->comp);
 	atomic_set(&cma_dev->refcount, 1);
...
@@ -622,8 +622,10 @@ ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
 	obj->umem.virt_base = cmd.hca_va;
 	pd = idr_read_pd(cmd.pd_handle, file->ucontext);
-	if (!pd)
+	if (!pd) {
+		ret = -EINVAL;
 		goto err_release;
+	}
 	mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
 	if (IS_ERR(mr)) {
...
@@ -167,7 +167,7 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc,
 		ah_attr->grh.sgid_index = (u8) gid_index;
 		flow_class = be32_to_cpu(grh->version_tclass_flow);
 		ah_attr->grh.flow_label = flow_class & 0xFFFFF;
-		ah_attr->grh.hop_limit = grh->hop_limit;
+		ah_attr->grh.hop_limit = 0xFF;
 		ah_attr->grh.traffic_class = (flow_class >> 20) & 0xFF;
 	}
 	return 0;
...
@@ -8,5 +8,4 @@ iw_cxgb3-y := iwch_cm.o iwch_ev.o iwch_cq.o iwch_qp.o iwch_mem.o \
 ifdef CONFIG_INFINIBAND_CXGB3_DEBUG
 EXTRA_CFLAGS += -DDEBUG
-iw_cxgb3-y += cxio_dbg.o
 endif
@@ -45,7 +45,7 @@
 static LIST_HEAD(rdev_list);
 static cxio_hal_ev_callback_func_t cxio_ev_cb = NULL;
-static inline struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
+static struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
 {
 	struct cxio_rdev *rdev;
@@ -55,8 +55,7 @@ static inline struct cxio_rdev *cxio_hal_find_rdev_by_name(char *dev_name)
 	return NULL;
 }
-static inline struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev
-							     *tdev)
+static struct cxio_rdev *cxio_hal_find_rdev_by_t3cdev(struct t3cdev *tdev)
 {
 	struct cxio_rdev *rdev;
@@ -118,7 +117,7 @@ int cxio_hal_cq_op(struct cxio_rdev *rdev_p, struct t3_cq *cq,
 	return 0;
 }
-static inline int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
+static int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
 {
 	struct rdma_cq_setup setup;
 	setup.id = cqid;
@@ -130,7 +129,7 @@ static inline int cxio_hal_clear_cq_ctx(struct cxio_rdev *rdev_p, u32 cqid)
 	return (rdev_p->t3cdev_p->ctl(rdev_p->t3cdev_p, RDMA_CQ_SETUP, &setup));
 }
-int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
+static int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev_p, u32 qpid)
 {
 	u64 sge_cmd;
 	struct t3_modify_qp_wr *wqe;
@@ -425,7 +424,7 @@ void cxio_flush_hw_cq(struct t3_cq *cq)
 	}
 }
-static inline int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
+static int cqe_completes_wr(struct t3_cqe *cqe, struct t3_wq *wq)
 {
 	if (CQE_OPCODE(*cqe) == T3_TERMINATE)
 		return 0;
@@ -760,17 +759,6 @@ static int __cxio_tpt_op(struct cxio_rdev *rdev_p, u32 reset_tpt_entry,
 	return err;
 }
-/* IN : stag key, pdid, pbl_size
- * Out: stag index, actaul pbl_size, and pbl_addr allocated.
- */
-int cxio_allocate_stag(struct cxio_rdev *rdev_p, u32 * stag, u32 pdid,
-		       enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr)
-{
-	*stag = T3_STAG_UNSET;
-	return (__cxio_tpt_op(rdev_p, 0, stag, 0, pdid, TPT_NON_SHARED_MR,
-			      perm, 0, 0ULL, 0, 0, NULL, pbl_size, pbl_addr));
-}
 int cxio_register_phys_mem(struct cxio_rdev *rdev_p, u32 *stag, u32 pdid,
 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
 			   u8 page_size, __be64 *pbl, u32 *pbl_size,
@@ -1029,7 +1017,7 @@ void __exit cxio_hal_exit(void)
 	cxio_hal_destroy_rhdl_resource();
 }
-static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
+static void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 {
 	struct t3_swsq *sqp;
 	__u32 ptr = wq->sq_rptr;
@@ -1058,9 +1046,8 @@ static inline void flush_completed_wrs(struct t3_wq *wq, struct t3_cq *cq)
 			break;
 	}
-static inline void create_read_req_cqe(struct t3_wq *wq,
-				       struct t3_cqe *hw_cqe,
-				       struct t3_cqe *read_cqe)
+static void create_read_req_cqe(struct t3_wq *wq, struct t3_cqe *hw_cqe,
+				struct t3_cqe *read_cqe)
 {
 	read_cqe->u.scqe.wrid_hi = wq->oldest_read->sq_wptr;
 	read_cqe->len = wq->oldest_read->read_len;
@@ -1073,7 +1060,7 @@ static inline void create_read_req_cqe(struct t3_wq *wq,
 /*
  * Return a ptr to the next read wr in the SWSQ or NULL.
  */
-static inline void advance_oldest_read(struct t3_wq *wq)
+static void advance_oldest_read(struct t3_wq *wq)
 {
 	u32 rptr = wq->oldest_read - wq->sq + 1;
...
@@ -143,7 +143,6 @@ int cxio_rdev_open(struct cxio_rdev *rdev);
 void cxio_rdev_close(struct cxio_rdev *rdev);
 int cxio_hal_cq_op(struct cxio_rdev *rdev, struct t3_cq *cq,
 		   enum t3_cq_opcode op, u32 credit);
-int cxio_hal_clear_qp_ctx(struct cxio_rdev *rdev, u32 qpid);
 int cxio_create_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_destroy_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
 int cxio_resize_cq(struct cxio_rdev *rdev, struct t3_cq *cq);
@@ -154,8 +153,6 @@ int cxio_create_qp(struct cxio_rdev *rdev, u32 kernel_domain, struct t3_wq *wq,
 int cxio_destroy_qp(struct cxio_rdev *rdev, struct t3_wq *wq,
 		    struct cxio_ucontext *uctx);
 int cxio_peek_cq(struct t3_wq *wr, struct t3_cq *cq, int opcode);
-int cxio_allocate_stag(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
-		       enum tpt_mem_perm perm, u32 * pbl_size, u32 * pbl_addr);
 int cxio_register_phys_mem(struct cxio_rdev *rdev, u32 * stag, u32 pdid,
 			   enum tpt_mem_perm perm, u32 zbva, u64 to, u32 len,
 			   u8 page_size, __be64 *pbl, u32 *pbl_size,
@@ -171,8 +168,6 @@ int cxio_deallocate_window(struct cxio_rdev *rdev, u32 stag);
 int cxio_rdma_init(struct cxio_rdev *rdev, struct t3_rdma_init_attr *attr);
 void cxio_register_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
 void cxio_unregister_ev_cb(cxio_hal_ev_callback_func_t ev_cb);
-u32 cxio_hal_get_rhdl(void);
-void cxio_hal_put_rhdl(u32 rhdl);
 u32 cxio_hal_get_pdid(struct cxio_hal_resource *rscp);
 void cxio_hal_put_pdid(struct cxio_hal_resource *rscp, u32 pdid);
 int __init cxio_hal_init(void);
...
@@ -179,7 +179,7 @@ int cxio_hal_init_resource(struct cxio_rdev *rdev_p,
 /*
  * returns 0 if no resource available
  */
-static inline u32 cxio_hal_get_resource(struct kfifo *fifo)
+static u32 cxio_hal_get_resource(struct kfifo *fifo)
 {
 	u32 entry;
 	if (kfifo_get(fifo, (unsigned char *) &entry, sizeof(u32)))
@@ -188,21 +188,11 @@ static inline u32 cxio_hal_get_resource(struct kfifo *fifo)
 		return 0;  /* fifo emptry */
 }
-static inline void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
+static void cxio_hal_put_resource(struct kfifo *fifo, u32 entry)
 {
 	BUG_ON(kfifo_put(fifo, (unsigned char *) &entry, sizeof(u32)) == 0);
 }
-u32 cxio_hal_get_rhdl(void)
-{
-	return cxio_hal_get_resource(rhdl_fifo);
-}
-void cxio_hal_put_rhdl(u32 rhdl)
-{
-	cxio_hal_put_resource(rhdl_fifo, rhdl);
-}
 u32 cxio_hal_get_stag(struct cxio_hal_resource *rscp)
 {
 	return cxio_hal_get_resource(rscp->tpt_fifo);
...
@@ -209,8 +209,7 @@ static enum iwch_ep_state state_read(struct iwch_ep_common *epc)
 	return state;
 }
-static inline void __state_set(struct iwch_ep_common *epc,
-			       enum iwch_ep_state new)
+static void __state_set(struct iwch_ep_common *epc, enum iwch_ep_state new)
 {
 	epc->state = new;
 }
@@ -1459,7 +1458,7 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 /*
  * Returns whether an ABORT_REQ_RSS message is a negative advice.
  */
-static inline int is_neg_adv_abort(unsigned int status)
+static int is_neg_adv_abort(unsigned int status)
 {
 	return status == CPL_ERR_RTX_NEG_ADVICE ||
 	       status == CPL_ERR_PERSIST_NEG_ADVICE;
@@ -1635,6 +1634,7 @@ static int ec_status(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
 		printk(KERN_ERR MOD "%s BAD CLOSE - Aborting tid %u\n",
 		       __FUNCTION__, ep->hwtid);
+		stop_ep_timer(ep);
 		attrs.next_state = IWCH_QP_STATE_ERROR;
 		iwch_modify_qp(ep->com.qp->rhp,
 			       ep->com.qp, IWCH_QP_ATTR_NEXT_STATE,
...
@@ -948,7 +948,7 @@ void iwch_qp_rem_ref(struct ib_qp *qp)
 	wake_up(&(to_iwch_qp(qp)->wait));
 }
-struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
+static struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn)
 {
 	PDBG("%s ib_dev %p qpn 0x%x\n", __FUNCTION__, dev, qpn);
 	return (struct ib_qp *)get_qhp(to_iwch_dev(dev), qpn);
...
@@ -178,7 +178,6 @@ static inline struct iwch_qp *to_iwch_qp(struct ib_qp *ibqp)
 void iwch_qp_add_ref(struct ib_qp *qp);
 void iwch_qp_rem_ref(struct ib_qp *qp);
-struct ib_qp *iwch_get_qp(struct ib_device *dev, int qpn);
 struct iwch_ucontext {
 	struct ib_ucontext ibucontext;
...
@@ -36,8 +36,8 @@
 #define NO_SUPPORT -1
-static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
-				       u8 * flit_cnt)
+static int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
+				u8 * flit_cnt)
 {
 	int i;
 	u32 plen;
@@ -96,8 +96,8 @@ static inline int iwch_build_rdma_send(union t3_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
-static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
-					u8 *flit_cnt)
+static int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
+				 u8 *flit_cnt)
 {
 	int i;
 	u32 plen;
@@ -137,8 +137,8 @@ static inline int iwch_build_rdma_write(union t3_wr *wqe, struct ib_send_wr *wr,
 	return 0;
 }
-static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
-				       u8 *flit_cnt)
+static int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
+				u8 *flit_cnt)
 {
 	if (wr->num_sge > 1)
 		return -EINVAL;
@@ -158,9 +158,8 @@ static inline int iwch_build_rdma_read(union t3_wr *wqe, struct ib_send_wr *wr,
 /*
  * TBD: this is going to be moved to firmware. Missing pdid/qpid check for now.
  */
-static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
-				   struct ib_sge *sg_list, u32 num_sgle,
-				   u32 * pbl_addr, u8 * page_size)
+static int iwch_sgl2pbl_map(struct iwch_dev *rhp, struct ib_sge *sg_list,
+			    u32 num_sgle, u32 * pbl_addr, u8 * page_size)
 {
 	int i;
 	struct iwch_mr *mhp;
@@ -206,9 +205,8 @@ static inline int iwch_sgl2pbl_map(struct iwch_dev *rhp,
 	return 0;
 }
-static inline int iwch_build_rdma_recv(struct iwch_dev *rhp,
-				       union t3_wr *wqe,
-				       struct ib_recv_wr *wr)
+static int iwch_build_rdma_recv(struct iwch_dev *rhp, union t3_wr *wqe,
+				struct ib_recv_wr *wr)
 {
 	int i, err = 0;
 	u32 pbl_addr[4];
@@ -473,8 +471,7 @@ int iwch_bind_mw(struct ib_qp *qp,
 	return err;
 }
-static inline void build_term_codes(int t3err, u8 *layer_type, u8 *ecode,
-				    int tagged)
+static void build_term_codes(int t3err, u8 *layer_type, u8 *ecode, int tagged)
 {
 	switch (t3err) {
 	case TPT_ERR_STAG:
@@ -672,7 +669,7 @@ static void __flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 	spin_lock_irqsave(&qhp->lock, *flag);
 }
-static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 {
 	if (t3b_device(qhp->rhp))
 		cxio_set_wq_in_error(&qhp->wq);
@@ -684,7 +681,7 @@ static inline void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
 /*
  * Return non zero if at least one RECV was pre-posted.
  */
-static inline int rqes_posted(struct iwch_qp *qhp)
+static int rqes_posted(struct iwch_qp *qhp)
 {
 	return fw_riwrh_opcode((struct fw_riwrh *)qhp->wq.queue) == T3_WR_RCV;
 }
...
@@ -310,8 +310,9 @@ int mthca_write_mtt_size(struct mthca_dev *dev)
 	return mthca_is_memfree(dev) ? (PAGE_SIZE / sizeof (u64)) : 0x7ffffff;
 }
-void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
-			       int start_index, u64 *buffer_list, int list_len)
+static void mthca_tavor_write_mtt_seg(struct mthca_dev *dev,
+				      struct mthca_mtt *mtt, int start_index,
+				      u64 *buffer_list, int list_len)
 {
 	u64 __iomem *mtts;
 	int i;
@@ -323,8 +324,9 @@ void mthca_tavor_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
 			     mtts + i);
 }
-void mthca_arbel_write_mtt_seg(struct mthca_dev *dev, struct mthca_mtt *mtt,
-			       int start_index, u64 *buffer_list, int list_len)
+static void mthca_arbel_write_mtt_seg(struct mthca_dev *dev,
+				      struct mthca_mtt *mtt, int start_index,
+				      u64 *buffer_list, int list_len)
 {
 	__be64 *mtts;
 	dma_addr_t dma_handle;
...
@@ -219,7 +219,6 @@ struct ipoib_dev_priv {
 	union ib_gid local_gid;
 	u16 local_lid;
-	u8 local_rate;
 	unsigned int admin_mtu;
 	unsigned int mcast_mtu;
...
@@ -65,14 +65,14 @@ struct ipoib_cm_id {
 static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
 			       struct ib_cm_event *event);
-static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv,
+static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
 				  u64 mapping[IPOIB_CM_RX_SG])
 {
 	int i;
 	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
-	for (i = 0; i < IPOIB_CM_RX_SG - 1; ++i)
+	for (i = 0; i < frags; ++i)
 		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 }
@@ -90,7 +90,8 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
 	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
 	if (unlikely(ret)) {
 		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
-		ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[id].mapping);
+		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+				      priv->cm.srq_ring[id].mapping);
 		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
 		priv->cm.srq_ring[id].skb = NULL;
 	}
@@ -98,8 +99,8 @@ static int ipoib_cm_post_receive(struct net_device *dev, int id)
 	return ret;
 }
-static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
-				 u64 mapping[IPOIB_CM_RX_SG])
+static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev, int id, int frags,
+					     u64 mapping[IPOIB_CM_RX_SG])
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct sk_buff *skb;
@@ -107,7 +108,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
 	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
 	if (unlikely(!skb))
-		return -ENOMEM;
+		return NULL;
 	/*
 	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
@@ -119,10 +120,10 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
 			       DMA_FROM_DEVICE);
 	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
 		dev_kfree_skb_any(skb);
-		return -EIO;
+		return NULL;
 	}
-	for (i = 0; i < IPOIB_CM_RX_SG - 1; i++) {
+	for (i = 0; i < frags; i++) {
 		struct page *page = alloc_page(GFP_ATOMIC);
 		if (!page)
@@ -136,7 +137,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
 	}
 	priv->cm.srq_ring[id].skb = skb;
-	return 0;
+	return skb;
 partial_error:
@@ -146,7 +147,7 @@ static int ipoib_cm_alloc_rx_skb(struct net_device *dev, int id,
 		ib_dma_unmap_single(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
 	dev_kfree_skb_any(skb);
-	return -ENOMEM;
+	return NULL;
 }
 static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
@@ -309,7 +310,7 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
 }
 /* Adjust length of skb with fragments to match received data */
 static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
-			  unsigned int length)
+			  unsigned int length, struct sk_buff *toskb)
 {
 	int i, num_frags;
 	unsigned int size;
@@ -326,7 +327,7 @@ static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
 		if (length == 0) {
 			/* don't need this page */
-			__free_page(frag->page);
+			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
 			--skb_shinfo(skb)->nr_frags;
 		} else {
 			size = min(length, (unsigned) PAGE_SIZE);
@@ -344,10 +345,11 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	unsigned int wr_id = wc->wr_id & ~IPOIB_CM_OP_SRQ;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *newskb;
 	struct ipoib_cm_rx *p;
 	unsigned long flags;
 	u64 mapping[IPOIB_CM_RX_SG];
+	int frags;
 	ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
 		       wr_id, wc->opcode, wc->status);
@@ -383,7 +385,11 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		}
 	}
-	if (unlikely(ipoib_cm_alloc_rx_skb(dev, wr_id, mapping))) {
+	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
+					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;
+	newskb = ipoib_cm_alloc_rx_skb(dev, wr_id, frags, mapping);
+	if (unlikely(!newskb)) {
 		/*
 		 * If we can't allocate a new RX buffer, dump
 		 * this packet and reuse the old buffer.
@@ -393,13 +399,13 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 		goto repost;
 	}
-	ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[wr_id].mapping);
-	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, sizeof mapping);
+	ipoib_cm_dma_unmap_rx(priv, frags, priv->cm.srq_ring[wr_id].mapping);
+	memcpy(priv->cm.srq_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);
 	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
 		       wc->byte_len, wc->slid);
-	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len);
+	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);
 	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
 	skb->mac.raw = skb->data;
@@ -1193,7 +1199,8 @@ int ipoib_cm_dev_init(struct net_device *dev)
 	priv->cm.rx_wr.num_sge = IPOIB_CM_RX_SG;
 	for (i = 0; i < ipoib_recvq_size; ++i) {
-		if (ipoib_cm_alloc_rx_skb(dev, i, priv->cm.srq_ring[i].mapping)) {
+		if (!ipoib_cm_alloc_rx_skb(dev, i, IPOIB_CM_RX_SG - 1,
+					   priv->cm.srq_ring[i].mapping)) {
 			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
 			ipoib_cm_dev_cleanup(dev);
 			return -ENOMEM;
@@ -1228,7 +1235,8 @@ void ipoib_cm_dev_cleanup(struct net_device *dev)
 		return;
 	for (i = 0; i < ipoib_recvq_size; ++i)
 		if (priv->cm.srq_ring[i].skb) {
-			ipoib_cm_dma_unmap_rx(priv, priv->cm.srq_ring[i].mapping);
+			ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
+					      priv->cm.srq_ring[i].mapping);
 			dev_kfree_skb_any(priv->cm.srq_ring[i].skb);
 			priv->cm.srq_ring[i].skb = NULL;
 		}
...
@@ -385,7 +385,7 @@ static void path_rec_completion(int status,
 	struct sk_buff *skb;
 	unsigned long flags;
-	if (pathrec)
+	if (!status)
 		ipoib_dbg(priv, "PathRec LID 0x%04x for GID " IPOIB_GID_FMT "\n",
 			  be16_to_cpu(pathrec->dlid), IPOIB_GID_ARG(pathrec->dgid));
 	else
...
@@ -527,11 +527,9 @@ void ipoib_mcast_join_task(struct work_struct *work)
 	{
 		struct ib_port_attr attr;
-		if (!ib_query_port(priv->ca, priv->port, &attr)) {
+		if (!ib_query_port(priv->ca, priv->port, &attr))
 			priv->local_lid = attr.lid;
-			priv->local_rate = attr.active_speed *
-				ib_width_enum_to_int(attr.active_width);
-		} else
+		else
 			ipoib_warn(priv, "ib_query_port failed\n");
 	}
...