Commit 51ce5f33 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:

 "I had thought at the time of the last pull request that there wouldn't
  be much more to go, but several things just kept trickling in over the
  last week.

  Instead of just the six patches to bnxt_re that I had anticipated,
  there are another five IPoIB patches, two qedr patches, and a few
  other miscellaneous patches.

  The bnxt_re patches are more lines of diff than I like to submit this
  late in the game. That's mostly because of the first two patches in
  the series of six. I almost dropped them just because of the lines of
  churn, but on a close review, a lot of the churn came from removing
  duplicated code sections and consolidating them into callable
  routines. I felt like this made the number of lines of change more
  acceptable, and they address problems, so I left them. The remainder
  of the patches are all small, well contained, and well understood.

  These have passed 0day testing, but have not been submitted to
  linux-next (but a local merge test with your current master was
  without any conflicts).

  Summary:

   - A fix for commit eea40b8f ("infiniband: call ipv6 route lookup via
     the stub interface")

   - Six patches against bnxt_re... the first two are considerably
     larger than I would like, but as they address real issues I went
     ahead and submitted them (it also helped that a good deal of the
     churn was removing code repeated in multiple places and
     consolidating it into one common function)

   - Two fixes against qedr that just came in

   - One fix against rxe that took a few revisions to get right, plus
     time to get the proper reviews

   - Five late-breaking IPoIB fixes

   - One late cxgb4 fix"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma:
  rdma/cxgb4: Fix memory leaks during module exit
  IB/ipoib: Fix memory leak in create child syscall
  IB/ipoib: Fix access to un-initialized napi struct
  IB/ipoib: Delete napi in device uninit default
  IB/ipoib: Limit call to free rdma_netdev for capable devices
  IB/ipoib: Fix memory leaks for child interfaces priv
  rxe: Fix a sleep-in-atomic bug in post_one_send
  RDMA/qedr: Add 64KB PAGE_SIZE support to user-space queues
  RDMA/qedr: Initialize byte_len in WC of READ and SEND commands
  RDMA/bnxt_re: Remove FMR support
  RDMA/bnxt_re: Fix RQE posting logic
  RDMA/bnxt_re: Add HW workaround for avoiding stall for UD QPs
  RDMA/bnxt_re: Dereg MR in FW before freeing the fast_reg_page_list
  RDMA/bnxt_re: HW workarounds for handling specific conditions
  RDMA/bnxt_re: Fixing the Control path command and response handling
  IB/addr: Fix setting source address in addr6_resolve()
parents f69d64de d4702645
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 		return ret;
 
 	rt = (struct rt6_info *)dst;
-	if (ipv6_addr_any(&fl6.saddr)) {
-		ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
-					 &fl6.daddr, 0, &fl6.saddr);
-		if (ret)
-			goto put;
-
+	if (ipv6_addr_any(&src_in->sin6_addr)) {
 		src_in->sin6_family = AF_INET6;
 		src_in->sin6_addr = fl6.saddr;
 	}
@@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
 	*pdst = dst;
 	return 0;
 
-put:
-	dst_release(dst);
-	return ret;
 }
 #else
 static int addr6_resolve(struct sockaddr_in6 *src_in,
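For context on the first hunk: since commit eea40b8f the ipv6 stub route lookup fills in fl6.saddr itself, so testing fl6.saddr for the any-address no longer detects a caller that left the source unspecified; the test has to move to the caller-supplied sockaddr. A condensed sketch of the corrected tail of addr6_resolve() (reflowed from the hunk above, not verbatim upstream code):

	rt = (struct rt6_info *)dst;
	/* Caller passed in6addr_any: adopt the source address the stub
	 * route lookup already selected into fl6.saddr.
	 */
	if (ipv6_addr_any(&src_in->sin6_addr)) {
		src_in->sin6_family = AF_INET6;
		src_in->sin6_addr = fl6.saddr;
	}

With the manual ipv6_dev_get_saddr() call gone, the put: unwind label in the second hunk has no remaining user, which is why it is deleted too.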
--- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h
+++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h
@@ -56,6 +56,10 @@
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_CQ_COUNT		(64 * 1024)
 
+#define BNXT_RE_UD_QP_HW_STALL		0x400000
+
+#define BNXT_RE_RQ_WQE_THRESHOLD	32
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;
[diff collapsed: drivers/infiniband/hw/bnxt_re/ib_verbs.c]
--- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h
+++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
@@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
 	u32			refcnt;
 };
 
+#define BNXT_RE_FENCE_BYTES	64
+struct bnxt_re_fence_data {
+	u32 size;
+	u8 va[BNXT_RE_FENCE_BYTES];
+	dma_addr_t dma_addr;
+	struct bnxt_re_mr *mr;
+	struct ib_mw *mw;
+	struct bnxt_qplib_swqe bind_wqe;
+	u32 bind_rkey;
+};
+
 struct bnxt_re_pd {
 	struct bnxt_re_dev	*rdev;
 	struct ib_pd		ib_pd;
 	struct bnxt_qplib_pd	qplib_pd;
 	struct bnxt_qplib_dpi	dpi;
+	struct bnxt_re_fence_data fence;
 };
 
 struct bnxt_re_ah {
@@ -62,6 +74,7 @@ struct bnxt_re_qp {
 	struct bnxt_re_dev	*rdev;
 	struct ib_qp		ib_qp;
 	spinlock_t		sq_lock;	/* protect sq */
+	spinlock_t		rq_lock;	/* protect rq */
 	struct bnxt_qplib_qp	qplib_qp;
 	struct ib_umem		*sumem;
 	struct ib_umem		*rumem;
@@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
 struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
 			       u32 max_num_sg);
 int bnxt_re_dereg_mr(struct ib_mr *mr);
-struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags,
-				 struct ib_fmr_attr *fmr_attr);
-int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len,
-			 u64 iova);
-int bnxt_re_unmap_fmr(struct list_head *fmr_list);
-int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
+struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
+			       struct ib_udata *udata);
+int bnxt_re_dealloc_mw(struct ib_mw *mw);
 struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				  u64 virt_addr, int mr_access_flags,
 				  struct ib_udata *udata);
--- a/drivers/infiniband/hw/bnxt_re/main.c
+++ b/drivers/infiniband/hw/bnxt_re/main.c
@@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
 	ibdev->dereg_mr			= bnxt_re_dereg_mr;
 	ibdev->alloc_mr			= bnxt_re_alloc_mr;
 	ibdev->map_mr_sg		= bnxt_re_map_mr_sg;
-	ibdev->alloc_fmr		= bnxt_re_alloc_fmr;
-	ibdev->map_phys_fmr		= bnxt_re_map_phys_fmr;
-	ibdev->unmap_fmr		= bnxt_re_unmap_fmr;
-	ibdev->dealloc_fmr		= bnxt_re_dealloc_fmr;
 	ibdev->reg_user_mr		= bnxt_re_reg_user_mr;
 	ibdev->alloc_ucontext		= bnxt_re_alloc_ucontext;
[diff collapsed: drivers/infiniband/hw/bnxt_re/qplib_fp.c]
--- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h
@@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
 
 struct bnxt_qplib_swqe {
 	/* General */
+#define	BNXT_QPLIB_FENCE_WRID	0x46454E43	/* "FENC" */
 	u64				wr_id;
 	u8				reqs_type;
 	u8				type;
@@ -216,9 +217,16 @@ struct bnxt_qplib_q {
 	struct scatterlist		*sglist;
 	u32				nmap;
 	u32				max_wqe;
+	u16				q_full_delta;
 	u16				max_sge;
 	u32				psn;
 	bool				flush_in_progress;
+	bool				condition;
+	bool				single;
+	bool				send_phantom;
+	u32				phantom_wqe_cnt;
+	u32				phantom_cqe_cnt;
+	u32				next_cq_cons;
 };
 
 struct bnxt_qplib_qp {
@@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
 	u8				timeout;
 	u8				retry_cnt;
 	u8				rnr_retry;
+	u64				wqe_cnt;
 	u32				min_rnr_timer;
 	u32				max_rd_atomic;
 	u32				max_dest_rd_atomic;
@@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
 	(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) ==		\
 	   !((raw_cons) & (cp_bit)))
 
+static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
+{
+	return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
+		       &qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
+						 &qplib_q->hwq);
+}
+
 struct bnxt_qplib_cqe {
 	u8				status;
 	u8				type;
@@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
 int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
 int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
-		       int num);
+		       int num, struct bnxt_qplib_qp **qp);
 void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
 void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
 int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);
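The new bnxt_qplib_queue_full() helper declares the ring full once prod is within q_full_delta slots of cons, keeping a guard band in reserve (see BNXT_QPLIB_RESERVED_QP_WRS in qplib_sp.h below) rather than letting prod catch cons exactly. A standalone sketch of the same arithmetic on a power-of-two ring; all names here are illustrative, not the driver's:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Free-running indices masked to the ring size, like HWQ_CMP(). */
struct ring {
	uint32_t prod;		/* producer index */
	uint32_t cons;		/* consumer index */
	uint32_t max_elements;	/* power of two */
	uint32_t full_delta;	/* guard slots held in reserve */
};

static uint32_t ring_cmp(uint32_t idx, const struct ring *r)
{
	return idx & (r->max_elements - 1);
}

/* Full once prod + full_delta would land on cons. */
static bool ring_full(const struct ring *r)
{
	return ring_cmp(r->prod + r->full_delta, r) == ring_cmp(r->cons, r);
}

int main(void)
{
	struct ring r = { .prod = 0, .cons = 0,
			  .max_elements = 8, .full_delta = 1 };

	while (!ring_full(&r))
		r.prod++;	/* post one WQE */
	printf("posted %u of %u slots\n",
	       (unsigned)r.prod, (unsigned)r.max_elements); /* 7 of 8 */
	return 0;
}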
[diff collapsed: drivers/infiniband/hw/bnxt_re/qplib_rcfw.c]
--- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
@@ -73,6 +73,7 @@
 #define RCFW_MAX_OUTSTANDING_CMD	BNXT_QPLIB_CMDQE_MAX_CNT
 #define RCFW_MAX_COOKIE_VALUE		0x7FFF
 #define RCFW_CMD_IS_BLOCKING		0x8000
+#define RCFW_BLOCKED_CMD_WAIT_COUNT	0x4E20
 
 /* Cmdq contains a fix number of a 16-Byte slots */
 struct bnxt_qplib_cmdqe {
@@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
 	u8			data[1024];
 };
 
-/* CRSQ SB */
-#define BNXT_QPLIB_CRSBE_MAX_CNT	4
-#define BNXT_QPLIB_CRSBE_UNITS		sizeof(struct bnxt_qplib_crsbe)
-#define BNXT_QPLIB_CRSBE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
-
-#define MAX_CRSB_IDX			(BNXT_QPLIB_CRSBE_MAX_CNT - 1)
-#define MAX_CRSB_IDX_PER_PG		(BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
-
-static inline u32 get_crsb_pg(u32 val)
-{
-	return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
-}
-
-static inline u32 get_crsb_idx(u32 val)
-{
-	return val & MAX_CRSB_IDX_PER_PG;
-}
-
-static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
-					    u32 prod, dma_addr_t *dma_addr)
-{
-	*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
-	*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
-		     BNXT_QPLIB_CRSBE_UNITS;
-}
-
 /* CREQ */
 /* Allocate 1 per QP for async error notification for now */
 #define BNXT_QPLIB_CREQE_MAX_CNT	(64 * 1024)
@@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
 #define CREQ_DB(db, raw_cons, cp_bit)				\
 	writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
 
+#define CREQ_ENTRY_POLL_BUDGET		0x100
+
 /* HWQ */
 
-struct bnxt_qplib_crsqe {
-	struct creq_qp_event	qp_event;
+struct bnxt_qplib_crsq {
+	struct creq_qp_event	*resp;
 	u32			req_size;
 };
 
-struct bnxt_qplib_crsq {
-	struct bnxt_qplib_crsqe	*crsq;
-	u32			prod;
-	u32			cons;
-	u32			max_elements;
+struct bnxt_qplib_rcfw_sbuf {
+	void *sb;
+	dma_addr_t dma_addr;
+	u32 size;
 };
 
 /* RCFW Communication Channels */
@@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
 	wait_queue_head_t	waitq;
 	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
 					       struct creq_func_event *);
-	atomic_t		seq_num;
+	u32			seq_num;
 
 	/* Bar region info */
 	void __iomem		*cmdq_bar_reg_iomem;
@@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
 	/* Actual Cmd and Resp Queues */
 	struct bnxt_qplib_hwq	cmdq;
-	struct bnxt_qplib_crsq	crsq;
-	struct bnxt_qplib_hwq	crsb;
+	struct bnxt_qplib_crsq	*crsqe_tbl;
 };
 
 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
 				     (struct bnxt_qplib_rcfw *,
 				      struct creq_func_event *));
 
-int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie);
-void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
-				   struct cmdq_base *req, void **crsbe,
-				   u8 is_block);
+struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
+				struct bnxt_qplib_rcfw *rcfw,
+				u32 size);
+void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
+			       struct bnxt_qplib_rcfw_sbuf *sbuf);
+int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
+				 struct cmdq_base *req, struct creq_base *resp,
+				 void *sbuf, u8 is_block);
 
 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
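The rework replaces the shared CRSB pages and the prod/cons CRSQ with a cookie-indexed crsqe_tbl plus an optional per-command side buffer for commands that return bulk data; that is also why the old block_for_resp()/wait_for_resp() entry points disappear. A hedged sketch of how a caller might drive the new API, inferred only from the declarations above — the query-function command/response types and the exact sbuf argument wiring are assumptions, not part of this hunk:

	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct cmdq_query_func req = {};	/* assumed command type */
	struct creq_query_func_resp resp = {};	/* assumed response type */
	int rc;

	/* Commands returning bulk data get a DMA-able side buffer;
	 * resp_payload_size is a stand-in for the expected payload size.
	 */
	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, resp_payload_size);
	if (!sbuf)
		return -ENOMEM;

	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
					  (struct creq_base *)&resp,
					  sbuf, 0 /* !is_block */);

	/* On success the payload is at sbuf->sb; consume it, then free. */
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;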
--- a/drivers/infiniband/hw/bnxt_re/qplib_res.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h
@@ -48,6 +48,10 @@ extern const struct bnxt_qplib_gid bnxt_qplib_gid_zero;
 
 #define HWQ_CMP(idx, hwq)	((idx) & ((hwq)->max_elements - 1))
 
+#define HWQ_FREE_SLOTS(hwq)	(hwq->max_elements - \
+				((HWQ_CMP(hwq->prod, hwq)\
+				- HWQ_CMP(hwq->cons, hwq))\
+				& (hwq->max_elements - 1)))
 enum bnxt_qplib_hwq_type {
 	HWQ_TYPE_CTX,
 	HWQ_TYPE_QUEUE,
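A standalone check of the HWQ_FREE_SLOTS() arithmetic: on a power-of-two ring, used = (prod - cons) masked to the ring size and free = max - used, and the masking keeps the result correct across index wrap-around. The helper below is illustrative, not the driver's:

#include <stdint.h>
#include <stdio.h>

static uint32_t free_slots(uint32_t prod, uint32_t cons, uint32_t max)
{
	/* Same shape as HWQ_FREE_SLOTS(): max - (masked prod - masked cons) */
	return max - (((prod & (max - 1)) - (cons & (max - 1))) & (max - 1));
}

int main(void)
{
	/* 16-entry ring: 5 posted, 2 consumed -> 3 in flight, 13 free. */
	printf("%u\n", (unsigned)free_slots(5, 2, 16));		/* prints 13 */
	/* Wrapped free-running indices give the same answer. */
	printf("%u\n", (unsigned)free_slots(18, 15, 16));	/* prints 13 */
	return 0;
}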
[diff collapsed: drivers/infiniband/hw/bnxt_re/qplib_sp.c]
--- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h
+++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h
@@ -40,6 +40,8 @@
 #ifndef __BNXT_QPLIB_SP_H__
 #define __BNXT_QPLIB_SP_H__
 
+#define BNXT_QPLIB_RESERVED_QP_WRS	128
+
 struct bnxt_qplib_dev_attr {
 	char				fw_ver[32];
 	u16				max_sgid;
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -767,7 +767,7 @@ void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
 		kfree(entry);
 	}
 
-	list_for_each_safe(pos, nxt, &uctx->qpids) {
+	list_for_each_safe(pos, nxt, &uctx->cqids) {
 		entry = list_entry(pos, struct c4iw_qid_list, entry);
 		list_del_init(&entry->entry);
 		kfree(entry);
@@ -880,13 +880,15 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
 	if (!rdev->free_workq) {
 		err = -ENOMEM;
-		goto err_free_status_page;
+		goto err_free_status_page_and_wr_log;
 	}
 
 	rdev->status_page->db_off = 0;
 
 	return 0;
-err_free_status_page:
+err_free_status_page_and_wr_log:
+	if (c4iw_wr_log && rdev->wr_log)
+		kfree(rdev->wr_log);
 	free_page((unsigned long)rdev->status_page);
 destroy_ocqp_pool:
 	c4iw_ocqp_pool_destroy(rdev);
@@ -903,9 +905,11 @@ static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
 	destroy_workqueue(rdev->free_workq);
 	kfree(rdev->wr_log);
+	c4iw_release_dev_ucontext(rdev, &rdev->uctx);
 	free_page((unsigned long)rdev->status_page);
 	c4iw_pblpool_destroy(rdev);
 	c4iw_rqtpool_destroy(rdev);
+	c4iw_ocqp_pool_destroy(rdev);
 	c4iw_destroy_resource(&rdev->resource);
 }
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3692,8 +3692,10 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
 	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;
 	dev->ib_dev.get_dev_fw_str	= get_dev_fw_str;
-	dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
-	dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
+	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads)) {
+		dev->ib_dev.alloc_rdma_netdev	= mlx5_ib_alloc_rdma_netdev;
+		dev->ib_dev.free_rdma_netdev	= mlx5_ib_free_rdma_netdev;
+	}
 	if (mlx5_core_is_pf(mdev)) {
 		dev->ib_dev.get_vf_config	= mlx5_ib_get_vf_config;
 		dev->ib_dev.set_vf_link_state	= mlx5_ib_set_vf_link_state;
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -58,7 +58,10 @@
 #define QEDR_MSG_QP   " QP"
 #define QEDR_MSG_GSI  " GSI"
 
 #define QEDR_CQ_MAGIC_NUMBER	(0x11223344)
 
+#define FW_PAGE_SIZE	(RDMA_RING_PAGE_SIZE)
+#define FW_PAGE_SHIFT	(12)
+
 struct qedr_dev;
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -653,14 +653,15 @@ static int qedr_prepare_pbl_tbl(struct qedr_dev *dev,
 
 static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 			       struct qedr_pbl *pbl,
-			       struct qedr_pbl_info *pbl_info)
+			       struct qedr_pbl_info *pbl_info, u32 pg_shift)
 {
 	int shift, pg_cnt, pages, pbe_cnt, total_num_pbes = 0;
+	u32 fw_pg_cnt, fw_pg_per_umem_pg;
 	struct qedr_pbl *pbl_tbl;
 	struct scatterlist *sg;
 	struct regpair *pbe;
+	u64 pg_addr;
 	int entry;
-	u32 addr;
 
 	if (!pbl_info->num_pbes)
 		return;
@@ -683,29 +684,35 @@ static void qedr_populate_pbls(struct qedr_dev *dev, struct ib_umem *umem,
 
 	shift = umem->page_shift;
 
+	fw_pg_per_umem_pg = BIT(umem->page_shift - pg_shift);
+
 	for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {
 		pages = sg_dma_len(sg) >> shift;
+		pg_addr = sg_dma_address(sg);
 		for (pg_cnt = 0; pg_cnt < pages; pg_cnt++) {
-			/* store the page address in pbe */
-			pbe->lo = cpu_to_le32(sg_dma_address(sg) +
-					      (pg_cnt << shift));
-			addr = upper_32_bits(sg_dma_address(sg) +
-					     (pg_cnt << shift));
-			pbe->hi = cpu_to_le32(addr);
-			pbe_cnt++;
-			total_num_pbes++;
-			pbe++;
+			for (fw_pg_cnt = 0; fw_pg_cnt < fw_pg_per_umem_pg;) {
+				pbe->lo = cpu_to_le32(pg_addr);
+				pbe->hi = cpu_to_le32(upper_32_bits(pg_addr));
+
+				pg_addr += BIT(pg_shift);
+				pbe_cnt++;
+				total_num_pbes++;
+				pbe++;
 
-			if (total_num_pbes == pbl_info->num_pbes)
-				return;
+				if (total_num_pbes == pbl_info->num_pbes)
+					return;
 
-			/* If the given pbl is full storing the pbes,
-			 * move to next pbl.
-			 */
-			if (pbe_cnt == (pbl_info->pbl_size / sizeof(u64))) {
-				pbl_tbl++;
-				pbe = (struct regpair *)pbl_tbl->va;
-				pbe_cnt = 0;
+				/* If the given pbl is full storing the pbes,
+				 * move to next pbl.
+				 */
+				if (pbe_cnt ==
+				    (pbl_info->pbl_size / sizeof(u64))) {
+					pbl_tbl++;
+					pbe = (struct regpair *)pbl_tbl->va;
+					pbe_cnt = 0;
+				}
+				fw_pg_cnt++;
 			}
 		}
 	}
@@ -754,7 +761,7 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 				       u64 buf_addr, size_t buf_len,
 				       int access, int dmasync)
 {
-	int page_cnt;
+	u32 fw_pages;
 	int rc;
 
 	q->buf_addr = buf_addr;
@@ -766,8 +773,10 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		return PTR_ERR(q->umem);
 	}
 
-	page_cnt = ib_umem_page_count(q->umem);
-	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, page_cnt, 0);
+	fw_pages = ib_umem_page_count(q->umem) <<
+		   (q->umem->page_shift - FW_PAGE_SHIFT);
+
+	rc = qedr_prepare_pbl_tbl(dev, &q->pbl_info, fw_pages, 0);
 	if (rc)
 		goto err0;
@@ -777,7 +786,8 @@ static inline int qedr_init_user_queue(struct ib_ucontext *ib_ctx,
 		goto err0;
 	}
 
-	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info);
+	qedr_populate_pbls(dev, q->umem, q->pbl_tbl, &q->pbl_info,
+			   FW_PAGE_SHIFT);
 
 	return 0;
@@ -2226,7 +2236,7 @@ struct ib_mr *qedr_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
 		goto err1;
 
 	qedr_populate_pbls(dev, mr->umem, mr->info.pbl_table,
-			   &mr->info.pbl_info);
+			   &mr->info.pbl_info, mr->umem->page_shift);
 
 	rc = dev->ops->rdma_alloc_tid(dev->rdma_ctx, &mr->hw_mr.itid);
 	if (rc) {
@@ -3209,6 +3219,10 @@ static int process_req(struct qedr_dev *dev, struct qedr_qp *qp,
 		case IB_WC_REG_MR:
 			qp->wqe_wr_id[qp->sq.cons].mr->info.completed++;
 			break;
+		case IB_WC_RDMA_READ:
+		case IB_WC_SEND:
+			wc->byte_len = qp->wqe_wr_id[qp->sq.cons].bytes_len;
+			break;
 		default:
 			break;
 		}
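The 64KB PAGE_SIZE fix splits each umem page into firmware-sized 4KB pages: fw_pg_per_umem_pg = 2^(page_shift - FW_PAGE_SHIFT), and the PBE walk now advances pg_addr by BIT(pg_shift) rather than by a whole umem page. A standalone check of that arithmetic; the host page size and addresses are illustrative:

#include <stdint.h>
#include <stdio.h>

#define FW_PAGE_SHIFT 12			/* firmware uses 4KB pages */

int main(void)
{
	unsigned int umem_page_shift = 16;	/* e.g. a 64KB-page host */
	unsigned int umem_page_cnt = 4;		/* 4 x 64KB = 256KB queue */

	unsigned int fw_pg_per_umem_pg =
		1u << (umem_page_shift - FW_PAGE_SHIFT);
	unsigned int fw_pages =
		umem_page_cnt << (umem_page_shift - FW_PAGE_SHIFT);

	printf("fw pages per umem page: %u\n", fw_pg_per_umem_pg); /* 16 */
	printf("total fw pages:         %u\n", fw_pages);          /* 64 */

	/* Each PBE advances by one FW page, as in the inner loop above. */
	uint64_t pg_addr = 0x10000;		/* illustrative DMA address */
	for (unsigned int i = 0; i < fw_pg_per_umem_pg; i++)
		pg_addr += 1u << FW_PAGE_SHIFT;
	printf("end of first umem page: 0x%llx\n",
	       (unsigned long long)pg_addr);	/* 0x20000 */
	return 0;
}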
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -740,13 +740,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
 
 	sge = ibwr->sg_list;
 	for (i = 0; i < num_sge; i++, sge++) {
-		if (qp->is_user && copy_from_user(p, (__user void *)
-				    (uintptr_t)sge->addr, sge->length))
-			return -EFAULT;
-		else if (!qp->is_user)
-			memcpy(p, (void *)(uintptr_t)sge->addr,
-			       sge->length);
-
+		memcpy(p, (void *)(uintptr_t)sge->addr,
+		       sge->length);
 		p += sge->length;
 	}
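Why this is a sleep-in-atomic bug: post_one_send() holds the send queue's spinlock with interrupts disabled by the time init_send_wqe() copies inline data, and copy_from_user() may fault in the source page and sleep. Dropping the branch outright relies on user QPs posting work requests through their own mapped queues, so this kernel path only ever copies from kernel SGE addresses. A hedged sketch of the removed pattern; identifiers are illustrative, not the rxe driver's:

	spin_lock_irqsave(&sq->lock, flags);	/* atomic context begins */

	if (copy_from_user(dst, usrc, len))	/* BUG: may fault and sleep */
		err = -EFAULT;

	spin_unlock_irqrestore(&sq->lock, flags);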
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -863,7 +863,6 @@ int ipoib_ib_dev_open(struct net_device *dev)
 	set_bit(IPOIB_STOP_REAPER, &priv->flags);
 	cancel_delayed_work(&priv->ah_reap_task);
 	set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-	napi_enable(&priv->napi);
 	ipoib_ib_dev_stop(dev);
 	return -1;
 }
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -1596,6 +1596,8 @@ static void ipoib_dev_uninit_default(struct net_device *dev)
 
 	ipoib_transport_dev_cleanup(dev);
 
+	netif_napi_del(&priv->napi);
+
 	ipoib_cm_dev_cleanup(dev);
 
 	kfree(priv->rx_ring);
@@ -1649,6 +1651,7 @@ static int ipoib_dev_init_default(struct net_device *dev)
 
 	kfree(priv->rx_ring);
 out:
+	netif_napi_del(&priv->napi);
 	return -ENOMEM;
 }
@@ -2237,6 +2240,7 @@ static struct net_device *ipoib_add_port(const char *format,
 
 device_init_failed:
 	free_netdev(priv->dev);
+	kfree(priv);
 
 alloc_mem_failed:
 	return ERR_PTR(result);
@@ -2277,7 +2281,7 @@ static void ipoib_add_one(struct ib_device *device)
 static void ipoib_remove_one(struct ib_device *device, void *client_data)
 {
-	struct ipoib_dev_priv *priv, *tmp;
+	struct ipoib_dev_priv *priv, *tmp, *cpriv, *tcpriv;
 	struct list_head *dev_list = client_data;
 
 	if (!dev_list)
@@ -2300,7 +2304,14 @@ static void ipoib_remove_one(struct ib_device *device, void *client_data)
 		flush_workqueue(priv->wq);
 
 		unregister_netdev(priv->dev);
-		free_netdev(priv->dev);
+		if (device->free_rdma_netdev)
+			device->free_rdma_netdev(priv->dev);
+		else
+			free_netdev(priv->dev);
+
+		list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list)
+			kfree(cpriv);
+
 		kfree(priv);
 	}
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -133,13 +133,13 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 	snprintf(intf_name, sizeof intf_name, "%s.%04x",
 		 ppriv->dev->name, pkey);
 
-	if (!rtnl_trylock())
-		return restart_syscall();
-
 	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
 	if (!priv)
 		return -ENOMEM;
 
+	if (!rtnl_trylock())
+		return restart_syscall();
+
 	down_write(&ppriv->vlan_rwsem);
 
 	/*
@@ -167,8 +167,10 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
 
 	rtnl_unlock();
 
-	if (result)
+	if (result) {
 		free_netdev(priv->dev);
+		kfree(priv);
+	}
 
 	return result;
 }
@@ -209,6 +211,7 @@ int ipoib_vlan_delete(struct net_device *pdev, unsigned short pkey)
 
 	if (dev) {
 		free_netdev(dev);
+		kfree(priv);
 		return 0;
 	}
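The reorder in the first hunk is not cosmetic: the old sequence could take the RTNL via rtnl_trylock() and then return -ENOMEM from the failed allocation without ever unlocking it. Allocating first means the function never returns holding the lock; a minimal sketch of the corrected shape, condensed from the hunk above:

	priv = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
	if (!priv)
		return -ENOMEM;			/* no locks held yet */

	if (!rtnl_trylock())
		return restart_syscall();	/* userspace retries the call */

	/* ... register the child interface under RTNL ... */
	rtnl_unlock();

The paired kfree(priv) additions in the later hunks close the leak named in "IB/ipoib: Fix memory leaks for child interfaces priv": in this tree the private data is allocated separately from the netdev, so free_netdev() alone did not release it.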