Commit 1848757c authored by Doug Ledford

Merge branches 'hns' and 'misc' into k.o/for-next

Signed-off-by: Doug Ledford <dledford@redhat.com>
 menuconfig INFINIBAND
 	tristate "InfiniBand support"
-	depends on PCI || BROKEN
 	depends on HAS_IOMEM
 	depends on NET
 	depends on INET
...
@@ -47,21 +47,28 @@
 #include <rdma/ib_umem.h>
 #include <rdma/ib_user_verbs.h>
 
-#define INIT_UDATA(udata, ibuf, obuf, ilen, olen)			\
-	do {								\
-		(udata)->inbuf = (const void __user *) (ibuf);		\
-		(udata)->outbuf = (void __user *) (obuf);		\
-		(udata)->inlen = (ilen);				\
-		(udata)->outlen = (olen);				\
-	} while (0)
+static inline void
+ib_uverbs_init_udata(struct ib_udata *udata,
+		     const void __user *ibuf,
+		     void __user *obuf,
+		     size_t ilen, size_t olen)
+{
+	udata->inbuf = ibuf;
+	udata->outbuf = obuf;
+	udata->inlen = ilen;
+	udata->outlen = olen;
+}
 
-#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)			\
-	do {									\
-		(udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL;	\
-		(udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL;	\
-		(udata)->inlen = (ilen);					\
-		(udata)->outlen = (olen);					\
-	} while (0)
+static inline void
+ib_uverbs_init_udata_buf_or_null(struct ib_udata *udata,
+				 const void __user *ibuf,
+				 void __user *obuf,
+				 size_t ilen, size_t olen)
+{
+	ib_uverbs_init_udata(udata,
+			     ilen ? ibuf : NULL, olen ? obuf : NULL,
+			     ilen, olen);
+}
 
 /*
  * Our lifetime rules for these structs are the following:
...
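For illustration only, a hypothetical caller that is not part of this commit: the new inline functions are drop-in replacements for the removed macros, with the compiler now type-checking the __user buffer pointers and lengths at each call site instead of hiding casts inside a macro body.

/* Hypothetical call-site sketch; ib_uverbs_init_udata() is as defined in
 * the hunk above, u64_to_user_ptr() as used later in this same commit. */
static void example_setup_udata(struct ib_udata *udata,
				const char __user *buf, u64 response,
				size_t in_len, size_t out_len)
{
	/* Before: INIT_UDATA(udata, buf, response, in_len, out_len);
	 * after the conversion, an ordinary type-checked call: */
	ib_uverbs_init_udata(udata, buf, u64_to_user_ptr(response),
			     in_len, out_len);
}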
@@ -376,7 +376,7 @@ static struct uverbs_method_spec *build_method_with_attrs(const struct uverbs_me
 			      min_id) ||
 		    WARN(attr_obj_with_special_access &&
 			 !(attr->flags & UVERBS_ATTR_SPEC_F_MANDATORY),
-			 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy aceess but isn't mandatory\n",
+			 "ib_uverbs: Tried to merge attr (%d) but it's an object with new/destroy access but isn't mandatory\n",
 			 min_id) ||
 		    WARN(IS_ATTR_OBJECT(attr) &&
 			 attr->flags & UVERBS_ATTR_SPEC_F_MIN_SZ,
...
@@ -763,7 +763,7 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 		}
 
 		if (!access_ok(VERIFY_WRITE,
-			       (void __user *) (unsigned long) ex_hdr.response,
+			       u64_to_user_ptr(ex_hdr.response),
 			       (hdr.out_words + ex_hdr.provider_out_words) * 8)) {
 			ret = -EFAULT;
 			goto out;
@@ -775,19 +775,17 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 			}
 		}
 
-		INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
-				       hdr.in_words * 8, hdr.out_words * 8);
+		ib_uverbs_init_udata_buf_or_null(&ucore, buf,
+					u64_to_user_ptr(ex_hdr.response),
+					hdr.in_words * 8, hdr.out_words * 8);
 
-		INIT_UDATA_BUF_OR_NULL(&uhw,
+		ib_uverbs_init_udata_buf_or_null(&uhw,
 				       buf + ucore.inlen,
-				       (unsigned long) ex_hdr.response + ucore.outlen,
+				       u64_to_user_ptr(ex_hdr.response) + ucore.outlen,
 				       ex_hdr.provider_in_words * 8,
 				       ex_hdr.provider_out_words * 8);
 
-		ret = uverbs_ex_cmd_table[command](file,
-						   ib_dev,
-						   &ucore,
-						   &uhw);
+		ret = uverbs_ex_cmd_table[command](file, ib_dev, &ucore, &uhw);
 		if (!ret)
 			ret = written_count;
 	} else {
...
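For reference, the helper's definition lives in include/linux/kernel.h, not in this diff; quoted from memory, so treat it as approximate. u64_to_user_ptr() replaces the open-coded (void __user *)(unsigned long) double cast and adds a type check, so a caller cannot silently pass a narrower integer than the u64 ABI field:

/* Approximate sketch of the existing kernel helper; typecheck() forces
 * callers to really pass a u64 rather than truncating on 32-bit. */
#define u64_to_user_ptr(x)		\
({					\
	typecheck(u64, (x));		\
	(void __user *)(uintptr_t)(x);	\
})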
@@ -246,7 +246,8 @@ static void create_udata(struct uverbs_attr_bundle *ctx,
 		outbuf_len = uhw_out->ptr_attr.len;
 	}
 
-	INIT_UDATA_BUF_OR_NULL(udata, inbuf, outbuf, inbuf_len, outbuf_len);
+	ib_uverbs_init_udata_buf_or_null(udata, inbuf, outbuf, inbuf_len,
+					 outbuf_len);
 }
 
 static int uverbs_create_cq_handler(struct ib_device *ib_dev,
...
 config INFINIBAND_CXGB3
 	tristate "Chelsio RDMA Driver"
-	depends on CHELSIO_T3 && INET
+	depends on CHELSIO_T3
 	select GENERIC_ALLOCATOR
 	---help---
 	  This is an iWARP/RDMA driver for the Chelsio T3 1GbE and
...
@@ -404,12 +404,10 @@ static void insert_sq_cqe(struct t3_wq *wq, struct t3_cq *cq,
 int cxio_flush_sq(struct t3_wq *wq, struct t3_cq *cq, int count)
 {
-	__u32 ptr;
+	__u32 ptr = wq->sq_rptr + count;
 	int flushed = 0;
-	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(wq->sq_rptr, wq->sq_size_log2);
-
-	ptr = wq->sq_rptr + count;
-	sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
+	struct t3_swsq *sqp = wq->sq + Q_PTR2IDX(ptr, wq->sq_size_log2);
 
 	while (ptr != wq->sq_wptr) {
 		sqp->signaled = 0;
 		insert_sq_cqe(wq, cq, sqp);
...
 config INFINIBAND_CXGB4
 	tristate "Chelsio T4/T5 RDMA Driver"
-	depends on CHELSIO_T4 && INET && (IPV6 || IPV6=n)
+	depends on CHELSIO_T4 && INET
 	select CHELSIO_LIB
 	select GENERIC_ALLOCATOR
 	---help---
...
@@ -144,7 +144,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret)
 		goto err4;
-	pr_debug("%s wait_event wr_wait %p\n", __func__, &wr_wait);
+	pr_debug("wait_event wr_wait %p\n", &wr_wait);
 	ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__);
 	if (ret)
 		goto err4;
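A note on the __func__ removals in this and the hunks that follow (the rationale is an inference, not stated in the diff): with CONFIG_DYNAMIC_DEBUG, pr_debug() output can be decorated with the calling function's name at runtime, so hard-coding __func__ into every format string is redundant. A minimal sketch of the mechanism:

/* The dynamic-debug 'f' flag prepends the function name per callsite,
 * e.g. (as root):
 *
 *   echo 'module iw_cxgb4 +pf' > /sys/kernel/debug/dynamic_debug/control
 *
 * after which the stripped-down callsite above still prints as
 * "create_cq: wait_event wr_wait ..." with no explicit __func__ argument. */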
@@ -178,7 +178,7 @@ static void insert_recv_cqe(struct t4_wq *wq, struct t4_cq *cq)
 {
 	struct t4_cqe cqe;
 
-	pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
 		 wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -197,7 +197,7 @@ int c4iw_flush_rq(struct t4_wq *wq, struct t4_cq *cq, int count)
 	int in_use = wq->rq.in_use - count;
 
 	BUG_ON(in_use < 0);
-	pr_debug("%s wq %p cq %p rq.in_use %u skip count %u\n", __func__,
+	pr_debug("wq %p cq %p rq.in_use %u skip count %u\n",
 		 wq, cq, wq->rq.in_use, count);
 	while (in_use--) {
 		insert_recv_cqe(wq, cq);
@@ -211,7 +211,7 @@ static void insert_sq_cqe(struct t4_wq *wq, struct t4_cq *cq,
 {
 	struct t4_cqe cqe;
 
-	pr_debug("%s wq %p cq %p sw_cidx %u sw_pidx %u\n", __func__,
+	pr_debug("wq %p cq %p sw_cidx %u sw_pidx %u\n",
 		 wq, cq, cq->sw_cidx, cq->sw_pidx);
 	memset(&cqe, 0, sizeof(cqe));
 	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
@@ -281,8 +281,8 @@ static void flush_completed_wrs(struct t4_wq *wq, struct t4_cq *cq)
 			/*
 			 * Insert this completed cqe into the swcq.
 			 */
-			pr_debug("%s moving cqe into swcq sq idx %u cq idx %u\n",
-				 __func__, cidx, cq->sw_pidx);
+			pr_debug("moving cqe into swcq sq idx %u cq idx %u\n",
+				 cidx, cq->sw_pidx);
 			swsqe->cqe.header |= htonl(CQE_SWCQE_V(1));
 			cq->sw_queue[cq->sw_pidx] = swsqe->cqe;
 			t4_swcq_produce(cq);
@@ -337,7 +337,7 @@ void c4iw_flush_hw_cq(struct c4iw_cq *chp)
 	struct t4_swsqe *swsqe;
 	int ret;
 
-	pr_debug("%s cqid 0x%x\n", __func__, chp->cq.cqid);
+	pr_debug("cqid 0x%x\n", chp->cq.cqid);
 	ret = t4_next_hw_cqe(&chp->cq, &hw_cqe);
 
 	/*
@@ -430,7 +430,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 	u32 ptr;
 
 	*count = 0;
-	pr_debug("%s count zero %d\n", __func__, *count);
+	pr_debug("count zero %d\n", *count);
 	ptr = cq->sw_cidx;
 	while (ptr != cq->sw_pidx) {
 		cqe = &cq->sw_queue[ptr];
@@ -440,7 +440,7 @@ void c4iw_count_rcqes(struct t4_cq *cq, struct t4_wq *wq, int *count)
 		if (++ptr == cq->size)
 			ptr = 0;
 	}
-	pr_debug("%s cq %p count %d\n", __func__, cq, *count);
+	pr_debug("cq %p count %d\n", cq, *count);
 }
 
 /*
@@ -471,8 +471,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		if (ret)
 			return ret;
 
-	pr_debug("%s CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
-		 __func__, CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
+	pr_debug("CQE OVF %u qpid 0x%0x genbit %u type %u status 0x%0x opcode 0x%0x len 0x%0x wrid_hi_stag 0x%x wrid_low_msn 0x%x\n",
+		 CQE_OVFBIT(hw_cqe), CQE_QPID(hw_cqe),
 		 CQE_GENBIT(hw_cqe), CQE_TYPE(hw_cqe), CQE_STATUS(hw_cqe),
 		 CQE_OPCODE(hw_cqe), CQE_LEN(hw_cqe), CQE_WRID_HI(hw_cqe),
 		 CQE_WRID_LOW(hw_cqe));
@@ -603,8 +603,8 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 	if (!SW_CQE(hw_cqe) && (CQE_WRID_SQ_IDX(hw_cqe) != wq->sq.cidx)) {
 		struct t4_swsqe *swsqe;
 
-		pr_debug("%s out of order completion going in sw_sq at idx %u\n",
-			 __func__, CQE_WRID_SQ_IDX(hw_cqe));
+		pr_debug("out of order completion going in sw_sq at idx %u\n",
+			 CQE_WRID_SQ_IDX(hw_cqe));
 		swsqe = &wq->sq.sw_sq[CQE_WRID_SQ_IDX(hw_cqe)];
 		swsqe->cqe = *hw_cqe;
 		swsqe->complete = 1;
@@ -638,13 +638,13 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 		BUG_ON(wq->sq.in_use <= 0 && wq->sq.in_use >= wq->sq.size);
 
 		wq->sq.cidx = (uint16_t)idx;
-		pr_debug("%s completing sq idx %u\n", __func__, wq->sq.cidx);
+		pr_debug("completing sq idx %u\n", wq->sq.cidx);
 		*cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
 		if (c4iw_wr_log)
 			c4iw_log_wr_stats(wq, hw_cqe);
 		t4_sq_consume(wq);
 	} else {
-		pr_debug("%s completing rq idx %u\n", __func__, wq->rq.cidx);
+		pr_debug("completing rq idx %u\n", wq->rq.cidx);
 		*cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
 		BUG_ON(t4_rq_empty(wq));
 		if (c4iw_wr_log)
@@ -661,12 +661,12 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
 skip_cqe:
 	if (SW_CQE(hw_cqe)) {
-		pr_debug("%s cq %p cqid 0x%x skip sw cqe cidx %u\n",
-			 __func__, cq, cq->cqid, cq->sw_cidx);
+		pr_debug("cq %p cqid 0x%x skip sw cqe cidx %u\n",
+			 cq, cq->cqid, cq->sw_cidx);
 		t4_swcq_consume(cq);
 	} else {
-		pr_debug("%s cq %p cqid 0x%x skip hw cqe cidx %u\n",
-			 __func__, cq, cq->cqid, cq->cidx);
+		pr_debug("cq %p cqid 0x%x skip hw cqe cidx %u\n",
+			 cq, cq->cqid, cq->cidx);
 		t4_hwcq_consume(cq);
 	}
 	return ret;
@@ -712,8 +712,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
 	wc->vendor_err = CQE_STATUS(&cqe);
 	wc->wc_flags = 0;
 
-	pr_debug("%s qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
-		 __func__, CQE_QPID(&cqe),
+	pr_debug("qpid 0x%x type %d opcode %d status 0x%x len %u wrid hi 0x%x lo 0x%x cookie 0x%llx\n",
+		 CQE_QPID(&cqe),
 		 CQE_TYPE(&cqe), CQE_OPCODE(&cqe),
 		 CQE_STATUS(&cqe), CQE_LEN(&cqe),
 		 CQE_WRID_HI(&cqe), CQE_WRID_LOW(&cqe),
@@ -857,7 +857,7 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq)
 	struct c4iw_cq *chp;
 	struct c4iw_ucontext *ucontext;
 
-	pr_debug("%s ib_cq %p\n", __func__, ib_cq);
+	pr_debug("ib_cq %p\n", ib_cq);
 	chp = to_c4iw_cq(ib_cq);
 
 	remove_handle(chp->rhp, &chp->rhp->cqidr, chp->cq.cqid);
@@ -889,7 +889,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
-	pr_debug("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
+	pr_debug("ib_dev %p entries %d\n", ibdev, entries);
 	if (attr->flags)
 		return ERR_PTR(-EINVAL);
@@ -996,8 +996,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev,
 		mm2->len = PAGE_SIZE;
 		insert_mmap(ucontext, mm2);
 	}
-	pr_debug("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
-		 __func__, chp->cq.cqid, chp, chp->cq.size,
+	pr_debug("cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
+		 chp->cq.cqid, chp, chp->cq.size,
 		 chp->cq.memsize, (unsigned long long)chp->cq.dma_addr);
 	return &chp->ibcq;
 err6:
...
@@ -811,8 +811,8 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 	rdev->qpmask = rdev->lldi.udb_density - 1;
 	rdev->cqmask = rdev->lldi.ucq_density - 1;
-	pr_debug("%s dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
-		 __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
+	pr_debug("dev %s stag start 0x%0x size 0x%0x num stags %d pbl start 0x%0x size 0x%0x rq start 0x%0x size 0x%0x qp qid start %u size %u cq qid start %u size %u\n",
+		 pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,
 		 rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),
 		 rdev->lldi.vr->pbl.start,
 		 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,
@@ -935,7 +935,7 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
 static void c4iw_remove(struct uld_ctx *ctx)
 {
-	pr_debug("%s c4iw_dev %p\n", __func__, ctx->dev);
+	pr_debug("c4iw_dev %p\n", ctx->dev);
 	c4iw_unregister_device(ctx->dev);
 	c4iw_dealloc(ctx);
 }
@@ -969,8 +969,8 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 	devp->rdev.lldi = *infop;
 
 	/* init various hw-queue params based on lld info */
-	pr_debug("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
-		 __func__, devp->rdev.lldi.sge_ingpadboundary,
+	pr_debug("Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+		 devp->rdev.lldi.sge_ingpadboundary,
 		 devp->rdev.lldi.sge_egrstatuspagesize);
 
 	devp->rdev.hw_queue.t4_eq_status_entries =
@@ -1069,8 +1069,8 @@ static void *c4iw_uld_add(const struct cxgb4_lld_info *infop)
 	}
 	ctx->lldi = *infop;
 
-	pr_debug("%s found device %s nchan %u nrxq %u ntxq %u nports %u\n",
-		 __func__, pci_name(ctx->lldi.pdev),
+	pr_debug("found device %s nchan %u nrxq %u ntxq %u nports %u\n",
+		 pci_name(ctx->lldi.pdev),
 		 ctx->lldi.nchan, ctx->lldi.nrxq,
 		 ctx->lldi.ntxq, ctx->lldi.nports);
@@ -1203,7 +1203,7 @@ static int c4iw_uld_state_change(void *handle, enum cxgb4_state new_state)
 {
 	struct uld_ctx *ctx = handle;
 
-	pr_debug("%s new_state %u\n", __func__, new_state);
+	pr_debug("new_state %u\n", new_state);
 	switch (new_state) {
 	case CXGB4_STATE_UP:
 		pr_info("%s: Up\n", pci_name(ctx->lldi.pdev));
...
@@ -234,7 +234,7 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
 		if (atomic_dec_and_test(&chp->refcnt))
 			wake_up(&chp->wait);
 	} else {
-		pr_debug("%s unknown cqid 0x%x\n", __func__, qid);
+		pr_warn("%s unknown cqid 0x%x\n", __func__, qid);
 		spin_unlock_irqrestore(&dev->lock, flag);
 	}
 	return 0;
...
@@ -230,8 +230,8 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev,
 	ret = wait_for_completion_timeout(&wr_waitp->completion, C4IW_WR_TO);
 	if (!ret) {
-		pr_debug("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
+		pr_err("%s - Device %s not responding (disabling device) - tid %u qpid %u\n",
 			 func, pci_name(rdev->lldi.pdev), hwtid, qpid);
 		rdev->flags |= T4_FATAL_ERROR;
 		wr_waitp->ret = -EIO;
 	}
@@ -537,8 +537,7 @@ static inline struct c4iw_mm_entry *remove_mmap(struct c4iw_ucontext *ucontext,
 		if (mm->key == key && mm->len == len) {
 			list_del_init(&mm->entry);
 			spin_unlock(&ucontext->mmap_lock);
-			pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-				 __func__, key,
+			pr_debug("key 0x%x addr 0x%llx len %d\n", key,
 				 (unsigned long long)mm->addr, mm->len);
 			return mm;
 		}
@@ -551,8 +550,8 @@ static inline void insert_mmap(struct c4iw_ucontext *ucontext,
 			       struct c4iw_mm_entry *mm)
 {
 	spin_lock(&ucontext->mmap_lock);
-	pr_debug("%s key 0x%x addr 0x%llx len %d\n",
-		 __func__, mm->key, (unsigned long long)mm->addr, mm->len);
+	pr_debug("key 0x%x addr 0x%llx len %d\n",
+		 mm->key, (unsigned long long)mm->addr, mm->len);
 	list_add_tail(&mm->entry, &ucontext->mmaps);
 	spin_unlock(&ucontext->mmap_lock);
 }
@@ -671,16 +670,14 @@ enum c4iw_mmid_state {
 #define MPA_V2_IRD_ORD_MASK          0x3FFF
 
 #define c4iw_put_ep(ep) {					\
-	pr_debug("put_ep (via %s:%u) ep %p refcnt %d\n",	\
-		 __func__, __LINE__,				\
+	pr_debug("put_ep ep %p refcnt %d\n",			\
 		 ep, kref_read(&((ep)->kref)));			\
 	WARN_ON(kref_read(&((ep)->kref)) < 1);			\
 	kref_put(&((ep)->kref), _c4iw_free_ep);			\
 }
 
 #define c4iw_get_ep(ep) {					\
-	pr_debug("get_ep (via %s:%u) ep %p, refcnt %d\n",	\
-		 __func__, __LINE__,				\
+	pr_debug("get_ep ep %p, refcnt %d\n",			\
 		 ep, kref_read(&((ep)->kref)));			\
 	kref_get(&((ep)->kref));				\
 }
...
@@ -124,7 +124,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
 		cmd |= cpu_to_be32(T5_ULP_MEMIO_IMM_F);
 
 	addr &= 0x7FFFFFF;
-	pr_debug("%s addr 0x%x len %u\n", __func__, addr, len);
+	pr_debug("addr 0x%x len %u\n", addr, len);
 	num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE);
 	c4iw_init_wr_wait(&wr_wait);
 	for (i = 0; i < num_wqe; i++) {
@@ -285,8 +285,8 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 		mutex_unlock(&rdev->stats.lock);
 		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
 	}
-	pr_debug("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
-		 __func__, stag_state, type, pdid, stag_idx);
+	pr_debug("stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
+		 stag_state, type, pdid, stag_idx);
 
 	/* write TPT entry */
 	if (reset_tpt_entry)
@@ -327,8 +327,8 @@ static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl,
 {
 	int err;
 
-	pr_debug("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
-		 __func__, pbl_addr, rdev->lldi.vr->pbl.start,
+	pr_debug("*pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n",
+		 pbl_addr, rdev->lldi.vr->pbl.start,
 		 pbl_size);
 
 	err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl, NULL);
@@ -372,7 +372,7 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
 	mhp->attr.stag = stag;
 	mmid = stag >> 8;
 	mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-	pr_debug("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+	pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
 	return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
 }
@@ -422,7 +422,7 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
 	int ret;
 	u32 stag = T4_STAG_UNSET;
 
-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
@@ -479,7 +479,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
 
-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 	if (length == ~0ULL)
 		return ERR_PTR(-EINVAL);
@@ -616,7 +616,7 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type,
 		ret = -ENOMEM;
 		goto dealloc_win;
 	}
-	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
 	return &(mhp->ibmw);
 
 dealloc_win:
@@ -641,7 +641,7 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	deallocate_window(&rhp->rdev, mhp->attr.stag, mhp->dereg_skb);
 	kfree_skb(mhp->dereg_skb);
 	kfree(mhp);
-	pr_debug("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+	pr_debug("ib_mw %p mmid 0x%x ptr %p\n", mw, mmid, mhp);
 	return 0;
 }
@@ -699,7 +699,7 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 		goto err3;
 	}
 
-	pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
+	pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
 	return &(mhp->ibmr);
 err3:
 	dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
@@ -744,7 +744,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	struct c4iw_mr *mhp;
 	u32 mmid;
 
-	pr_debug("%s ib_mr %p\n", __func__, ib_mr);
+	pr_debug("ib_mr %p\n", ib_mr);
 
 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;
@@ -762,7 +762,7 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
 		ib_umem_release(mhp->umem);
-	pr_debug("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp);
+	pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
 	kfree(mhp);
 	return 0;
 }
...
@@ -102,7 +102,7 @@ void _c4iw_free_ucontext(struct kref *kref)
 	ucontext = container_of(kref, struct c4iw_ucontext, kref);
 	rhp = to_c4iw_dev(ucontext->ibucontext.device);
 
-	pr_debug("%s ucontext %p\n", __func__, ucontext);
+	pr_debug("ucontext %p\n", ucontext);
 	list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
 		kfree(mm);
 	c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
@@ -113,7 +113,7 @@ static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
 {
 	struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
 
-	pr_debug("%s context %p\n", __func__, context);
+	pr_debug("context %p\n", context);
 	c4iw_put_ucontext(ucontext);
 	return 0;
 }
@@ -127,7 +127,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
 	int ret = 0;
 	struct c4iw_mm_entry *mm = NULL;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	context = kzalloc(sizeof(*context), GFP_KERNEL);
 	if (!context) {
 		ret = -ENOMEM;
@@ -185,7 +185,7 @@ static int c4iw_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	struct c4iw_ucontext *ucontext;
 	u64 addr;
 
-	pr_debug("%s pgoff 0x%lx key 0x%x len %d\n", __func__, vma->vm_pgoff,
+	pr_debug("pgoff 0x%lx key 0x%x len %d\n", vma->vm_pgoff,
 		 key, len);
 
 	if (vma->vm_start & (PAGE_SIZE-1))
@@ -251,7 +251,7 @@ static int c4iw_deallocate_pd(struct ib_pd *pd)
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
-	pr_debug("%s ibpd %p pdid 0x%x\n", __func__, pd, php->pdid);
+	pr_debug("ibpd %p pdid 0x%x\n", pd, php->pdid);
 	c4iw_put_resource(&rhp->rdev.resource.pdid_table, php->pdid);
 	mutex_lock(&rhp->rdev.stats.lock);
 	rhp->rdev.stats.pd.cur--;
@@ -268,7 +268,7 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 	u32 pdid;
 	struct c4iw_dev *rhp;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	rhp = (struct c4iw_dev *) ibdev;
 	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)
@@ -291,14 +291,14 @@ static struct ib_pd *c4iw_allocate_pd(struct ib_device *ibdev,
 	if (rhp->rdev.stats.pd.cur > rhp->rdev.stats.pd.max)
 		rhp->rdev.stats.pd.max = rhp->rdev.stats.pd.cur;
 	mutex_unlock(&rhp->rdev.stats.lock);
-	pr_debug("%s pdid 0x%0x ptr 0x%p\n", __func__, pdid, php);
+	pr_debug("pdid 0x%0x ptr 0x%p\n", pdid, php);
 	return &php->ibpd;
 }
 
 static int c4iw_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
 			   u16 *pkey)
 {
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 	*pkey = 0;
 	return 0;
 }
@@ -308,8 +308,8 @@ static int c4iw_query_gid(struct ib_device *ibdev, u8 port, int index,
 {
 	struct c4iw_dev *dev;
 
-	pr_debug("%s ibdev %p, port %d, index %d, gid %p\n",
-		 __func__, ibdev, port, index, gid);
+	pr_debug("ibdev %p, port %d, index %d, gid %p\n",
+		 ibdev, port, index, gid);
 	dev = to_c4iw_dev(ibdev);
 	BUG_ON(port == 0);
 	memset(&(gid->raw[0]), 0, sizeof(gid->raw));
@@ -323,7 +323,7 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro
 	struct c4iw_dev *dev;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 
 	if (uhw->inlen || uhw->outlen)
 		return -EINVAL;
@@ -364,7 +364,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
 	struct net_device *netdev;
 	struct in_device *inetdev;
 
-	pr_debug("%s ibdev %p\n", __func__, ibdev);
+	pr_debug("ibdev %p\n", ibdev);
 
 	dev = to_c4iw_dev(ibdev);
 	netdev = dev->rdev.lldi.ports[port-1];
@@ -406,7 +406,7 @@ static ssize_t show_rev(struct device *dev, struct device_attribute *attr,
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	return sprintf(buf, "%d\n",
 		       CHELSIO_CHIP_RELEASE(c4iw_dev->rdev.lldi.adapter_type));
 }
@@ -419,7 +419,7 @@ static ssize_t show_hca(struct device *dev, struct device_attribute *attr,
 	struct ethtool_drvinfo info;
 	struct net_device *lldev = c4iw_dev->rdev.lldi.ports[0];
 
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	lldev->ethtool_ops->get_drvinfo(lldev, &info);
 	return sprintf(buf, "%s\n", info.driver);
 }
@@ -429,7 +429,7 @@ static ssize_t show_board(struct device *dev, struct device_attribute *attr,
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev.dev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 	return sprintf(buf, "%x.%x\n", c4iw_dev->rdev.lldi.pdev->vendor,
 		       c4iw_dev->rdev.lldi.pdev->device);
 }
@@ -521,7 +521,7 @@ static void get_dev_fw_str(struct ib_device *dev, char *str)
 {
 	struct c4iw_dev *c4iw_dev = container_of(dev, struct c4iw_dev,
 						 ibdev);
-	pr_debug("%s dev 0x%p\n", __func__, dev);
+	pr_debug("dev 0x%p\n", dev);
 
 	snprintf(str, IB_FW_VERSION_NAME_MAX, "%u.%u.%u.%u",
 		 FW_HDR_FW_VER_MAJOR_G(c4iw_dev->rdev.lldi.fw_vers),
@@ -535,7 +535,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	int ret;
 	int i;
 
-	pr_debug("%s c4iw_dev %p\n", __func__, dev);
+	pr_debug("c4iw_dev %p\n", dev);
 	BUG_ON(!dev->rdev.lldi.ports[0]);
 	strlcpy(dev->ibdev.name, "cxgb4_%d", IB_DEVICE_NAME_MAX);
 	memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid));
@@ -645,7 +645,7 @@ void c4iw_unregister_device(struct c4iw_dev *dev)
 {
 	int i;
 
-	pr_debug("%s c4iw_dev %p\n", __func__, dev);
+	pr_debug("c4iw_dev %p\n", dev);
 	for (i = 0; i < ARRAY_SIZE(c4iw_class_attributes); ++i)
 		device_remove_file(&dev->ibdev.dev,
 				   c4iw_class_attributes[i]);
...
@@ -254,8 +254,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 		ret = -ENOMEM;
 		goto free_sq;
 	}
-	pr_debug("%s sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
-		 __func__, wq->sq.queue,
+	pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
+		 wq->sq.queue,
 		 (unsigned long long)virt_to_phys(wq->sq.queue),
 		 wq->rq.queue,
 		 (unsigned long long)virt_to_phys(wq->rq.queue));
@@ -361,8 +361,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	if (ret)
 		goto free_dma;
 
-	pr_debug("%s sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
-		 __func__, wq->sq.qid, wq->rq.qid, wq->db,
+	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
+		 wq->sq.qid, wq->rq.qid, wq->db,
 		 wq->sq.bar2_va, wq->rq.bar2_va);
 
 	return 0;
@@ -724,7 +724,7 @@ static void free_qp_work(struct work_struct *work)
 	ucontext = qhp->ucontext;
 	rhp = qhp->rhp;
 
-	pr_debug("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+	pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
 	destroy_qp(&rhp->rdev, &qhp->wq,
 		   ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -738,19 +738,19 @@ static void queue_qp_free(struct kref *kref)
 	struct c4iw_qp *qhp;
 
 	qhp = container_of(kref, struct c4iw_qp, kref);
-	pr_debug("%s qhp %p\n", __func__, qhp);
+	pr_debug("qhp %p\n", qhp);
 	queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
 }
 
 void c4iw_qp_add_ref(struct ib_qp *qp)
 {
-	pr_debug("%s ib_qp %p\n", __func__, qp);
+	pr_debug("ib_qp %p\n", qp);
 	kref_get(&to_c4iw_qp(qp)->kref);
 }
 
 void c4iw_qp_rem_ref(struct ib_qp *qp)
 {
-	pr_debug("%s ib_qp %p\n", __func__, qp);
+	pr_debug("ib_qp %p\n", qp);
 	kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
 }
@@ -958,8 +958,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			c4iw_invalidate_mr(qhp->rhp, wr->ex.invalidate_rkey);
 			break;
 		default:
-			pr_debug("%s post of type=%d TBD!\n", __func__,
+			pr_warn("%s post of type=%d TBD!\n", __func__,
 				 wr->opcode);
 			err = -EINVAL;
 		}
 		if (err) {
@@ -980,8 +980,7 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
 
-		pr_debug("%s cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
-			 __func__,
+		pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
 			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
 			 swsqe->opcode, swsqe->read_len);
 		wr = wr->next;
@@ -1057,8 +1056,7 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		wqe->recv.r2[1] = 0;
 		wqe->recv.r2[2] = 0;
 		wqe->recv.len16 = len16;
-		pr_debug("%s cookie 0x%llx pidx %u\n",
-			 __func__,
+		pr_debug("cookie 0x%llx pidx %u\n",
 			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
 		t4_rq_produce(&qhp->wq, len16);
 		idx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE);
@@ -1218,7 +1216,7 @@ static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
 	struct sk_buff *skb;
 	struct terminate_message *term;
 
-	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
+	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
 		 qhp->ep->hwtid);
 
 	skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
@@ -1255,7 +1253,7 @@ static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
 	int rq_flushed, sq_flushed;
 	unsigned long flag;
 
-	pr_debug("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
+	pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
 
 	/* locking hierarchy: cq lock first, then qp lock. */
 	spin_lock_irqsave(&rchp->lock, flag);
@@ -1340,8 +1338,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	int ret;
 	struct sk_buff *skb;
 
-	pr_debug("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-		 ep->hwtid);
+	pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
 
 	skb = skb_dequeue(&ep->com.ep_skb_list);
 	if (WARN_ON(!skb))
@@ -1367,13 +1364,13 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	ret = c4iw_wait_for_reply(&rhp->rdev, &ep->com.wr_wait, qhp->ep->hwtid,
 				  qhp->wq.sq.qid, __func__);
 out:
-	pr_debug("%s ret %d\n", __func__, ret);
+	pr_debug("ret %d\n", ret);
 	return ret;
 }
 
 static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
 {
-	pr_debug("%s p2p_type = %d\n", __func__, p2p_type);
+	pr_debug("p2p_type = %d\n", p2p_type);
 	memset(&init->u, 0, sizeof init->u);
 	switch (p2p_type) {
 	case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
@@ -1402,7 +1399,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 	int ret;
 	struct sk_buff *skb;
 
-	pr_debug("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+	pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
 		 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
 	skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
@@ -1475,7 +1472,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 err1:
 	free_ird(rhp, qhp->attr.max_ird);
 out:
-	pr_debug("%s ret %d\n", __func__, ret);
+	pr_debug("ret %d\n", ret);
 	return ret;
 }
@@ -1492,8 +1489,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	int free = 0;
 	struct c4iw_ep *ep = NULL;
 
-	pr_debug("%s qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
-		 __func__,
+	pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
 		 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
 		 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
@@ -1680,7 +1676,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	}
 	goto out;
 err:
-	pr_debug("%s disassociating ep %p qpid 0x%x\n", __func__, qhp->ep,
+	pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
 		 qhp->wq.sq.qid);
 
 	/* disassociate the LLP connection */
@@ -1717,7 +1713,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
 	 */
 	if (free)
 		c4iw_put_ep(&ep->com);
-	pr_debug("%s exit state %d\n", __func__, qhp->attr.state);
+	pr_debug("exit state %d\n", qhp->attr.state);
 	return ret;
 }
@@ -1747,7 +1743,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
 	c4iw_qp_rem_ref(ib_qp);
 
-	pr_debug("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
+	pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
 	return 0;
 }
@@ -1766,7 +1762,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
 	struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
 
-	pr_debug("%s ib_pd %p\n", __func__, pd);
+	pr_debug("ib_pd %p\n", pd);
 
 	if (attrs->qp_type != IB_QPT_RC)
 		return ERR_PTR(-EINVAL);
@@ -1937,8 +1933,7 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
 	qhp->ibqp.qp_num = qhp->wq.sq.qid;
 	init_timer(&(qhp->timer));
 	INIT_LIST_HEAD(&qhp->db_fc_entry);
-	pr_debug("%s sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
-		 __func__,
+	pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
 		 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
 		 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
 		 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
@@ -1971,7 +1966,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	enum c4iw_qp_attr_mask mask = 0;
 	struct c4iw_qp_attributes attrs;
 
-	pr_debug("%s ib_qp %p\n", __func__, ibqp);
+	pr_debug("ib_qp %p\n", ibqp);
 
 	/* iwarp does not support the RTR state */
 	if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
@@ -2017,7 +2012,7 @@ int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
 {
-	pr_debug("%s ib_dev %p qpn 0x%x\n", __func__, dev, qpn);
+	pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
 	return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
 }
...
...@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table) ...@@ -90,7 +90,7 @@ u32 c4iw_get_resource(struct c4iw_id_table *id_table)
void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry) void c4iw_put_resource(struct c4iw_id_table *id_table, u32 entry)
{ {
pr_debug("%s entry 0x%x\n", __func__, entry); pr_debug("entry 0x%x\n", entry);
c4iw_id_free(id_table, entry); c4iw_id_free(id_table, entry);
} }
...@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) ...@@ -141,7 +141,7 @@ u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
} }
out: out:
mutex_unlock(&uctx->lock); mutex_unlock(&uctx->lock);
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max) if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur; rdev->stats.qid.max = rdev->stats.qid.cur;
...@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid, ...@@ -157,7 +157,7 @@ void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL); entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry) if (!entry)
return; return;
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
entry->qid = qid; entry->qid = qid;
mutex_lock(&uctx->lock); mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->cqids); list_add_tail(&entry->entry, &uctx->cqids);
...@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx) ...@@ -215,7 +215,7 @@ u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
} }
out: out:
mutex_unlock(&uctx->lock); mutex_unlock(&uctx->lock);
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (rdev->stats.qid.cur > rdev->stats.qid.max) if (rdev->stats.qid.cur > rdev->stats.qid.max)
rdev->stats.qid.max = rdev->stats.qid.cur; rdev->stats.qid.max = rdev->stats.qid.cur;
...@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid, ...@@ -231,7 +231,7 @@ void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
entry = kmalloc(sizeof *entry, GFP_KERNEL); entry = kmalloc(sizeof *entry, GFP_KERNEL);
if (!entry) if (!entry)
return; return;
pr_debug("%s qid 0x%x\n", __func__, qid); pr_debug("qid 0x%x\n", qid);
entry->qid = qid; entry->qid = qid;
mutex_lock(&uctx->lock); mutex_lock(&uctx->lock);
list_add_tail(&entry->entry, &uctx->qpids); list_add_tail(&entry->entry, &uctx->qpids);
...@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp) ...@@ -254,7 +254,7 @@ void c4iw_destroy_resource(struct c4iw_resource *rscp)
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{ {
unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size); unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size); pr_debug("addr 0x%x size %d\n", (u32)addr, size);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
if (addr) { if (addr) {
rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT); rdev->stats.pbl.cur += roundup(size, 1 << MIN_PBL_SHIFT);
...@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -268,7 +268,7 @@ u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size) void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{ {
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size); pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT); rdev->stats.pbl.cur -= roundup(size, 1 << MIN_PBL_SHIFT);
mutex_unlock(&rdev->stats.lock); mutex_unlock(&rdev->stats.lock);
...@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) ...@@ -290,8 +290,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
while (pbl_start < pbl_top) { while (pbl_start < pbl_top) {
pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk); pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) { if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
pr_debug("%s failed to add PBL chunk (%x/%x)\n", pr_debug("failed to add PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk); pbl_start, pbl_chunk);
if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) { if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
pr_warn("Failed to add all PBL chunks (%x/%x)\n", pr_warn("Failed to add all PBL chunks (%x/%x)\n",
pbl_start, pbl_top - pbl_start); pbl_start, pbl_top - pbl_start);
...@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev) ...@@ -299,8 +299,8 @@ int c4iw_pblpool_create(struct c4iw_rdev *rdev)
} }
pbl_chunk >>= 1; pbl_chunk >>= 1;
} else { } else {
pr_debug("%s added PBL chunk (%x/%x)\n", pr_debug("added PBL chunk (%x/%x)\n",
__func__, pbl_start, pbl_chunk); pbl_start, pbl_chunk);
pbl_start += pbl_chunk; pbl_start += pbl_chunk;
} }
} }
...@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev) ...@@ -322,7 +322,7 @@ void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{ {
unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6); unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6); pr_debug("addr 0x%x size %d\n", (u32)addr, size << 6);
if (!addr) if (!addr)
pr_warn_ratelimited("%s: Out of RQT memory\n", pr_warn_ratelimited("%s: Out of RQT memory\n",
pci_name(rdev->lldi.pdev)); pci_name(rdev->lldi.pdev));
...@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -339,7 +339,7 @@ u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size) void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{ {
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size << 6); pr_debug("addr 0x%x size %d\n", addr, size << 6);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT); rdev->stats.rqt.cur -= roundup(size << 6, 1 << MIN_RQT_SHIFT);
mutex_unlock(&rdev->stats.lock); mutex_unlock(&rdev->stats.lock);
...@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) ...@@ -361,8 +361,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
while (rqt_start < rqt_top) { while (rqt_start < rqt_top) {
rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk); rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) { if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
pr_debug("%s failed to add RQT chunk (%x/%x)\n", pr_debug("failed to add RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk); rqt_start, rqt_chunk);
if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) { if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
pr_warn("Failed to add all RQT chunks (%x/%x)\n", pr_warn("Failed to add all RQT chunks (%x/%x)\n",
rqt_start, rqt_top - rqt_start); rqt_start, rqt_top - rqt_start);
...@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev) ...@@ -370,8 +370,8 @@ int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
} }
rqt_chunk >>= 1; rqt_chunk >>= 1;
} else { } else {
pr_debug("%s added RQT chunk (%x/%x)\n", pr_debug("added RQT chunk (%x/%x)\n",
__func__, rqt_start, rqt_chunk); rqt_start, rqt_chunk);
rqt_start += rqt_chunk; rqt_start += rqt_chunk;
} }
} }
...@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev) ...@@ -391,7 +391,7 @@ void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{ {
unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size); unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);
pr_debug("%s addr 0x%x size %d\n", __func__, (u32)addr, size); pr_debug("addr 0x%x size %d\n", (u32)addr, size);
if (addr) { if (addr) {
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT); rdev->stats.ocqp.cur += roundup(size, 1 << MIN_OCQP_SHIFT);
...@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size) ...@@ -404,7 +404,7 @@ u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size) void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{ {
pr_debug("%s addr 0x%x size %d\n", __func__, addr, size); pr_debug("addr 0x%x size %d\n", addr, size);
mutex_lock(&rdev->stats.lock); mutex_lock(&rdev->stats.lock);
rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT); rdev->stats.ocqp.cur -= roundup(size, 1 << MIN_OCQP_SHIFT);
mutex_unlock(&rdev->stats.lock); mutex_unlock(&rdev->stats.lock);
...@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) ...@@ -426,8 +426,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
while (start < top) { while (start < top) {
chunk = min(top - start + 1, chunk); chunk = min(top - start + 1, chunk);
if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) { if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
pr_debug("%s failed to add OCQP chunk (%x/%x)\n", pr_debug("failed to add OCQP chunk (%x/%x)\n",
__func__, start, chunk); start, chunk);
if (chunk <= 1024 << MIN_OCQP_SHIFT) { if (chunk <= 1024 << MIN_OCQP_SHIFT) {
pr_warn("Failed to add all OCQP chunks (%x/%x)\n", pr_warn("Failed to add all OCQP chunks (%x/%x)\n",
start, top - start); start, top - start);
...@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev) ...@@ -435,8 +435,8 @@ int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
} }
chunk >>= 1; chunk >>= 1;
} else { } else {
pr_debug("%s added OCQP chunk (%x/%x)\n", pr_debug("added OCQP chunk (%x/%x)\n",
__func__, start, chunk); start, chunk);
start += chunk; start += chunk;
} }
} }
......
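All three pool-create loops above (PBL, RQT, and OCQP) share one seeding strategy: offer gen_pool_add() the largest chunk available and halve the chunk size on failure, giving up only once it drops below a floor. A minimal sketch of that strategy, with made-up names (seed_pool and MIN_SHIFT stand in for the per-pool constants):

#include <linux/genalloc.h>
#include <linux/kernel.h>

#define MIN_SHIFT 4	/* assumed floor; stands in for MIN_PBL_SHIFT etc. */

static int seed_pool(struct gen_pool *pool, unsigned long start,
		     unsigned long top, unsigned long chunk)
{
	while (start < top) {
		chunk = min(top - start + 1, chunk);
		if (gen_pool_add(pool, start, chunk, -1)) {
			/* Rejected: stop below the floor, else retry halved. */
			if (chunk <= 1024 << MIN_SHIFT)
				return -ENOMEM;
			chunk >>= 1;
		} else {
			/* Accepted: advance past the chunk just added. */
			start += chunk;
		}
	}
	return 0;
}

Halving on failure trades a little address-space fragmentation for robustness when the allocator cannot take one large region.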
...@@ -466,14 +466,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe) ...@@ -466,14 +466,12 @@ static inline void t4_ring_sq_db(struct t4_wq *wq, u16 inc, union t4_wr *wqe)
wmb(); wmb();
if (wq->sq.bar2_va) { if (wq->sq.bar2_va) {
if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) { if (inc == 1 && wq->sq.bar2_qid == 0 && wqe) {
pr_debug("%s: WC wq->sq.pidx = %d\n", pr_debug("WC wq->sq.pidx = %d\n", wq->sq.pidx);
__func__, wq->sq.pidx);
pio_copy((u64 __iomem *) pio_copy((u64 __iomem *)
(wq->sq.bar2_va + SGE_UDB_WCDOORBELL), (wq->sq.bar2_va + SGE_UDB_WCDOORBELL),
(u64 *)wqe); (u64 *)wqe);
} else { } else {
pr_debug("%s: DB wq->sq.pidx = %d\n", pr_debug("DB wq->sq.pidx = %d\n", wq->sq.pidx);
__func__, wq->sq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid), writel(PIDX_T5_V(inc) | QID_V(wq->sq.bar2_qid),
wq->sq.bar2_va + SGE_UDB_KDOORBELL); wq->sq.bar2_va + SGE_UDB_KDOORBELL);
} }
...@@ -493,14 +491,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc, ...@@ -493,14 +491,12 @@ static inline void t4_ring_rq_db(struct t4_wq *wq, u16 inc,
wmb(); wmb();
if (wq->rq.bar2_va) { if (wq->rq.bar2_va) {
if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) { if (inc == 1 && wq->rq.bar2_qid == 0 && wqe) {
pr_debug("%s: WC wq->rq.pidx = %d\n", pr_debug("WC wq->rq.pidx = %d\n", wq->rq.pidx);
__func__, wq->rq.pidx);
pio_copy((u64 __iomem *) pio_copy((u64 __iomem *)
(wq->rq.bar2_va + SGE_UDB_WCDOORBELL), (wq->rq.bar2_va + SGE_UDB_WCDOORBELL),
(void *)wqe); (void *)wqe);
} else { } else {
pr_debug("%s: DB wq->rq.pidx = %d\n", pr_debug("DB wq->rq.pidx = %d\n", wq->rq.pidx);
__func__, wq->rq.pidx);
writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid), writel(PIDX_T5_V(inc) | QID_V(wq->rq.bar2_qid),
wq->rq.bar2_va + SGE_UDB_KDOORBELL); wq->rq.bar2_va + SGE_UDB_KDOORBELL);
} }
...@@ -601,8 +597,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq) ...@@ -601,8 +597,8 @@ static inline void t4_swcq_produce(struct t4_cq *cq)
{ {
cq->sw_in_use++; cq->sw_in_use++;
if (cq->sw_in_use == cq->size) { if (cq->sw_in_use == cq->size) {
pr_debug("%s cxgb4 sw cq overflow cqid %u\n", pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
__func__, cq->cqid); __func__, cq->cqid);
cq->error = 1; cq->error = 1;
BUG_ON(1); BUG_ON(1);
} }
...@@ -673,8 +669,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe) ...@@ -673,8 +669,8 @@ static inline int t4_next_hw_cqe(struct t4_cq *cq, struct t4_cqe **cqe)
static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq) static inline struct t4_cqe *t4_next_sw_cqe(struct t4_cq *cq)
{ {
if (cq->sw_in_use == cq->size) { if (cq->sw_in_use == cq->size) {
pr_debug("%s cxgb4 sw cq overflow cqid %u\n", pr_warn("%s cxgb4 sw cq overflow cqid %u\n",
__func__, cq->cqid); __func__, cq->cqid);
cq->error = 1; cq->error = 1;
BUG_ON(1); BUG_ON(1);
return NULL; return NULL;
......
config INFINIBAND_HNS config INFINIBAND_HNS
tristate "HNS RoCE Driver" tristate "HNS RoCE Driver"
depends on NET_VENDOR_HISILICON depends on NET_VENDOR_HISILICON
depends on (ARM64 || (COMPILE_TEST && 64BIT)) && HNS && HNS_DSAF && HNS_ENET depends on ARM64 || (COMPILE_TEST && 64BIT)
---help--- ---help---
This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine
is used in Hisilicon Hi1610 and more further ICT SoC. is used in Hisilicon Hip06 and later ICT SoCs based on
platform devices.
To compile this driver as a module, choose M here: the module To compile this driver as a module, choose M here: the module
will be called hns-roce. will be called hns-roce.
config INFINIBAND_HNS_HIP06
tristate "Hisilicon Hip06 Family RoCE support"
depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and
Hip07 SoCs. These RoCE engines are platform devices.
To compile this driver as a module, choose M here: the module
will be called hns-roce-hw-v1.
config INFINIBAND_HNS_HIP08
tristate "Hisilicon Hip08 Family RoCE support"
depends on INFINIBAND_HNS && PCI && HNS3
---help---
RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip08 SoC.
The RoCE engine is a PCI device.
To compile this driver as a module, choose M here: the module
will be called hns-roce-hw-v2.
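With the driver split into a common core and two hardware backends, a configuration that wants both generations as modules would plausibly read as follows (the symbols are the ones introduced above; choosing =m is just one option):

CONFIG_INFINIBAND_HNS=m
CONFIG_INFINIBAND_HNS_HIP06=m
CONFIG_INFINIBAND_HNS_HIP08=m

That yields hns-roce.ko for the shared code plus hns-roce-hw-v1.ko and hns-roce-hw-v2.ko, matching the Makefile change that follows.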
...@@ -2,7 +2,13 @@ ...@@ -2,7 +2,13 @@
# Makefile for the Hisilicon RoCE drivers. # Makefile for the Hisilicon RoCE drivers.
# #
ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3
obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o obj-$(CONFIG_INFINIBAND_HNS) += hns-roce.o
hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_eq.o hns_roce_pd.o \
hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \
hns_roce_cq.o hns_roce_alloc.o hns_roce_hw_v1.o hns_roce_cq.o hns_roce_alloc.o
obj-$(CONFIG_INFINIBAND_HNS_HIP06) += hns-roce-hw-v1.o
hns-roce-hw-v1-objs := hns_roce_hw_v1.o
obj-$(CONFIG_INFINIBAND_HNS_HIP08) += hns-roce-hw-v2.o
hns-roce-hw-v2-objs := hns_roce_hw_v2.o
...@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd, ...@@ -44,7 +44,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibpd->device);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct ib_gid_attr gid_attr; struct ib_gid_attr gid_attr;
struct hns_roce_ah *ah; struct hns_roce_ah *ah;
u16 vlan_tag = 0xffff; u16 vlan_tag = 0xffff;
......
...@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj, ...@@ -67,6 +67,7 @@ void hns_roce_bitmap_free(struct hns_roce_bitmap *bitmap, unsigned long obj,
{ {
hns_roce_bitmap_free_range(bitmap, obj, 1, rr); hns_roce_bitmap_free_range(bitmap, obj, 1, rr);
} }
EXPORT_SYMBOL_GPL(hns_roce_bitmap_free);
int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt, int hns_roce_bitmap_alloc_range(struct hns_roce_bitmap *bitmap, int cnt,
int align, unsigned long *obj) int align, unsigned long *obj)
...@@ -160,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, ...@@ -160,7 +161,7 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
struct hns_roce_buf *buf) struct hns_roce_buf *buf)
{ {
int i; int i;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG; u32 bits_per_long = BITS_PER_LONG;
if (buf->nbufs == 1) { if (buf->nbufs == 1) {
...@@ -171,12 +172,13 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size, ...@@ -171,12 +172,13 @@ void hns_roce_buf_free(struct hns_roce_dev *hr_dev, u32 size,
for (i = 0; i < buf->nbufs; ++i) for (i = 0; i < buf->nbufs; ++i)
if (buf->page_list[i].buf) if (buf->page_list[i].buf)
dma_free_coherent(&hr_dev->pdev->dev, PAGE_SIZE, dma_free_coherent(dev, PAGE_SIZE,
buf->page_list[i].buf, buf->page_list[i].buf,
buf->page_list[i].map); buf->page_list[i].map);
kfree(buf->page_list); kfree(buf->page_list);
} }
} }
EXPORT_SYMBOL_GPL(hns_roce_buf_free);
int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
struct hns_roce_buf *buf) struct hns_roce_buf *buf)
...@@ -184,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct, ...@@ -184,7 +186,7 @@ int hns_roce_buf_alloc(struct hns_roce_dev *hr_dev, u32 size, u32 max_direct,
int i = 0; int i = 0;
dma_addr_t t; dma_addr_t t;
struct page **pages; struct page **pages;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
u32 bits_per_long = BITS_PER_LONG; u32 bits_per_long = BITS_PER_LONG;
/* SQ/RQ buf less than one page, SQ + RQ = 8K */ /* SQ/RQ buf less than one page, SQ + RQ = 8K */
......
...@@ -38,69 +38,7 @@ ...@@ -38,69 +38,7 @@
#define CMD_POLL_TOKEN 0xffff #define CMD_POLL_TOKEN 0xffff
#define CMD_MAX_NUM 32 #define CMD_MAX_NUM 32
#define STATUS_MASK 0xff
#define CMD_TOKEN_MASK 0x1f #define CMD_TOKEN_MASK 0x1f
#define GO_BIT_TIMEOUT_MSECS 10000
enum {
HCR_TOKEN_OFFSET = 0x14,
HCR_STATUS_OFFSET = 0x18,
HCR_GO_BIT = 15,
};
static int cmd_pending(struct hns_roce_dev *hr_dev)
{
u32 status = readl(hr_dev->cmd.hcr + HCR_TOKEN_OFFSET);
return (!!(status & (1 << HCR_GO_BIT)));
}
/* this function should be serialized with "hcr_mutex" */
static int __hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev,
u64 in_param, u64 out_param,
u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event)
{
struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct device *dev = &hr_dev->pdev->dev;
u32 __iomem *hcr = (u32 *)cmd->hcr;
int ret = -EAGAIN;
unsigned long end;
u32 val = 0;
end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies;
while (cmd_pending(hr_dev)) {
if (time_after(jiffies, end)) {
dev_dbg(dev, "jiffies=%d end=%d\n", (int)jiffies,
(int)end);
goto out;
}
cond_resched();
}
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S,
op);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_CMD_MDF_M,
ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_EVENT_S, event);
roce_set_bit(val, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1);
roce_set_field(val, ROCEE_MB6_ROCEE_MB_TOKEN_M,
ROCEE_MB6_ROCEE_MB_TOKEN_S, token);
__raw_writeq(cpu_to_le64(in_param), hcr + 0);
__raw_writeq(cpu_to_le64(out_param), hcr + 2);
__raw_writel(cpu_to_le32(in_modifier), hcr + 4);
/* Memory barrier */
wmb();
__raw_writel(cpu_to_le32(val), hcr + 5);
mmiowb();
ret = 0;
out:
return ret;
}
static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u64 out_param, u32 in_modifier,
...@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -108,12 +46,11 @@ static int hns_roce_cmd_mbox_post_hw(struct hns_roce_dev *hr_dev, u64 in_param,
int event) int event)
{ {
struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmdq *cmd = &hr_dev->cmd;
int ret = -EAGAIN; int ret;
mutex_lock(&cmd->hcr_mutex); mutex_lock(&cmd->hcr_mutex);
ret = __hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, ret = hr_dev->hw->post_mbox(hr_dev, in_param, out_param, in_modifier,
in_modifier, op_modifier, op, token, op_modifier, op, token, event);
event);
mutex_unlock(&cmd->hcr_mutex); mutex_unlock(&cmd->hcr_mutex);
return ret; return ret;
...@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -125,10 +62,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
u8 op_modifier, u16 op, u8 op_modifier, u16 op,
unsigned long timeout) unsigned long timeout)
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
u8 __iomem *hcr = hr_dev->cmd.hcr;
unsigned long end = 0;
u32 status = 0;
int ret; int ret;
ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param, ret = hns_roce_cmd_mbox_post_hw(hr_dev, in_param, out_param,
...@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -136,29 +70,10 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
CMD_POLL_TOKEN, 0); CMD_POLL_TOKEN, 0);
if (ret) { if (ret) {
dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n"); dev_err(dev, "[cmd_poll]hns_roce_cmd_mbox_post_hw failed\n");
goto out; return ret;
}
end = msecs_to_jiffies(timeout) + jiffies;
while (cmd_pending(hr_dev) && time_before(jiffies, end))
cond_resched();
if (cmd_pending(hr_dev)) {
dev_err(dev, "[cmd_poll]hw run cmd TIMEDOUT!\n");
ret = -ETIMEDOUT;
goto out;
} }
status = le32_to_cpu((__force __be32) return hr_dev->hw->chk_mbox(hr_dev, timeout);
__raw_readl(hcr + HCR_STATUS_OFFSET));
if ((status & STATUS_MASK) != 0x1) {
dev_err(dev, "mailbox status 0x%x!\n", status);
ret = -EBUSY;
goto out;
}
out:
return ret;
} }
static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, static int hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param,
...@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ...@@ -196,9 +111,9 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param,
unsigned long timeout) unsigned long timeout)
{ {
struct hns_roce_cmdq *cmd = &hr_dev->cmd; struct hns_roce_cmdq *cmd = &hr_dev->cmd;
struct device *dev = &hr_dev->pdev->dev;
struct hns_roce_cmd_context *context; struct hns_roce_cmd_context *context;
int ret = 0; struct device *dev = hr_dev->dev;
int ret;
spin_lock(&cmd->context_lock); spin_lock(&cmd->context_lock);
WARN_ON(cmd->free_head < 0); WARN_ON(cmd->free_head < 0);
...@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param, ...@@ -269,17 +184,17 @@ int hns_roce_cmd_mbox(struct hns_roce_dev *hr_dev, u64 in_param, u64 out_param,
in_modifier, op_modifier, op, in_modifier, op_modifier, op,
timeout); timeout);
} }
EXPORT_SYMBOL_GPL(hns_roce_cmd_mbox);
int hns_roce_cmd_init(struct hns_roce_dev *hr_dev) int hns_roce_cmd_init(struct hns_roce_dev *hr_dev)
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
mutex_init(&hr_dev->cmd.hcr_mutex); mutex_init(&hr_dev->cmd.hcr_mutex);
sema_init(&hr_dev->cmd.poll_sem, 1); sema_init(&hr_dev->cmd.poll_sem, 1);
hr_dev->cmd.use_events = 0; hr_dev->cmd.use_events = 0;
hr_dev->cmd.toggle = 1; hr_dev->cmd.toggle = 1;
hr_dev->cmd.max_cmds = CMD_MAX_NUM; hr_dev->cmd.max_cmds = CMD_MAX_NUM;
hr_dev->cmd.hcr = hr_dev->reg_base + ROCEE_MB1_REG;
hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev, hr_dev->cmd.pool = dma_pool_create("hns_roce_cmd", dev,
HNS_ROCE_MAILBOX_SIZE, HNS_ROCE_MAILBOX_SIZE,
HNS_ROCE_MAILBOX_SIZE, 0); HNS_ROCE_MAILBOX_SIZE, 0);
...@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox ...@@ -356,6 +271,7 @@ struct hns_roce_cmd_mailbox
return mailbox; return mailbox;
} }
EXPORT_SYMBOL_GPL(hns_roce_alloc_cmd_mailbox);
void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
struct hns_roce_cmd_mailbox *mailbox) struct hns_roce_cmd_mailbox *mailbox)
...@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev, ...@@ -366,3 +282,4 @@ void hns_roce_free_cmd_mailbox(struct hns_roce_dev *hr_dev,
dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma); dma_pool_free(hr_dev->cmd.pool, mailbox->buf, mailbox->dma);
kfree(mailbox); kfree(mailbox);
} }
EXPORT_SYMBOL_GPL(hns_roce_free_cmd_mailbox);
...@@ -36,6 +36,56 @@ ...@@ -36,6 +36,56 @@
#define HNS_ROCE_MAILBOX_SIZE 4096 #define HNS_ROCE_MAILBOX_SIZE 4096
#define HNS_ROCE_CMD_TIMEOUT_MSECS 10000 #define HNS_ROCE_CMD_TIMEOUT_MSECS 10000
enum {
/* QPC BT commands */
HNS_ROCE_CMD_WRITE_QPC_BT0 = 0x0,
HNS_ROCE_CMD_WRITE_QPC_BT1 = 0x1,
HNS_ROCE_CMD_WRITE_QPC_BT2 = 0x2,
HNS_ROCE_CMD_READ_QPC_BT0 = 0x4,
HNS_ROCE_CMD_READ_QPC_BT1 = 0x5,
HNS_ROCE_CMD_READ_QPC_BT2 = 0x6,
HNS_ROCE_CMD_DESTROY_QPC_BT0 = 0x8,
HNS_ROCE_CMD_DESTROY_QPC_BT1 = 0x9,
HNS_ROCE_CMD_DESTROY_QPC_BT2 = 0xa,
/* QPC operation */
HNS_ROCE_CMD_MODIFY_QPC = 0x41,
HNS_ROCE_CMD_QUERY_QPC = 0x42,
/* CQC BT commands */
HNS_ROCE_CMD_WRITE_CQC_BT0 = 0x10,
HNS_ROCE_CMD_WRITE_CQC_BT1 = 0x11,
HNS_ROCE_CMD_WRITE_CQC_BT2 = 0x12,
HNS_ROCE_CMD_READ_CQC_BT0 = 0x14,
HNS_ROCE_CMD_READ_CQC_BT1 = 0x15,
HNS_ROCE_CMD_READ_CQC_BT2 = 0x1b,
HNS_ROCE_CMD_DESTROY_CQC_BT0 = 0x18,
HNS_ROCE_CMD_DESTROY_CQC_BT1 = 0x19,
HNS_ROCE_CMD_DESTROY_CQC_BT2 = 0x1a,
/* MPT BT commands */
HNS_ROCE_CMD_WRITE_MPT_BT0 = 0x20,
HNS_ROCE_CMD_WRITE_MPT_BT1 = 0x21,
HNS_ROCE_CMD_WRITE_MPT_BT2 = 0x22,
HNS_ROCE_CMD_READ_MPT_BT0 = 0x24,
HNS_ROCE_CMD_READ_MPT_BT1 = 0x25,
HNS_ROCE_CMD_READ_MPT_BT2 = 0x26,
HNS_ROCE_CMD_DESTROY_MPT_BT0 = 0x28,
HNS_ROCE_CMD_DESTROY_MPT_BT1 = 0x29,
HNS_ROCE_CMD_DESTROY_MPT_BT2 = 0x2a,
/* SRQC BT commands */
HNS_ROCE_CMD_WRITE_SRQC_BT0 = 0x30,
HNS_ROCE_CMD_WRITE_SRQC_BT1 = 0x31,
HNS_ROCE_CMD_WRITE_SRQC_BT2 = 0x32,
HNS_ROCE_CMD_READ_SRQC_BT0 = 0x34,
HNS_ROCE_CMD_READ_SRQC_BT1 = 0x35,
HNS_ROCE_CMD_READ_SRQC_BT2 = 0x36,
HNS_ROCE_CMD_DESTROY_SRQC_BT0 = 0x38,
HNS_ROCE_CMD_DESTROY_SRQC_BT1 = 0x39,
HNS_ROCE_CMD_DESTROY_SRQC_BT2 = 0x3a,
};
enum { enum {
/* TPT commands */ /* TPT commands */
HNS_ROCE_CMD_SW2HW_MPT = 0xd, HNS_ROCE_CMD_SW2HW_MPT = 0xd,
......
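As a rough usage sketch, these opcodes are fed to hns_roce_cmd_mbox() (exported earlier in this commit) together with a DMA-able mailbox buffer. The helper below is an illustration, not code from the driver, and the mailbox contents setup is elided:

/* Hypothetical: write one QPC base-address-table (BT0) entry.
 * 'mailbox' would come from hns_roce_alloc_cmd_mailbox(). */
static int write_qpc_bt0(struct hns_roce_dev *hr_dev,
			 struct hns_roce_cmd_mailbox *mailbox,
			 unsigned long obj)
{
	return hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, obj, 0,
				 HNS_ROCE_CMD_WRITE_QPC_BT0,
				 HNS_ROCE_CMD_TIMEOUT_MSECS);
}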
...@@ -341,6 +341,7 @@ ...@@ -341,6 +341,7 @@
#define ROCEE_BT_CMD_L_REG 0x200 #define ROCEE_BT_CMD_L_REG 0x200
#define ROCEE_MB1_REG 0x210 #define ROCEE_MB1_REG 0x210
#define ROCEE_MB6_REG 0x224
#define ROCEE_DB_SQ_L_0_REG 0x230 #define ROCEE_DB_SQ_L_0_REG 0x230
#define ROCEE_DB_OTHERS_L_0_REG 0x238 #define ROCEE_DB_OTHERS_L_0_REG 0x238
#define ROCEE_QP1C_CFG0_0_REG 0x270 #define ROCEE_QP1C_CFG0_0_REG 0x270
...@@ -362,4 +363,26 @@ ...@@ -362,4 +363,26 @@
#define ROCEE_ECC_UCERR_ALM0_REG 0xB34 #define ROCEE_ECC_UCERR_ALM0_REG 0xB34
#define ROCEE_ECC_CERR_ALM0_REG 0xB40 #define ROCEE_ECC_CERR_ALM0_REG 0xB40
/* V2 ROCEE REG */
#define ROCEE_TX_CMQ_BASEADDR_L_REG 0x07000
#define ROCEE_TX_CMQ_BASEADDR_H_REG 0x07004
#define ROCEE_TX_CMQ_DEPTH_REG 0x07008
#define ROCEE_TX_CMQ_TAIL_REG 0x07010
#define ROCEE_TX_CMQ_HEAD_REG 0x07014
#define ROCEE_RX_CMQ_BASEADDR_L_REG 0x07018
#define ROCEE_RX_CMQ_BASEADDR_H_REG 0x0701c
#define ROCEE_RX_CMQ_DEPTH_REG 0x07020
#define ROCEE_RX_CMQ_TAIL_REG 0x07024
#define ROCEE_RX_CMQ_HEAD_REG 0x07028
#define ROCEE_VF_SMAC_CFG0_REG 0x12000
#define ROCEE_VF_SMAC_CFG1_REG 0x12004
#define ROCEE_VF_SGID_CFG0_REG 0x10000
#define ROCEE_VF_SGID_CFG1_REG 0x10004
#define ROCEE_VF_SGID_CFG2_REG 0x10008
#define ROCEE_VF_SGID_CFG3_REG 0x1000c
#define ROCEE_VF_SGID_CFG4_REG 0x10010
#endif /* _HNS_ROCE_COMMON_H */ #endif /* _HNS_ROCE_COMMON_H */
...@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq, ...@@ -58,7 +58,7 @@ static void hns_roce_ib_cq_event(struct hns_roce_cq *hr_cq,
if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID && if (event_type != HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR && event_type != HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR &&
event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) { event_type != HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW) {
dev_err(&hr_dev->pdev->dev, dev_err(hr_dev->dev,
"hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n", "hns_roce_ib: Unexpected event type 0x%x on CQ %06lx\n",
event_type, hr_cq->cqn); event_type, hr_cq->cqn);
return; return;
...@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, ...@@ -85,17 +85,23 @@ static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent,
struct hns_roce_uar *hr_uar, struct hns_roce_uar *hr_uar,
struct hns_roce_cq *hr_cq, int vector) struct hns_roce_cq *hr_cq, int vector)
{ {
struct hns_roce_cmd_mailbox *mailbox = NULL; struct hns_roce_cmd_mailbox *mailbox;
struct hns_roce_cq_table *cq_table = NULL; struct hns_roce_hem_table *mtt_table;
struct device *dev = &hr_dev->pdev->dev; struct hns_roce_cq_table *cq_table;
struct device *dev = hr_dev->dev;
dma_addr_t dma_handle; dma_addr_t dma_handle;
u64 *mtts = NULL; u64 *mtts;
int ret = 0; int ret;
cq_table = &hr_dev->cq_table; cq_table = &hr_dev->cq_table;
/* Get the physical address of cq buf */ /* Get the physical address of cq buf */
mtts = hns_roce_table_find(&hr_dev->mr_table.mtt_table, if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
mtt_table = &hr_dev->mr_table.mtt_cqe_table;
else
mtt_table = &hr_dev->mr_table.mtt_table;
mtts = hns_roce_table_find(hr_dev, mtt_table,
hr_mtt->first_seg, &dma_handle); hr_mtt->first_seg, &dma_handle);
if (!mtts) { if (!mtts) {
dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n"); dev_err(dev, "CQ alloc.Failed to find cq buf addr.\n");
...@@ -182,21 +188,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev, ...@@ -182,21 +188,22 @@ static int hns_roce_hw2sw_cq(struct hns_roce_dev *dev,
void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
{ {
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
int ret; int ret;
ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn); ret = hns_roce_hw2sw_cq(hr_dev, NULL, hr_cq->cqn);
if (ret) if (ret)
dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret, dev_err(dev, "HW2SW_CQ failed (%d) for CQN %06lx\n", ret,
hr_cq->cqn); hr_cq->cqn);
if (hr_dev->eq_table.eq) {
/* Wait for the interrupt handling procedure to complete */ /* Wait for the interrupt handling procedure to complete */
synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq); synchronize_irq(hr_dev->eq_table.eq[hr_cq->vector].irq);
/* wait until all interrupts are processed */ /* wait until all interrupts are processed */
if (atomic_dec_and_test(&hr_cq->refcount)) if (atomic_dec_and_test(&hr_cq->refcount))
complete(&hr_cq->free); complete(&hr_cq->free);
wait_for_completion(&hr_cq->free); wait_for_completion(&hr_cq->free);
}
spin_lock_irq(&cq_table->lock); spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, hr_cq->cqn); radix_tree_delete(&cq_table->tree, hr_cq->cqn);
...@@ -205,6 +212,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) ...@@ -205,6 +212,7 @@ void hns_roce_free_cq(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)
hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn); hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);
hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR); hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);
} }
EXPORT_SYMBOL_GPL(hns_roce_free_cq);
static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
struct ib_ucontext *context, struct ib_ucontext *context,
...@@ -218,6 +226,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev, ...@@ -218,6 +226,10 @@ static int hns_roce_ib_get_cq_umem(struct hns_roce_dev *hr_dev,
if (IS_ERR(*umem)) if (IS_ERR(*umem))
return PTR_ERR(*umem); return PTR_ERR(*umem);
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
else
buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem), ret = hns_roce_mtt_init(hr_dev, ib_umem_page_count(*umem),
(*umem)->page_shift, &buf->hr_mtt); (*umem)->page_shift, &buf->hr_mtt);
if (ret) if (ret)
...@@ -247,6 +259,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev, ...@@ -247,6 +259,11 @@ static int hns_roce_ib_alloc_cq_buf(struct hns_roce_dev *hr_dev,
if (ret) if (ret)
goto out; goto out;
if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
buf->hr_mtt.mtt_type = MTT_TYPE_CQE;
else
buf->hr_mtt.mtt_type = MTT_TYPE_WQE;
ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages, ret = hns_roce_mtt_init(hr_dev, buf->hr_buf.npages,
buf->hr_buf.page_shift, &buf->hr_mtt); buf->hr_buf.page_shift, &buf->hr_mtt);
if (ret) if (ret)
...@@ -281,13 +298,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -281,13 +298,13 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_ib_create_cq ucmd; struct hns_roce_ib_create_cq ucmd;
struct hns_roce_cq *hr_cq = NULL; struct hns_roce_cq *hr_cq = NULL;
struct hns_roce_uar *uar = NULL; struct hns_roce_uar *uar = NULL;
int vector = attr->comp_vector; int vector = attr->comp_vector;
int cq_entries = attr->cqe; int cq_entries = attr->cqe;
int ret = 0; int ret;
if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) { if (cq_entries < 1 || cq_entries > hr_dev->caps.max_cqes) {
dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n", dev_err(dev, "Creat CQ failed. entries=%d, max=%d\n",
...@@ -295,13 +312,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -295,13 +312,12 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
hr_cq = kmalloc(sizeof(*hr_cq), GFP_KERNEL); hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);
if (!hr_cq) if (!hr_cq)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
/* In v1 engine, parameter verification */ if (hr_dev->caps.min_cqes)
if (cq_entries < HNS_ROCE_MIN_CQE_NUM) cq_entries = max(cq_entries, hr_dev->caps.min_cqes);
cq_entries = HNS_ROCE_MIN_CQE_NUM;
cq_entries = roundup_pow_of_two((unsigned int)cq_entries); cq_entries = roundup_pow_of_two((unsigned int)cq_entries);
hr_cq->ib_cq.cqe = cq_entries - 1; hr_cq->ib_cq.cqe = cq_entries - 1;
...@@ -335,8 +351,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -335,8 +351,8 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
} }
uar = &hr_dev->priv_uar; uar = &hr_dev->priv_uar;
hr_cq->cq_db_l = hr_dev->reg_base + ROCEE_DB_OTHERS_L_0_REG + hr_cq->cq_db_l = hr_dev->reg_base + hr_dev->odb_offset +
0x1000 * uar->index; DB_REG_OFFSET * uar->index;
} }
/* Allocate cq index, fill cq_context */ /* Allocate cq index, fill cq_context */
...@@ -353,7 +369,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -353,7 +369,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
* problems if tptr is set to zero here, so we initialize it in user * problems if tptr is set to zero here, so we initialize it in user
* space. * space.
*/ */
if (!context) if (!context && hr_cq->tptr_addr)
*hr_cq->tptr_addr = 0; *hr_cq->tptr_addr = 0;
/* Get created cq handler and carry out event */ /* Get created cq handler and carry out event */
...@@ -385,6 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev, ...@@ -385,6 +401,7 @@ struct ib_cq *hns_roce_ib_create_cq(struct ib_device *ib_dev,
kfree(hr_cq); kfree(hr_cq);
return ERR_PTR(ret); return ERR_PTR(ret);
} }
EXPORT_SYMBOL_GPL(hns_roce_ib_create_cq);
int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
{ {
...@@ -410,10 +427,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq) ...@@ -410,10 +427,11 @@ int hns_roce_ib_destroy_cq(struct ib_cq *ib_cq)
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(hns_roce_ib_destroy_cq);
void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
{ {
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq; struct hns_roce_cq *cq;
cq = radix_tree_lookup(&hr_dev->cq_table.tree, cq = radix_tree_lookup(&hr_dev->cq_table.tree,
...@@ -429,7 +447,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn) ...@@ -429,7 +447,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type) void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)
{ {
struct hns_roce_cq_table *cq_table = &hr_dev->cq_table; struct hns_roce_cq_table *cq_table = &hr_dev->cq_table;
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_cq *cq; struct hns_roce_cq *cq;
cq = radix_tree_lookup(&cq_table->tree, cq = radix_tree_lookup(&cq_table->tree,
......
...@@ -78,6 +78,8 @@ ...@@ -78,6 +78,8 @@
#define HNS_ROCE_MAX_GID_NUM 16 #define HNS_ROCE_MAX_GID_NUM 16
#define HNS_ROCE_GID_SIZE 16 #define HNS_ROCE_GID_SIZE 16
#define HNS_ROCE_HOP_NUM_0 0xff
#define BITMAP_NO_RR 0 #define BITMAP_NO_RR 0
#define BITMAP_RR 1 #define BITMAP_RR 1
...@@ -168,6 +170,11 @@ enum { ...@@ -168,6 +170,11 @@ enum {
HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07, HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE = 0x07,
}; };
enum hns_roce_mtt_type {
MTT_TYPE_WQE,
MTT_TYPE_CQE,
};
#define HNS_ROCE_CMD_SUCCESS 1 #define HNS_ROCE_CMD_SUCCESS 1
#define HNS_ROCE_PORT_DOWN 0 #define HNS_ROCE_PORT_DOWN 0
...@@ -232,12 +239,17 @@ struct hns_roce_hem_table { ...@@ -232,12 +239,17 @@ struct hns_roce_hem_table {
int lowmem; int lowmem;
struct mutex mutex; struct mutex mutex;
struct hns_roce_hem **hem; struct hns_roce_hem **hem;
u64 **bt_l1;
dma_addr_t *bt_l1_dma_addr;
u64 **bt_l0;
dma_addr_t *bt_l0_dma_addr;
}; };
struct hns_roce_mtt { struct hns_roce_mtt {
unsigned long first_seg; unsigned long first_seg;
int order; int order;
int page_shift; int page_shift;
enum hns_roce_mtt_type mtt_type;
}; };
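The new mtt_type field is what the CQ hunks earlier key off: hardware that uses multi-hop addressing for CQEs keeps a dedicated CQE MTT table, while older hardware shares the ordinary WQE table. A hypothetical helper (not in the driver) condensing that choice:

static void set_cq_mtt_type(struct hns_roce_dev *hr_dev,
			    struct hns_roce_mtt *mtt)
{
	/* hip08-style multi-hop CQE addressing gets its own table. */
	if (hns_roce_check_whether_mhop(hr_dev, HEM_TYPE_CQE))
		mtt->mtt_type = MTT_TYPE_CQE;
	else
		mtt->mtt_type = MTT_TYPE_WQE;
}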
/* Only support 4K page size for mr register */ /* Only support 4K page size for mr register */
...@@ -255,6 +267,19 @@ struct hns_roce_mr { ...@@ -255,6 +267,19 @@ struct hns_roce_mr {
int type; /* MR's register type */ int type; /* MR's register type */
u64 *pbl_buf;/* MR's PBL space */ u64 *pbl_buf;/* MR's PBL space */
dma_addr_t pbl_dma_addr; /* MR's PBL space PA */ dma_addr_t pbl_dma_addr; /* MR's PBL space PA */
u32 pbl_size;/* PA number in the PBL */
u64 pbl_ba;/* page table address */
u32 l0_chunk_last_num;/* L0 last number */
u32 l1_chunk_last_num;/* L1 last number */
u64 **pbl_bt_l2;/* PBL BT L2 */
u64 **pbl_bt_l1;/* PBL BT L1 */
u64 *pbl_bt_l0;/* PBL BT L0 */
dma_addr_t *pbl_l2_dma_addr;/* PBL BT L2 dma addr */
dma_addr_t *pbl_l1_dma_addr;/* PBL BT L1 dma addr */
dma_addr_t pbl_l0_dma_addr;/* PBL BT L0 dma addr */
u32 pbl_ba_pg_sz;/* BT chunk page size */
u32 pbl_buf_pg_sz;/* buf chunk page size */
u32 pbl_hop_num;/* multi-hop number */
}; };
struct hns_roce_mr_table { struct hns_roce_mr_table {
...@@ -262,6 +287,8 @@ struct hns_roce_mr_table { ...@@ -262,6 +287,8 @@ struct hns_roce_mr_table {
struct hns_roce_buddy mtt_buddy; struct hns_roce_buddy mtt_buddy;
struct hns_roce_hem_table mtt_table; struct hns_roce_hem_table mtt_table;
struct hns_roce_hem_table mtpt_table; struct hns_roce_hem_table mtpt_table;
struct hns_roce_buddy mtt_cqe_buddy;
struct hns_roce_hem_table mtt_cqe_table;
}; };
struct hns_roce_wq { struct hns_roce_wq {
...@@ -277,6 +304,12 @@ struct hns_roce_wq { ...@@ -277,6 +304,12 @@ struct hns_roce_wq {
void __iomem *db_reg_l; void __iomem *db_reg_l;
}; };
struct hns_roce_sge {
int sge_cnt; /* SGE num */
int offset;
int sge_shift;/* SGE size */
};
struct hns_roce_buf_list { struct hns_roce_buf_list {
void *buf; void *buf;
dma_addr_t map; dma_addr_t map;
...@@ -367,7 +400,6 @@ struct hns_roce_cmd_context { ...@@ -367,7 +400,6 @@ struct hns_roce_cmd_context {
struct hns_roce_cmdq { struct hns_roce_cmdq {
struct dma_pool *pool; struct dma_pool *pool;
u8 __iomem *hcr;
struct mutex hcr_mutex; struct mutex hcr_mutex;
struct semaphore poll_sem; struct semaphore poll_sem;
/* /*
...@@ -429,6 +461,9 @@ struct hns_roce_qp { ...@@ -429,6 +461,9 @@ struct hns_roce_qp {
atomic_t refcount; atomic_t refcount;
struct completion free; struct completion free;
struct hns_roce_sge sge;
u32 next_sge;
}; };
struct hns_roce_sqp { struct hns_roce_sqp {
...@@ -477,16 +512,20 @@ struct hns_roce_caps { ...@@ -477,16 +512,20 @@ struct hns_roce_caps {
u32 max_wqes; /* 16k */ u32 max_wqes; /* 16k */
u32 max_sq_desc_sz; /* 64 */ u32 max_sq_desc_sz; /* 64 */
u32 max_rq_desc_sz; /* 64 */ u32 max_rq_desc_sz; /* 64 */
u32 max_srq_desc_sz;
int max_qp_init_rdma; int max_qp_init_rdma;
int max_qp_dest_rdma; int max_qp_dest_rdma;
int num_cqs; int num_cqs;
int max_cqes; int max_cqes;
int min_cqes;
u32 min_wqes;
int reserved_cqs; int reserved_cqs;
int num_aeq_vectors; /* 1 */ int num_aeq_vectors; /* 1 */
int num_comp_vectors; /* 32 ceq */ int num_comp_vectors; /* 32 ceq */
int num_other_vectors; int num_other_vectors;
int num_mtpts; int num_mtpts;
u32 num_mtt_segs; u32 num_mtt_segs;
u32 num_cqe_segs;
int reserved_mrws; int reserved_mrws;
int reserved_uars; int reserved_uars;
int num_pds; int num_pds;
...@@ -499,16 +538,47 @@ struct hns_roce_caps { ...@@ -499,16 +538,47 @@ struct hns_roce_caps {
int qpc_entry_sz; int qpc_entry_sz;
int irrl_entry_sz; int irrl_entry_sz;
int cqc_entry_sz; int cqc_entry_sz;
u32 pbl_ba_pg_sz;
u32 pbl_buf_pg_sz;
u32 pbl_hop_num;
int aeqe_depth; int aeqe_depth;
int ceqe_depth[HNS_ROCE_COMP_VEC_NUM]; int ceqe_depth[HNS_ROCE_COMP_VEC_NUM];
enum ib_mtu max_mtu; enum ib_mtu max_mtu;
u32 qpc_bt_num;
u32 srqc_bt_num;
u32 cqc_bt_num;
u32 mpt_bt_num;
u32 qpc_ba_pg_sz;
u32 qpc_buf_pg_sz;
u32 qpc_hop_num;
u32 srqc_ba_pg_sz;
u32 srqc_buf_pg_sz;
u32 srqc_hop_num;
u32 cqc_ba_pg_sz;
u32 cqc_buf_pg_sz;
u32 cqc_hop_num;
u32 mpt_ba_pg_sz;
u32 mpt_buf_pg_sz;
u32 mpt_hop_num;
u32 mtt_ba_pg_sz;
u32 mtt_buf_pg_sz;
u32 mtt_hop_num;
u32 cqe_ba_pg_sz;
u32 cqe_buf_pg_sz;
u32 cqe_hop_num;
}; };
struct hns_roce_hw { struct hns_roce_hw {
int (*reset)(struct hns_roce_dev *hr_dev, bool enable); int (*reset)(struct hns_roce_dev *hr_dev, bool enable);
void (*hw_profile)(struct hns_roce_dev *hr_dev); int (*cmq_init)(struct hns_roce_dev *hr_dev);
void (*cmq_exit)(struct hns_roce_dev *hr_dev);
int (*hw_profile)(struct hns_roce_dev *hr_dev);
int (*hw_init)(struct hns_roce_dev *hr_dev); int (*hw_init)(struct hns_roce_dev *hr_dev);
void (*hw_exit)(struct hns_roce_dev *hr_dev); void (*hw_exit)(struct hns_roce_dev *hr_dev);
int (*post_mbox)(struct hns_roce_dev *hr_dev, u64 in_param,
u64 out_param, u32 in_modifier, u8 op_modifier, u16 op,
u16 token, int event);
int (*chk_mbox)(struct hns_roce_dev *hr_dev, unsigned long timeout);
void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index, void (*set_gid)(struct hns_roce_dev *hr_dev, u8 port, int gid_index,
union ib_gid *gid); union ib_gid *gid);
void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); void (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
...@@ -519,8 +589,11 @@ struct hns_roce_hw { ...@@ -519,8 +589,11 @@ struct hns_roce_hw {
void (*write_cqc)(struct hns_roce_dev *hr_dev, void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
dma_addr_t dma_handle, int nent, u32 vector); dma_addr_t dma_handle, int nent, u32 vector);
int (*set_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj, int step_idx);
int (*clear_hem)(struct hns_roce_dev *hr_dev, int (*clear_hem)(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, int obj); struct hns_roce_hem_table *table, int obj,
int step_idx);
int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int (*query_qp)(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr); int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr);
int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int (*modify_qp)(struct ib_qp *ibqp, const struct ib_qp_attr *attr,
...@@ -535,12 +608,13 @@ struct hns_roce_hw { ...@@ -535,12 +608,13 @@ struct hns_roce_hw {
int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); int (*poll_cq)(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr); int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr);
int (*destroy_cq)(struct ib_cq *ibcq); int (*destroy_cq)(struct ib_cq *ibcq);
void *priv;
}; };
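Every hardware-specific path now funnels through this ops table, so the core (e.g. hns_roce_cmd_mbox_post_hw() above) can call hr_dev->hw->post_mbox() without knowing the engine generation. A sketch of how a v1 backend might populate it; the struct layout is from this commit, but the member function names are assumptions:

static const struct hns_roce_hw hns_roce_hw_v1 = {
	.reset		= hns_roce_v1_reset,		/* assumed names */
	.hw_profile	= hns_roce_v1_profile,
	.hw_init	= hns_roce_v1_init,
	.hw_exit	= hns_roce_v1_exit,
	.post_mbox	= hns_roce_v1_post_mbox,	/* old hcr writes */
	.chk_mbox	= hns_roce_v1_chk_mbox,		/* old status poll */
	.set_hem	= hns_roce_v1_set_hem,
	.clear_hem	= hns_roce_v1_clear_hem,
	/* ... remaining verb ops ... */
};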
struct hns_roce_dev { struct hns_roce_dev {
struct ib_device ib_dev; struct ib_device ib_dev;
struct platform_device *pdev; struct platform_device *pdev;
struct pci_dev *pci_dev;
struct device *dev;
struct hns_roce_uar priv_uar; struct hns_roce_uar priv_uar;
const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM];
spinlock_t sm_lock; spinlock_t sm_lock;
...@@ -569,9 +643,12 @@ struct hns_roce_dev { ...@@ -569,9 +643,12 @@ struct hns_roce_dev {
int cmd_mod; int cmd_mod;
int loop_idc; int loop_idc;
u32 sdb_offset;
u32 odb_offset;
dma_addr_t tptr_dma_addr; /*only for hw v1*/ dma_addr_t tptr_dma_addr; /*only for hw v1*/
u32 tptr_size; /*only for hw v1*/ u32 tptr_size; /*only for hw v1*/
struct hns_roce_hw *hw; const struct hns_roce_hw *hw;
void *priv;
}; };
static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev)
...@@ -723,6 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, ...@@ -723,6 +800,7 @@ int hns_roce_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata); int attr_mask, struct ib_udata *udata);
void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n); void *get_recv_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_wqe(struct hns_roce_qp *hr_qp, int n); void *get_send_wqe(struct hns_roce_qp *hr_qp, int n);
void *get_send_extend_sge(struct hns_roce_qp *hr_qp, int n);
bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq, bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
struct ib_cq *ib_cq); struct ib_cq *ib_cq);
enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state); enum hns_roce_qp_state to_hns_roce_state(enum ib_qp_state state);
...@@ -749,7 +827,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn); ...@@ -749,7 +827,7 @@ void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type); void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type); void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index); int hns_get_gid_index(struct hns_roce_dev *hr_dev, u8 port, int gid_index);
int hns_roce_init(struct hns_roce_dev *hr_dev);
extern struct hns_roce_hw hns_roce_hw_v1; void hns_roce_exit(struct hns_roce_dev *hr_dev);
#endif /* _HNS_ROCE_DEVICE_H */ #endif /* _HNS_ROCE_DEVICE_H */
This diff is collapsed.
...@@ -47,6 +47,7 @@ enum { ...@@ -47,6 +47,7 @@ enum {
/* UNMAP HEM */ /* UNMAP HEM */
HEM_TYPE_MTT, HEM_TYPE_MTT,
HEM_TYPE_CQE,
HEM_TYPE_IRRL, HEM_TYPE_IRRL,
}; };
...@@ -54,6 +55,18 @@ enum { ...@@ -54,6 +55,18 @@ enum {
((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \ ((256 - sizeof(struct list_head) - 2 * sizeof(int)) / \
(sizeof(struct scatterlist))) (sizeof(struct scatterlist)))
#define check_whether_bt_num_3(type, hop_num) \
(type < HEM_TYPE_MTT && hop_num == 2)
#define check_whether_bt_num_2(type, hop_num) \
((type < HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == 2))
#define check_whether_bt_num_1(type, hop_num) \
((type < HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0) || \
(type >= HEM_TYPE_MTT && hop_num == 1) || \
(type >= HEM_TYPE_MTT && hop_num == HNS_ROCE_HOP_NUM_0))
enum { enum {
HNS_ROCE_HEM_PAGE_SHIFT = 12, HNS_ROCE_HEM_PAGE_SHIFT = 12,
HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT, HNS_ROCE_HEM_PAGE_SIZE = 1 << HNS_ROCE_HEM_PAGE_SHIFT,
...@@ -77,12 +90,23 @@ struct hns_roce_hem_iter { ...@@ -77,12 +90,23 @@ struct hns_roce_hem_iter {
int page_idx; int page_idx;
}; };
struct hns_roce_hem_mhop {
u32 hop_num;
u32 buf_chunk_size;
u32 bt_chunk_size;
u32 ba_l0_num;
u32 l0_idx;/* level 0 base address table index */
u32 l1_idx;/* level 1 base address table index */
u32 l2_idx;/* level 2 base address table index */
};
void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem); void hns_roce_free_hem(struct hns_roce_dev *hr_dev, struct hns_roce_hem *hem);
int hns_roce_table_get(struct hns_roce_dev *hr_dev, int hns_roce_table_get(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj); struct hns_roce_hem_table *table, unsigned long obj);
void hns_roce_table_put(struct hns_roce_dev *hr_dev, void hns_roce_table_put(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj); struct hns_roce_hem_table *table, unsigned long obj);
void *hns_roce_table_find(struct hns_roce_hem_table *table, unsigned long obj, void *hns_roce_table_find(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long obj,
dma_addr_t *dma_handle); dma_addr_t *dma_handle);
int hns_roce_table_get_range(struct hns_roce_dev *hr_dev, int hns_roce_table_get_range(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, struct hns_roce_hem_table *table,
...@@ -97,6 +121,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev, ...@@ -97,6 +121,10 @@ int hns_roce_init_hem_table(struct hns_roce_dev *hr_dev,
void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev, void hns_roce_cleanup_hem_table(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table); struct hns_roce_hem_table *table);
void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev); void hns_roce_cleanup_hem(struct hns_roce_dev *hr_dev);
int hns_roce_calc_hem_mhop(struct hns_roce_dev *hr_dev,
struct hns_roce_hem_table *table, unsigned long *obj,
struct hns_roce_hem_mhop *mhop);
bool hns_roce_check_whether_mhop(struct hns_roce_dev *hr_dev, u32 type);
static inline void hns_roce_hem_first(struct hns_roce_hem *hem, static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
struct hns_roce_hem_iter *iter) struct hns_roce_hem_iter *iter)
...@@ -105,7 +133,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem, ...@@ -105,7 +133,7 @@ static inline void hns_roce_hem_first(struct hns_roce_hem *hem,
iter->chunk = list_empty(&hem->chunk_list) ? NULL : iter->chunk = list_empty(&hem->chunk_list) ? NULL :
list_entry(hem->chunk_list.next, list_entry(hem->chunk_list.next,
struct hns_roce_hem_chunk, list); struct hns_roce_hem_chunk, list);
iter->page_idx = 0; iter->page_idx = 0;
} }
static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter) static inline int hns_roce_hem_last(struct hns_roce_hem_iter *iter)
......
This diff is collapsed.
...@@ -948,6 +948,11 @@ struct hns_roce_qp_context { ...@@ -948,6 +948,11 @@ struct hns_roce_qp_context {
#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \ #define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \
(((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S) (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S)
#define STATUS_MASK 0xff
#define GO_BIT_TIMEOUT_MSECS 10000
#define HCR_STATUS_OFFSET 0x18
#define HCR_GO_BIT 15
struct hns_roce_rq_db { struct hns_roce_rq_db {
u32 u32_4; u32 u32_4;
u32 u32_8; u32 u32_8;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
*/ */
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/pci.h>
#include "hns_roce_device.h" #include "hns_roce_device.h"
static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn) static int hns_roce_pd_alloc(struct hns_roce_dev *hr_dev, unsigned long *pdn)
...@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, ...@@ -60,7 +61,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev); struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = &hr_dev->pdev->dev; struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd; struct hns_roce_pd *pd;
int ret; int ret;
...@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev, ...@@ -86,6 +87,7 @@ struct ib_pd *hns_roce_alloc_pd(struct ib_device *ib_dev,
return &pd->ibpd; return &pd->ibpd;
} }
EXPORT_SYMBOL_GPL(hns_roce_alloc_pd);
int hns_roce_dealloc_pd(struct ib_pd *pd) int hns_roce_dealloc_pd(struct ib_pd *pd)
{ {
...@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd) ...@@ -94,6 +96,7 @@ int hns_roce_dealloc_pd(struct ib_pd *pd)
return 0; return 0;
} }
EXPORT_SYMBOL_GPL(hns_roce_dealloc_pd);
int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
{ {
...@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar) ...@@ -109,12 +112,17 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
uar->index = (uar->index - 1) % uar->index = (uar->index - 1) %
(hr_dev->caps.phy_num_uars - 1) + 1; (hr_dev->caps.phy_num_uars - 1) + 1;
res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0); if (!dev_is_pci(hr_dev->dev)) {
if (!res) { res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
dev_err(&hr_dev->pdev->dev, "memory resource not found!\n"); if (!res) {
return -EINVAL; dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
return -EINVAL;
}
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
} else {
uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
>> PAGE_SHIFT);
} }
uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
return 0; return 0;
} }
......
This diff is collapsed.
config INFINIBAND_I40IW config INFINIBAND_I40IW
tristate "Intel(R) Ethernet X722 iWARP Driver" tristate "Intel(R) Ethernet X722 iWARP Driver"
depends on INET && I40E depends on INET && I40E
depends on PCI
select GENERIC_ALLOCATOR select GENERIC_ALLOCATOR
---help--- ---help---
Intel(R) Ethernet X722 iWARP Driver Intel(R) Ethernet X722 iWARP Driver
......
...@@ -1229,13 +1229,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, ...@@ -1229,13 +1229,13 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont, mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
page_shift, order, access_flags); page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) { if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d", order); mlx5_ib_dbg(dev, "cache empty for order %d\n", order);
mr = NULL; mr = NULL;
} }
} else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) { } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
if (access_flags & IB_ACCESS_ON_DEMAND) { if (access_flags & IB_ACCESS_ON_DEMAND) {
err = -EINVAL; err = -EINVAL;
pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB"); pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB\n");
goto error; goto error;
} }
use_umr = false; use_umr = false;
......
...@@ -1093,7 +1093,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe) ...@@ -1093,7 +1093,7 @@ static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
rsp = &mqe->u.rsp; rsp = &mqe->u.rsp;
if (cqe_status || ext_status) { if (cqe_status || ext_status) {
pr_err("%s() cqe_status=0x%x, ext_status=0x%x,", pr_err("%s() cqe_status=0x%x, ext_status=0x%x,\n",
__func__, cqe_status, ext_status); __func__, cqe_status, ext_status);
if (rsp) { if (rsp) {
/* This is for embedded cmds. */ /* This is for embedded cmds. */
......
...@@ -658,7 +658,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp, ...@@ -658,7 +658,7 @@ static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
if (reset) { if (reset) {
status = ocrdma_mbx_rdma_stats(dev, true); status = ocrdma_mbx_rdma_stats(dev, true);
if (status) { if (status) {
pr_err("Failed to reset stats = %d", status); pr_err("Failed to reset stats = %d\n", status);
goto err; goto err;
} }
} }
......
config INFINIBAND_QEDR config INFINIBAND_QEDR
tristate "QLogic RoCE driver" tristate "QLogic RoCE driver"
depends on 64BIT && QEDE depends on 64BIT && QEDE
depends on PCI
select QED_LL2 select QED_LL2
select QED_RDMA select QED_RDMA
---help--- ---help---
......
config INFINIBAND_QIB config INFINIBAND_QIB
tristate "Intel PCIe HCA support" tristate "Intel PCIe HCA support"
depends on 64BIT && INFINIBAND_RDMAVT depends on 64BIT && INFINIBAND_RDMAVT
depends on PCI
---help--- ---help---
This is a low-level driver for Intel PCIe QLE InfiniBand host This is a low-level driver for Intel PCIe QLE InfiniBand host
channel adapters. This driver does not support the Intel channel adapters. This driver does not support the Intel
......
config INFINIBAND_RDMAVT config INFINIBAND_RDMAVT
tristate "RDMA verbs transport library" tristate "RDMA verbs transport library"
depends on 64BIT depends on 64BIT
depends on PCI
select DMA_VIRT_OPS select DMA_VIRT_OPS
---help--- ---help---
This is a common software verbs provider for RDMA networks. This is a common software verbs provider for RDMA networks.
...@@ -51,7 +51,6 @@ ...@@ -51,7 +51,6 @@
#include <net/addrconf.h> #include <net/addrconf.h>
#include <linux/inetdevice.h> #include <linux/inetdevice.h>
#include <rdma/ib_cache.h> #include <rdma/ib_cache.h>
#include <linux/pci.h>
#define DRV_VERSION "1.0.0" #define DRV_VERSION "1.0.0"
...@@ -2312,7 +2311,8 @@ static void ipoib_add_one(struct ib_device *device) ...@@ -2312,7 +2311,8 @@ static void ipoib_add_one(struct ib_device *device)
} }
if (!count) { if (!count) {
kfree(dev_list); pr_err("Failed to init port, removing it\n");
ipoib_remove_one(device, dev_list);
return; return;
} }
......
...@@ -34,7 +34,7 @@ config LNET_SELFTEST ...@@ -34,7 +34,7 @@ config LNET_SELFTEST
config LNET_XPRT_IB config LNET_XPRT_IB
tristate "LNET infiniband support" tristate "LNET infiniband support"
depends on LNET && INFINIBAND && INFINIBAND_ADDR_TRANS depends on LNET && PCI && INFINIBAND && INFINIBAND_ADDR_TRANS
default LNET && INFINIBAND default LNET && INFINIBAND
help help
This option allows the LNET users to use infiniband as an This option allows the LNET users to use infiniband as an
......