Commit f0e2dcff authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/core: Remove unused struct ib_device.flags member
  IB/core: Add IP checksum offload support
  IPoIB: Add send gather support
  IPoIB: Add high DMA feature flag
  IB/mlx4: Use multiple WQ blocks to post smaller send WQEs
  mlx4_core: Clean up struct mlx4_buf
  mlx4_core: For 64-bit systems, vmap() kernel queue buffers
  IB/mlx4: Consolidate code to get an entry from a struct mlx4_buf
parents 04a94bab 5128bdc9
@@ -64,13 +64,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-	int offset = n * sizeof (struct mlx4_cqe);
-
-	if (buf->buf.nbufs == 1)
-		return buf->buf.u.direct.buf + offset;
-	else
-		return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-			(offset & (PAGE_SIZE - 1));
+	return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -332,6 +326,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
 		MLX4_CQE_OPCODE_ERROR;
 
+	if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
+		     is_send)) {
+		printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+		return -EINVAL;
+	}
+
 	if (!*cur_qp ||
 	    (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
 		/*
@@ -354,8 +354,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 	if (is_send) {
 		wq = &(*cur_qp)->sq;
-		wqe_ctr = be16_to_cpu(cqe->wqe_index);
-		wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+		if (!(*cur_qp)->sq_signal_bits) {
+			wqe_ctr = be16_to_cpu(cqe->wqe_index);
+			wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+		}
 		wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
 		++wq->tail;
 	} else if ((*cur_qp)->ibqp.srq) {
......
@@ -120,6 +120,8 @@ struct mlx4_ib_qp {
 	u32			doorbell_qpn;
 	__be32			sq_signal_bits;
+	unsigned		sq_next_wqe;
+	int			sq_max_wqes_per_wr;
 	int			sq_spare_wqes;
 	struct mlx4_ib_wq	sq;
......
@@ -30,6 +30,8 @@
  * SOFTWARE.
  */
 
+#include <linux/log2.h>
+
 #include <rdma/ib_cache.h>
 #include <rdma/ib_pack.h>
@@ -96,11 +98,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp)
 
 static void *get_wqe(struct mlx4_ib_qp *qp, int offset)
 {
-	if (qp->buf.nbufs == 1)
-		return qp->buf.u.direct.buf + offset;
-	else
-		return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-			(offset & (PAGE_SIZE - 1));
+	return mlx4_buf_offset(&qp->buf, offset);
 }
 
 static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n)
@@ -115,16 +113,87 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n)
 
 /*
  * Stamp a SQ WQE so that it is invalid if prefetched by marking the
- * first four bytes of every 64 byte chunk with 0xffffffff, except for
- * the very first chunk of the WQE.
+ * first four bytes of every 64 byte chunk with
+ *     0x7FFFFFFF | (invalid_ownership_value << 31).
+ *
+ * When the max work request size is less than or equal to the WQE
+ * basic block size, as an optimization, we can stamp all WQEs with
+ * 0xffffffff, and skip the very first chunk of each WQE.
  */
-static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n)
+static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size)
 {
-	u32 *wqe = get_send_wqe(qp, n);
-	int i;
-
-	for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16)
-		wqe[i] = 0xffffffff;
+	u32 *wqe;
+	int i;
+	int s;
+	int ind;
+	void *buf;
+	__be32 stamp;
+
+	s = roundup(size, 1U << qp->sq.wqe_shift);
+	if (qp->sq_max_wqes_per_wr > 1) {
+		for (i = 0; i < s; i += 64) {
+			ind = (i >> qp->sq.wqe_shift) + n;
+			stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) :
+						       cpu_to_be32(0xffffffff);
+			buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
+			wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1));
+			*wqe = stamp;
+		}
+	} else {
+		buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+		for (i = 64; i < s; i += 64) {
+			wqe = buf + i;
+			*wqe = 0xffffffff;
+		}
+	}
+}
+
+static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size)
+{
+	struct mlx4_wqe_ctrl_seg *ctrl;
+	struct mlx4_wqe_inline_seg *inl;
+	void *wqe;
+	int s;
+
+	ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1));
+	s = sizeof(struct mlx4_wqe_ctrl_seg);
+
+	if (qp->ibqp.qp_type == IB_QPT_UD) {
+		struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl;
+		struct mlx4_av *av = (struct mlx4_av *)dgram->av;
+		memset(dgram, 0, sizeof *dgram);
+		av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn);
+		s += sizeof(struct mlx4_wqe_datagram_seg);
+	}
+
+	/* Pad the remainder of the WQE with an inline data segment. */
+	if (size > s) {
+		inl = wqe + s;
+		inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl));
+	}
+	ctrl->srcrb_flags = 0;
+	ctrl->fence_size = size / 16;
+	/*
+	 * Make sure descriptor is fully written before setting ownership bit
+	 * (because HW can start executing as soon as we do).
+	 */
+	wmb();
+
+	ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) |
+		(n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
+
+	stamp_send_wqe(qp, n + qp->sq_spare_wqes, size);
+}
+
+/* Post NOP WQE to prevent wrap-around in the middle of WR */
+static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind)
+{
+	unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1));
+
+	if (unlikely(s < qp->sq_max_wqes_per_wr)) {
+		post_nop_wqe(qp, ind, s << qp->sq.wqe_shift);
+		ind += s;
+	}
+	return ind;
 }
 
 static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
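pad_wraparound() above guarantees that a multi-block WR never straddles the end of the send queue: when fewer basic blocks remain before the wrap point than the worst-case WR needs, the leftover blocks are consumed by a NOP WQE. A small standalone sketch of that arithmetic, using made-up values for the queue size, worst-case WR size and current index (all hypothetical, for illustration only):

/* gcc -o pad_demo pad_demo.c && ./pad_demo */
#include <stdio.h>

int main(void)
{
	unsigned wqe_cnt = 256;		/* SQ size in 64-byte basic blocks (power of 2) */
	unsigned max_wqes_per_wr = 4;	/* worst-case WR size, in basic blocks */
	unsigned ind = 254;		/* index of the next free basic block */

	/* Basic blocks left before the queue wraps around. */
	unsigned left = wqe_cnt - (ind & (wqe_cnt - 1));

	if (left < max_wqes_per_wr)
		/* A full-sized WR could straddle the wrap point, so burn the
		 * remaining blocks with a NOP WQE and start the real WR at
		 * the beginning of the buffer. */
		printf("post a %u-block NOP, next WR starts at index %u\n",
		       left, ind + left);
	else
		printf("no padding needed, WR starts at index %u\n", ind);

	return 0;
}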
@@ -241,6 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 			      enum ib_qp_type type, struct mlx4_ib_qp *qp)
 {
+	int s;
+
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr  > dev->dev->caps.max_wqes  ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
@@ -256,20 +327,74 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	    cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg)
 		return -EINVAL;
 
-	qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge *
-							sizeof (struct mlx4_wqe_data_seg),
-							cap->max_inline_data +
-							sizeof (struct mlx4_wqe_inline_seg)) +
-						    send_wqe_overhead(type)));
-	qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) /
-		sizeof (struct mlx4_wqe_data_seg);
+	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
+		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
+		send_wqe_overhead(type);
 
 	/*
-	 * We need to leave 2 KB + 1 WQE of headroom in the SQ to
-	 * allow HW to prefetch.
+	 * Hermon supports shrinking WQEs, such that a single work
+	 * request can include multiple units of 1 << wqe_shift.  This
+	 * way, work requests can differ in size, and do not have to
+	 * be a power of 2 in size, saving memory and speeding up send
+	 * WR posting.  Unfortunately, if we do this then the
+	 * wqe_index field in CQEs can't be used to look up the WR ID
+	 * anymore, so we do this only if selective signaling is off.
+	 *
+	 * Further, on 32-bit platforms, we can't use vmap() to make
+	 * the QP buffer virtually contiguous.  Thus we have to use
+	 * constant-sized WRs to make sure a WR is always fully within
+	 * a single page-sized chunk.
+	 *
+	 * Finally, we use NOP work requests to pad the end of the
+	 * work queue, to avoid wrap-around in the middle of WR.  We
+	 * set NEC bit to avoid getting completions with error for
+	 * these NOP WRs, but since NEC is only supported starting
+	 * with firmware 2.2.232, we use constant-sized WRs for older
+	 * firmware.
+	 *
+	 * And, since MLX QPs only support SEND, we use constant-sized
+	 * WRs in this case.
+	 *
+	 * We look for the smallest value of wqe_shift such that the
+	 * resulting number of wqes does not exceed device
+	 * capabilities.
+	 *
+	 * We set WQE size to at least 64 bytes, this way stamping
+	 * invalidates each WQE.
 	 */
-	qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1;
-	qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes);
+	if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC &&
+	    qp->sq_signal_bits && BITS_PER_LONG == 64 &&
+	    type != IB_QPT_SMI && type != IB_QPT_GSI)
+		qp->sq.wqe_shift = ilog2(64);
+	else
+		qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s));
+
+	for (;;) {
+		if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz)
+			return -EINVAL;
+
+		qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift);
+
+		/*
+		 * We need to leave 2 KB + 1 WR of headroom in the SQ to
+		 * allow HW to prefetch.
+		 */
+		qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr;
+		qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr *
+						    qp->sq_max_wqes_per_wr +
+						    qp->sq_spare_wqes);
+
+		if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes)
+			break;
+
+		if (qp->sq_max_wqes_per_wr <= 1)
+			return -EINVAL;
+
+		++qp->sq.wqe_shift;
+	}
+
+	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
+			 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -281,7 +406,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 		qp->sq.offset = 0;
 	}
 
-	cap->max_send_wr  = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes;
+	cap->max_send_wr  = qp->sq.max_post =
+		(qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr;
 	cap->max_send_sge = qp->sq.max_gs;
 	/* We don't support inline sends for kernel QPs (yet) */
 	cap->max_inline_data = 0;
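To see how the sizing loop above converges, here is a standalone sketch of the same search for the smallest wqe_shift, with hypothetical numbers standing in for the WR size and device limits (in the driver these come from the QP caps and dev->dev->caps):

#include <stdio.h>

/* Round up to the next power of 2 (small illustrative values only). */
static unsigned pow2_roundup(unsigned x)
{
	unsigned p = 1;
	while (p < x)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned s = 400;		/* bytes needed by the largest WR (hypothetical) */
	unsigned max_send_wr = 1000;	/* requested SQ depth (hypothetical) */
	unsigned max_wqes = 16384;	/* device limit on WQEs (hypothetical) */
	unsigned max_desc_sz = 1008;	/* device limit on one WQE (hypothetical) */
	unsigned wqe_shift = 6;		/* start from 64-byte basic blocks */

	for (;;) {
		unsigned wqe_size = 1U << wqe_shift;
		unsigned wqes_per_wr, spare, wqe_cnt;

		if (wqe_size > max_desc_sz)
			return 1;	/* cannot satisfy the request */

		wqes_per_wr = (s + wqe_size - 1) / wqe_size;	/* DIV_ROUND_UP */
		spare = (2048 >> wqe_shift) + wqes_per_wr;	/* prefetch headroom */
		wqe_cnt = pow2_roundup(max_send_wr * wqes_per_wr + spare);

		if (wqe_cnt <= max_wqes) {
			printf("wqe_shift=%u, %u block(s) per WR, wqe_cnt=%u\n",
			       wqe_shift, wqes_per_wr, wqe_cnt);
			return 0;
		}
		if (wqes_per_wr <= 1)
			return 1;	/* even full-sized WQEs do not fit */
		++wqe_shift;
	}
}

With these numbers the loop accepts wqe_shift = 6 on the first pass (7 basic blocks per WR, 8192 WQEs); with a smaller device limit, wqe_shift would keep growing until each WR fits in fewer, larger blocks or the request is rejected.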
@@ -327,6 +453,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	qp->rq.tail	    = 0;
 	qp->sq.head	    = 0;
 	qp->sq.tail	    = 0;
+	qp->sq_next_wqe     = 0;
+
+	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
+		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
+	else
+		qp->sq_signal_bits = 0;
 
 	err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp);
 	if (err)
@@ -417,11 +549,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	 */
 	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
 
-	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
-		qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE);
-	else
-		qp->sq_signal_bits = 0;
-
 	qp->mqp.event = mlx4_ib_qp_event;
 
 	return 0;
@@ -916,7 +1043,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 			ctrl = get_send_wqe(qp, i);
 			ctrl->owner_opcode = cpu_to_be32(1 << 31);
 
-			stamp_send_wqe(qp, i);
+			stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift);
 		}
 	}
@@ -969,6 +1096,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		qp->rq.tail = 0;
 		qp->sq.head = 0;
 		qp->sq.tail = 0;
+		qp->sq_next_wqe = 0;
 		if (!ibqp->srq)
 			*qp->db.db  = 0;
 	}
@@ -1278,13 +1406,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	int nreq;
 	int err = 0;
-	int ind;
-	int size;
+	unsigned ind;
+	int uninitialized_var(stamp);
+	int uninitialized_var(size);
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
-	ind = qp->sq.head;
+	ind = qp->sq_next_wqe;
 
 	for (nreq = 0; wr; ++nreq, wr = wr->next) {
 		if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
@@ -1300,7 +1429,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		}
 
 		ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1));
-		qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
+		qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id;
 
 		ctrl->srcrb_flags =
 			(wr->send_flags & IB_SEND_SIGNALED ?
@@ -1413,16 +1542,23 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] |
 			(ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0);
 
+		stamp = ind + qp->sq_spare_wqes;
+		ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift);
+
 		/*
 		 * We can improve latency by not stamping the last
 		 * send queue WQE until after ringing the doorbell, so
 		 * only stamp here if there are still more WQEs to post.
+		 *
+		 * Same optimization applies to padding with NOP wqe
+		 * in case of WQE shrinking (used to prevent wrap-around
+		 * in the middle of WR).
 		 */
-		if (wr->next)
-			stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) &
-				       (qp->sq.wqe_cnt - 1));
-
-		++ind;
+		if (wr->next) {
+			stamp_send_wqe(qp, stamp, size * 16);
+			ind = pad_wraparound(qp, ind);
+		}
 	}
 
 out:
@@ -1444,8 +1580,10 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		 */
 		mmiowb();
 
-		stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) &
-			       (qp->sq.wqe_cnt - 1));
+		stamp_send_wqe(qp, stamp, size * 16);
+
+		ind = pad_wraparound(qp, ind);
+		qp->sq_next_wqe = ind;
 	}
 
 	spin_unlock_irqrestore(&qp->sq.lock, flags);
......
@@ -38,13 +38,7 @@
 
 static void *get_wqe(struct mlx4_ib_srq *srq, int n)
 {
-	int offset = n << srq->msrq.wqe_shift;
-
-	if (srq->buf.nbufs == 1)
-		return srq->buf.u.direct.buf + offset;
-	else
-		return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-			(offset & (PAGE_SIZE - 1));
+	return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
 }
 
 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
......
@@ -143,7 +143,7 @@ struct ipoib_rx_buf {
 struct ipoib_tx_buf {
 	struct sk_buff *skb;
-	u64		mapping;
+	u64		mapping[MAX_SKB_FRAGS + 1];
 };
 
 struct ib_cm_id;
@@ -296,7 +296,7 @@ struct ipoib_dev_priv {
 	struct ipoib_tx_buf *tx_ring;
 	unsigned	     tx_head;
 	unsigned	     tx_tail;
-	struct ib_sge	     tx_sge;
+	struct ib_sge	     tx_sge[MAX_SKB_FRAGS + 1];
 	struct ib_send_wr    tx_wr;
 	unsigned	     tx_outstanding;
......
@@ -634,8 +634,8 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 {
 	struct ib_send_wr *bad_wr;
 
-	priv->tx_sge.addr	= addr;
-	priv->tx_sge.length	= len;
+	priv->tx_sge[0].addr	= addr;
+	priv->tx_sge[0].length	= len;
 
 	priv->tx_wr.wr_id	= wr_id | IPOIB_OP_CM;
@@ -676,7 +676,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 		return;
 	}
 
-	tx_req->mapping = addr;
+	tx_req->mapping[0] = addr;
 
 	if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
 			       addr, skb->len))) {
@@ -715,7 +715,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	tx_req = &tx->tx_ring[wr_id];
 
-	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+	ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);
 
 	/* FIXME: is this right? Shouldn't we only increment on success? */
 	++dev->stats.tx_packets;
@@ -1110,7 +1110,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 	while ((int) p->tx_tail - (int) p->tx_head < 0) {
 		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
+		ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
 				    DMA_TO_DEVICE);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
......
@@ -239,6 +239,54 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
 			   "for buf %d\n", wr_id);
 }
 
+static int ipoib_dma_map_tx(struct ib_device *ca,
+			    struct ipoib_tx_buf *tx_req)
+{
+	struct sk_buff *skb = tx_req->skb;
+	u64 *mapping = tx_req->mapping;
+	int i;
+
+	mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+				       DMA_TO_DEVICE);
+	if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+		return -EIO;
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+						 frag->page_offset, frag->size,
+						 DMA_TO_DEVICE);
+		if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+			goto partial_error;
+	}
+	return 0;
+
+partial_error:
+	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+	for (; i > 0; --i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+		ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+	}
+	return -EIO;
+}
+
+static void ipoib_dma_unmap_tx(struct ib_device *ca,
+			       struct ipoib_tx_buf *tx_req)
+{
+	struct sk_buff *skb = tx_req->skb;
+	u64 *mapping = tx_req->mapping;
+	int i;
+
+	ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+		ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+				  DMA_TO_DEVICE);
+	}
+}
+
 static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -257,8 +305,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	tx_req = &priv->tx_ring[wr_id];
 
-	ib_dma_unmap_single(priv->ca, tx_req->mapping,
-			    tx_req->skb->len, DMA_TO_DEVICE);
+	ipoib_dma_unmap_tx(priv->ca, tx_req);
 
 	++dev->stats.tx_packets;
 	dev->stats.tx_bytes += tx_req->skb->len;
@@ -341,13 +388,20 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
 			    unsigned int wr_id,
 			    struct ib_ah *address, u32 qpn,
-			    u64 addr, int len)
+			    u64 *mapping, int headlen,
+			    skb_frag_t *frags,
+			    int nr_frags)
 {
 	struct ib_send_wr *bad_wr;
+	int i;
 
-	priv->tx_sge.addr	= addr;
-	priv->tx_sge.length	= len;
+	priv->tx_sge[0].addr	= mapping[0];
+	priv->tx_sge[0].length	= headlen;
+	for (i = 0; i < nr_frags; ++i) {
+		priv->tx_sge[i + 1].addr = mapping[i + 1];
+		priv->tx_sge[i + 1].length = frags[i].size;
+	}
+	priv->tx_wr.num_sge	= nr_frags + 1;
 	priv->tx_wr.wr_id	= wr_id;
 	priv->tx_wr.wr.ud.remote_qpn = qpn;
 	priv->tx_wr.wr.ud.ah	= address;
@@ -360,7 +414,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
 	struct ipoib_dev_priv *priv = netdev_priv(dev);
 	struct ipoib_tx_buf *tx_req;
-	u64 addr;
 
 	if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
 		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
*/ */
tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
tx_req->skb = skb; tx_req->skb = skb;
addr = ib_dma_map_single(priv->ca, skb->data, skb->len, if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
DMA_TO_DEVICE);
if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
++dev->stats.tx_errors; ++dev->stats.tx_errors;
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
return; return;
} }
tx_req->mapping = addr;
if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
address->ah, qpn, addr, skb->len))) { address->ah, qpn,
tx_req->mapping, skb_headlen(skb),
skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
ipoib_warn(priv, "post_send failed\n"); ipoib_warn(priv, "post_send failed\n");
++dev->stats.tx_errors; ++dev->stats.tx_errors;
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); ipoib_dma_unmap_tx(priv->ca, tx_req);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} else { } else {
dev->trans_start = jiffies; dev->trans_start = jiffies;
@@ -615,10 +667,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
 			while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
 				tx_req = &priv->tx_ring[priv->tx_tail &
 							(ipoib_sendq_size - 1)];
-				ib_dma_unmap_single(priv->ca,
-						    tx_req->mapping,
-						    tx_req->skb->len,
-						    DMA_TO_DEVICE);
+				ipoib_dma_unmap_tx(priv->ca, tx_req);
 				dev_kfree_skb_any(tx_req->skb);
 				++priv->tx_tail;
 				--priv->tx_outstanding;
......
@@ -965,7 +965,9 @@ static void ipoib_setup(struct net_device *dev)
 	dev->addr_len		 = INFINIBAND_ALEN;
 	dev->type		 = ARPHRD_INFINIBAND;
 	dev->tx_queue_len	 = ipoib_sendq_size * 2;
-	dev->features		 = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
+	dev->features		 = (NETIF_F_VLAN_CHALLENGED	|
+				    NETIF_F_LLTX		|
+				    NETIF_F_HIGHDMA);
 
 	/* MTU will be reset when mcast join happens */
 	dev->mtu		 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
......
@@ -157,6 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	};
 	int ret, size;
+	int i;
 
 	priv->pd = ib_alloc_pd(priv->ca);
 	if (IS_ERR(priv->pd)) {
init_attr.send_cq = priv->cq; init_attr.send_cq = priv->cq;
init_attr.recv_cq = priv->cq; init_attr.recv_cq = priv->cq;
if (dev->features & NETIF_F_SG)
init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
priv->qp = ib_create_qp(priv->pd, &init_attr); priv->qp = ib_create_qp(priv->pd, &init_attr);
if (IS_ERR(priv->qp)) { if (IS_ERR(priv->qp)) {
printk(KERN_WARNING "%s: failed to create QP\n", ca->name); printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
@@ -201,11 +205,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
 	priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
 	priv->dev->dev_addr[3] = (priv->qp->qp_num      ) & 0xff;
 
-	priv->tx_sge.lkey	= priv->mr->lkey;
+	for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
+		priv->tx_sge[i].lkey = priv->mr->lkey;
 
 	priv->tx_wr.opcode	= IB_WR_SEND;
-	priv->tx_wr.sg_list	= &priv->tx_sge;
-	priv->tx_wr.num_sge	= 1;
+	priv->tx_wr.sg_list	= priv->tx_sge;
 	priv->tx_wr.send_flags	= IB_SEND_SIGNALED;
 
 	return 0;
@@ -116,40 +116,53 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		buf->nbufs       = 1;
 		buf->npages      = 1;
 		buf->page_shift  = get_order(size) + PAGE_SHIFT;
-		buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
-						       size, &t, GFP_KERNEL);
-		if (!buf->u.direct.buf)
+		buf->direct.buf  = dma_alloc_coherent(&dev->pdev->dev,
+						      size, &t, GFP_KERNEL);
+		if (!buf->direct.buf)
 			return -ENOMEM;
 
-		buf->u.direct.map = t;
+		buf->direct.map = t;
 
 		while (t & ((1 << buf->page_shift) - 1)) {
 			--buf->page_shift;
 			buf->npages *= 2;
 		}
 
-		memset(buf->u.direct.buf, 0, size);
+		memset(buf->direct.buf, 0, size);
 	} else {
 		int i;
 
 		buf->nbufs      = (size + PAGE_SIZE - 1) / PAGE_SIZE;
 		buf->npages     = buf->nbufs;
 		buf->page_shift = PAGE_SHIFT;
-		buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
-					   GFP_KERNEL);
-		if (!buf->u.page_list)
+		buf->page_list  = kzalloc(buf->nbufs * sizeof *buf->page_list,
+					  GFP_KERNEL);
+		if (!buf->page_list)
 			return -ENOMEM;
 
 		for (i = 0; i < buf->nbufs; ++i) {
-			buf->u.page_list[i].buf =
+			buf->page_list[i].buf =
 				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
 						   &t, GFP_KERNEL);
-			if (!buf->u.page_list[i].buf)
+			if (!buf->page_list[i].buf)
 				goto err_free;
 
-			buf->u.page_list[i].map = t;
+			buf->page_list[i].map = t;
 
-			memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
 		}
+
+		if (BITS_PER_LONG == 64) {
+			struct page **pages;
+			pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+			if (!pages)
+				goto err_free;
+			for (i = 0; i < buf->nbufs; ++i)
+				pages[i] = virt_to_page(buf->page_list[i].buf);
+			buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+			kfree(pages);
+			if (!buf->direct.buf)
+				goto err_free;
+		}
 	}
@@ -167,15 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
 	int i;
 
 	if (buf->nbufs == 1)
-		dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-				  buf->u.direct.map);
+		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+				  buf->direct.map);
 	else {
+		if (BITS_PER_LONG == 64)
+			vunmap(buf->direct.buf);
+
 		for (i = 0; i < buf->nbufs; ++i)
-			if (buf->u.page_list[i].buf)
+			if (buf->page_list[i].buf)
 				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  buf->u.page_list[i].buf,
-						  buf->u.page_list[i].map);
-		kfree(buf->u.page_list);
+						  buf->page_list[i].buf,
+						  buf->page_list[i].map);
+		kfree(buf->page_list);
 	}
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
 	for (i = 0; i < buf->npages; ++i)
 		if (buf->nbufs == 1)
-			page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+			page_list[i] = buf->direct.map + (i << buf->page_shift);
 		else
-			page_list[i] = buf->u.page_list[i].map;
+			page_list[i] = buf->page_list[i].map;
 
 	err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
......
@@ -133,6 +133,11 @@ enum {
 	MLX4_STAT_RATE_OFFSET	= 5
 };
 
+static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
+{
+	return (major << 32) | (minor << 16) | subminor;
+}
+
 struct mlx4_caps {
 	u64			fw_ver;
 	int			num_ports;
@@ -189,10 +194,8 @@ struct mlx4_buf_list {
 };
 
 struct mlx4_buf {
-	union {
-		struct mlx4_buf_list	direct;
-		struct mlx4_buf_list   *page_list;
-	} u;
+	struct mlx4_buf_list	direct;
+	struct mlx4_buf_list   *page_list;
 	int			nbufs;
 	int			npages;
 	int			page_shift;
@@ -308,6 +311,14 @@ struct mlx4_init_port_param {
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
 		   struct mlx4_buf *buf);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
+static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
+{
+	if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+		return buf->direct.buf + offset;
+	else
+		return buf->page_list[offset >> PAGE_SHIFT].buf +
+			(offset & (PAGE_SIZE - 1));
+}
 
 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
......
@@ -154,7 +154,11 @@ struct mlx4_qp_context {
 	u32			reserved5[10];
 };
 
+/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
+#define MLX4_FW_VER_WQE_CTRL_NEC	mlx4_fw_ver(2, 2, 232)
+
 enum {
+	MLX4_WQE_CTRL_NEC	= 1 << 29,
 	MLX4_WQE_CTRL_FENCE	= 1 << 6,
 	MLX4_WQE_CTRL_CQ_UPDATE	= 3 << 2,
 	MLX4_WQE_CTRL_SOLICITED	= 1 << 1,
......
@@ -95,7 +95,15 @@ enum ib_device_cap_flags {
 	IB_DEVICE_N_NOTIFY_CQ		= (1<<14),
 	IB_DEVICE_ZERO_STAG		= (1<<15),
 	IB_DEVICE_SEND_W_INV		= (1<<16),
-	IB_DEVICE_MEM_WINDOW		= (1<<17)
+	IB_DEVICE_MEM_WINDOW		= (1<<17),
+	/*
+	 * Devices should set IB_DEVICE_UD_IP_CSUM if they support
+	 * insertion of UDP and TCP checksum on outgoing UD IPoIB
+	 * messages and can verify the validity of checksum for
+	 * incoming messages.  Setting this flag implies that the
+	 * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
+	 */
+	IB_DEVICE_UD_IP_CSUM		= (1<<18),
 };
 
 enum ib_atomic_cap {
@@ -431,6 +439,7 @@ struct ib_wc {
 	u8			sl;
 	u8			dlid_path_bits;
 	u8			port_num;	/* valid only for DR SMPs on switches */
+	int			csum_ok;
 };
 
 enum ib_cq_notify_flags {
@@ -615,7 +624,8 @@ enum ib_send_flags {
 	IB_SEND_FENCE		= 1,
 	IB_SEND_SIGNALED	= (1<<1),
 	IB_SEND_SOLICITED	= (1<<2),
-	IB_SEND_INLINE		= (1<<3)
+	IB_SEND_INLINE		= (1<<3),
+	IB_SEND_IP_CSUM		= (1<<4)
 };
 
 struct ib_sge {
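Taken together, the ib_verbs.h additions in this merge (the IB_DEVICE_UD_IP_CSUM capability, the csum_ok field in struct ib_wc, and the IB_SEND_IP_CSUM send flag) are meant to be used along these lines by a consumer such as IPoIB. This is only an illustrative fragment, not code from this commit; the priv, dev, skb and wc names are stand-ins, and the capability query is assumed to have been done elsewhere:

	/* At device init: only advertise checksum offload to the stack
	 * if the HCA reports the capability (hypothetical query). */
	if (device_attr.device_cap_flags & IB_DEVICE_UD_IP_CSUM)
		dev->features |= NETIF_F_IP_CSUM;

	/* On transmit: ask the HCA to insert the IP/TCP/UDP checksum. */
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;

	/* On a receive completion: trust the HCA's verification result. */
	if (wc->csum_ok)
		skb->ip_summed = CHECKSUM_UNNECESSARY;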
@@ -890,8 +900,6 @@ struct ib_device {
 	int			     *pkey_tbl_len;
 	int			     *gid_tbl_len;
 
-	u32			      flags;
-
 	int			      num_comp_vectors;
 
 	struct iw_cm_verbs	     *iwcm;
......