Commit b832be1e authored by Eli Cohen, committed by Roland Dreier

IB/mlx4: Add IPoIB LSO support

Add TSO support to the mlx4_ib driver.
Signed-off-by: Eli Cohen <eli@mellanox.co.il>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 40ca1988
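
The diff below wires LSO into the mlx4 completion, device-capability, QP and WQE paths. For context, a kernel ULP consumes the feature through the matching core verbs additions (IB_DEVICE_UD_TSO, IB_QP_CREATE_IPOIB_UD_LSO, IB_WR_LSO and the wr.ud.header/hlen/mss fields) roughly as sketched here. This is a minimal sketch, not part of the commit: the helper names, queue sizes, header length and MSS values are illustrative only.

#include <rdma/ib_verbs.h>

/* Hypothetical helper: create a UD QP that may post IB_WR_LSO sends.
 * Assumes the IB_DEVICE_UD_TSO / IB_QP_CREATE_IPOIB_UD_LSO / IB_WR_LSO
 * definitions added by the companion core verbs patches.
 */
static struct ib_qp *create_lso_ud_qp(struct ib_pd *pd, struct ib_cq *cq,
				      struct ib_device_attr *attr)
{
	struct ib_qp_init_attr init_attr = {
		.send_cq     = cq,
		.recv_cq     = cq,
		.cap	     = { .max_send_wr = 64, .max_recv_wr = 64,
				 .max_send_sge = 2, .max_recv_sge = 1 },
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type     = IB_QPT_UD,
	};

	/* Only ask for LSO when the device advertises it. */
	if (attr->device_cap_flags & IB_DEVICE_UD_TSO)
		init_attr.create_flags |= IB_QP_CREATE_IPOIB_UD_LSO;

	return ib_create_qp(pd, &init_attr);
}

/* Posting a segmented send: the ULP supplies the replicated header
 * (header/hlen) and the per-segment size (mss); the payload stays in
 * the scatter/gather list as usual.  Values passed in are made up.
 */
static int post_lso_send(struct ib_qp *qp, struct ib_ah *ah,
			 u32 dqpn, u32 qkey,
			 void *hdr, int hlen, int mss,
			 struct ib_sge *sgl, int num_sge)
{
	struct ib_send_wr wr = {
		.opcode     = IB_WR_LSO,
		.sg_list    = sgl,
		.num_sge    = num_sge,
		.send_flags = IB_SEND_SIGNALED,
	};
	struct ib_send_wr *bad_wr;

	wr.wr.ud.ah	     = ah;
	wr.wr.ud.remote_qpn  = dqpn;
	wr.wr.ud.remote_qkey = qkey;
	wr.wr.ud.header      = hdr;
	wr.wr.ud.hlen	     = hlen;
	wr.wr.ud.mss	     = mss;

	return ib_post_send(qp, &wr, &bad_wr);
}

IPoIB's send path follows the same pattern for GSO skbs, passing the replicated transport headers in wr.ud.header.
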
@@ -420,6 +420,9 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
 		case MLX4_OPCODE_BIND_MW:
 			wc->opcode = IB_WC_BIND_MW;
 			break;
+		case MLX4_OPCODE_LSO:
+			wc->opcode = IB_WC_LSO;
+			break;
 		}
 	} else {
 		wc->byte_len = be32_to_cpu(cqe->byte_cnt);
...
@@ -101,6 +101,8 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
 	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
 		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
+	if (dev->dev->caps.max_gso_sz)
+		props->device_cap_flags |= IB_DEVICE_UD_TSO;
 
 	props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
 		0xffffff;
...
@@ -110,6 +110,10 @@ struct mlx4_ib_wq {
 	unsigned tail;
 };
 
+enum mlx4_ib_qp_flags {
+	MLX4_IB_QP_LSO = 1 << 0
+};
+
 struct mlx4_ib_qp {
 	struct ib_qp ibqp;
 	struct mlx4_qp mqp;
@@ -129,6 +133,7 @@ struct mlx4_ib_qp {
 	struct mlx4_mtt mtt;
 	int buf_size;
 	struct mutex mutex;
+	u32 flags;
 	u8 port;
 	u8 alt_port;
 	u8 atomic_rd_en;
...
@@ -71,6 +71,7 @@ enum {
 
 static const __be32 mlx4_ib_opcode[] = {
 	[IB_WR_SEND]			= __constant_cpu_to_be32(MLX4_OPCODE_SEND),
+	[IB_WR_LSO]			= __constant_cpu_to_be32(MLX4_OPCODE_LSO),
 	[IB_WR_SEND_WITH_IMM]		= __constant_cpu_to_be32(MLX4_OPCODE_SEND_IMM),
 	[IB_WR_RDMA_WRITE]		= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE),
 	[IB_WR_RDMA_WRITE_WITH_IMM]	= __constant_cpu_to_be32(MLX4_OPCODE_RDMA_WRITE_IMM),
@@ -242,7 +243,7 @@ static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type)
 	}
 }
 
-static int send_wqe_overhead(enum ib_qp_type type)
+static int send_wqe_overhead(enum ib_qp_type type, u32 flags)
 {
 	/*
 	 * UD WQEs must have a datagram segment.
@@ -253,7 +254,8 @@ static int send_wqe_overhead(enum ib_qp_type type)
 	switch (type) {
 	case IB_QPT_UD:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
-			sizeof (struct mlx4_wqe_datagram_seg);
+			sizeof (struct mlx4_wqe_datagram_seg) +
+			((flags & MLX4_IB_QP_LSO) ? 64 : 0);
 	case IB_QPT_UC:
 		return sizeof (struct mlx4_wqe_ctrl_seg) +
 			sizeof (struct mlx4_wqe_raddr_seg);
@@ -315,7 +317,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	/* Sanity check SQ size before proceeding */
 	if (cap->max_send_wr > dev->dev->caps.max_wqes ||
 	    cap->max_send_sge > dev->dev->caps.max_sq_sg ||
-	    cap->max_inline_data + send_wqe_overhead(type) +
+	    cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
 	    sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
 		return -EINVAL;
@@ -329,7 +331,7 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg),
 		cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) +
-		send_wqe_overhead(type);
+		send_wqe_overhead(type, qp->flags);
 
 	/*
 	 * Hermon supports shrinking WQEs, such that a single work
@@ -394,7 +396,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
 	}
 
 	qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) -
-			 send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg);
+			 send_wqe_overhead(type, qp->flags)) /
+		sizeof (struct mlx4_wqe_data_seg);
 
 	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
 		(qp->sq.wqe_cnt << qp->sq.wqe_shift);
@@ -503,6 +506,9 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
 	} else {
 		qp->sq_no_prefetch = 0;
 
+		if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
+			qp->flags |= MLX4_IB_QP_LSO;
+
 		err = set_kernel_sq_size(dev, &init_attr->cap, init_attr->qp_type, qp);
 		if (err)
 			goto err;
@@ -673,7 +679,11 @@ struct ib_qp *mlx4_ib_create_qp(struct ib_pd *pd,
 	struct mlx4_ib_qp *qp;
 	int err;
 
-	if (init_attr->create_flags)
+	/* We only support LSO, and only for kernel UD QPs. */
+	if (init_attr->create_flags & ~IB_QP_CREATE_IPOIB_UD_LSO)
+		return ERR_PTR(-EINVAL);
+	if (init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO &&
+	    (pd->uobject || init_attr->qp_type != IB_QPT_UD))
 		return ERR_PTR(-EINVAL);
 
 	switch (init_attr->qp_type) {
@@ -879,10 +889,15 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
 		}
 	}
 
-	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI ||
-	    ibqp->qp_type == IB_QPT_UD)
+	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI)
 		context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
-	else if (attr_mask & IB_QP_PATH_MTU) {
+	else if (ibqp->qp_type == IB_QPT_UD) {
+		if (qp->flags & MLX4_IB_QP_LSO)
+			context->mtu_msgmax = (IB_MTU_4096 << 5) |
+				ilog2(dev->dev->caps.max_gso_sz);
+		else
+			context->mtu_msgmax = (IB_MTU_4096 << 5) | 11;
+	} else if (attr_mask & IB_QP_PATH_MTU) {
 		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
 			printk(KERN_ERR "path MTU (%u) is invalid\n",
 			       attr->path_mtu);
@@ -1399,6 +1414,34 @@ static void __set_data_seg(struct mlx4_wqe_data_seg *dseg, struct ib_sge *sg)
 	dseg->addr = cpu_to_be64(sg->addr);
 }
 
+static int build_lso_seg(struct mlx4_lso_seg *wqe, struct ib_send_wr *wr,
+			 struct mlx4_ib_qp *qp, unsigned *lso_seg_len)
+{
+	unsigned halign = ALIGN(sizeof *wqe + wr->wr.ud.hlen, 16);
+
+	/*
+	 * This is a temporary limitation and will be removed in
+	 * a forthcoming FW release:
+	 */
+	if (unlikely(halign > 64))
+		return -EINVAL;
+
+	if (unlikely(!(qp->flags & MLX4_IB_QP_LSO) &&
+		     wr->num_sge > qp->sq.max_gs - (halign >> 4)))
+		return -EINVAL;
+
+	memcpy(wqe->header, wr->wr.ud.header, wr->wr.ud.hlen);
+
+	/* make sure LSO header is written before overwriting stamping */
+	wmb();
+
+	wqe->mss_hdr_size = cpu_to_be32((wr->wr.ud.mss - wr->wr.ud.hlen) << 16 |
+					wr->wr.ud.hlen);
+
+	*lso_seg_len = halign;
+
+	return 0;
+}
+
 int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		      struct ib_send_wr **bad_wr)
 {
@@ -1412,6 +1455,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned ind;
 	int uninitialized_var(stamp);
 	int uninitialized_var(size);
+	unsigned seglen;
 	int i;
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
@@ -1490,6 +1534,16 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			set_datagram_seg(wqe, wr);
 			wqe += sizeof (struct mlx4_wqe_datagram_seg);
 			size += sizeof (struct mlx4_wqe_datagram_seg) / 16;
+
+			if (wr->opcode == IB_WR_LSO) {
+				err = build_lso_seg(wqe, wr, qp, &seglen);
+				if (unlikely(err)) {
+					*bad_wr = wr;
+					goto out;
+				}
+				wqe += seglen;
+				size += seglen / 16;
+			}
 			break;
 
 		case IB_QPT_SMI:
...
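
A note on the arithmetic in build_lso_seg() above: the LSO segment is the 4-byte mss_hdr_size word plus the inline header, rounded up to a 16-byte boundary so it can be charged to the WQE in 16-byte units, and mss_hdr_size packs the per-segment payload size in the upper half-word with the header length in the lower half (as the formula implies, wr.ud.mss covers header plus per-segment payload). A small worked example, with made-up header and MSS values:

/* Illustrative numbers only -- nothing here comes from the patch. */
unsigned hlen   = 44;                       /* bytes copied from wr.wr.ud.header       */
unsigned mss    = 2044;                     /* wr.wr.ud.mss: header + segment payload  */
unsigned halign = ALIGN(4 + hlen, 16);      /* 4-byte mss_hdr_size + header -> 48      */
__be32 word     = cpu_to_be32((mss - hlen) << 16 | hlen); /* 2000-byte MSS, 44-byte header */
/* wqe advances by halign (48) bytes and size grows by halign / 16 = 3 units,
 * which also satisfies the temporary halign <= 64 firmware limit checked above. */
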
@@ -133,6 +133,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 #define QUERY_DEV_CAP_MAX_AV_OFFSET		0x27
 #define QUERY_DEV_CAP_MAX_REQ_QP_OFFSET		0x29
 #define QUERY_DEV_CAP_MAX_RES_QP_OFFSET		0x2b
+#define QUERY_DEV_CAP_MAX_GSO_OFFSET		0x2d
 #define QUERY_DEV_CAP_MAX_RDMA_OFFSET		0x2f
 #define QUERY_DEV_CAP_RSZ_SRQ_OFFSET		0x33
 #define QUERY_DEV_CAP_ACK_DELAY_OFFSET		0x35
@@ -215,6 +216,13 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev_cap->max_requester_per_qp = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RES_QP_OFFSET);
 	dev_cap->max_responder_per_qp = 1 << (field & 0x3f);
+	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_GSO_OFFSET);
+	field &= 0x1f;
+	if (!field)
+		dev_cap->max_gso_sz = 0;
+	else
+		dev_cap->max_gso_sz = 1 << field;
+
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_RDMA_OFFSET);
 	dev_cap->max_rdma_global = 1 << (field & 0x3f);
 	MLX4_GET(field, outbox, QUERY_DEV_CAP_ACK_DELAY_OFFSET);
@@ -377,6 +385,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 		 dev_cap->max_sq_desc_sz, dev_cap->max_sq_sg);
 	mlx4_dbg(dev, "Max RQ desc size: %d, max RQ S/G: %d\n",
 		 dev_cap->max_rq_desc_sz, dev_cap->max_rq_sg);
+	mlx4_dbg(dev, "Max GSO size: %d\n", dev_cap->max_gso_sz);
 
 	dump_dev_cap_flags(dev, dev_cap->flags);
...
@@ -96,6 +96,7 @@ struct mlx4_dev_cap {
 	u8 bmme_flags;
 	u32 reserved_lkey;
 	u64 max_icm_sz;
+	int max_gso_sz;
 };
 
 struct mlx4_adapter {
...
@@ -159,6 +159,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
 	dev->caps.page_size_cap = ~(u32) (dev_cap->min_page_sz - 1);
 	dev->caps.flags = dev_cap->flags;
 	dev->caps.stat_rate_support = dev_cap->stat_rate_support;
+	dev->caps.max_gso_sz = dev_cap->max_gso_sz;
 
 	return 0;
 }
...
@@ -186,6 +186,7 @@ struct mlx4_caps {
 	u32 flags;
 	u16 stat_rate_support;
 	u8 port_width_cap[MLX4_MAX_PORTS + 1];
+	int max_gso_sz;
 };
 
 struct mlx4_buf_list {
...
@@ -219,6 +219,11 @@ struct mlx4_wqe_datagram_seg {
 	__be32 reservd[2];
 };
 
+struct mlx4_lso_seg {
+	__be32 mss_hdr_size;
+	__be32 header[0];
+};
+
 struct mlx4_wqe_bind_seg {
 	__be32 flags1;
 	__be32 flags2;
...