Commit f0e2dcff authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/core: Remove unused struct ib_device.flags member
  IB/core: Add IP checksum offload support
  IPoIB: Add send gather support
  IPoIB: Add high DMA feature flag
  IB/mlx4: Use multiple WQ blocks to post smaller send WQEs
  mlx4_core: Clean up struct mlx4_buf
  mlx4_core: For 64-bit systems, vmap() kernel queue buffers
  IB/mlx4: Consolidate code to get an entry from a struct mlx4_buf
parents 04a94bab 5128bdc9
@@ -64,13 +64,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type)
 static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n)
 {
-        int offset = n * sizeof (struct mlx4_cqe);
-
-        if (buf->buf.nbufs == 1)
-                return buf->buf.u.direct.buf + offset;
-        else
-                return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-                        (offset & (PAGE_SIZE - 1));
+        return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe));
 }
 
 static void *get_cqe(struct mlx4_ib_cq *cq, int n)
@@ -332,6 +326,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
         is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
                 MLX4_CQE_OPCODE_ERROR;
 
+        if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP &&
+                     is_send)) {
+                printk(KERN_WARNING "Completion for NOP opcode detected!\n");
+                return -EINVAL;
+        }
+
         if (!*cur_qp ||
             (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) {
                 /*
@@ -354,8 +354,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq,
         if (is_send) {
                 wq = &(*cur_qp)->sq;
-                wqe_ctr = be16_to_cpu(cqe->wqe_index);
-                wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+                if (!(*cur_qp)->sq_signal_bits) {
+                        wqe_ctr = be16_to_cpu(cqe->wqe_index);
+                        wq->tail += (u16) (wqe_ctr - (u16) wq->tail);
+                }
                 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)];
                 ++wq->tail;
         } else if ((*cur_qp)->ibqp.srq) {
......
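The tail update above relies on 16-bit modular arithmetic: on a QP created without sq_sig_all (sq_signal_bits clear), some sends complete without generating a CQE, so when a signaled completion does arrive, wq->tail += (u16) (wqe_ctr - (u16) wq->tail) jumps the tail forward by exactly the number of WQEs the hardware consumed, even across a u16 wraparound. A standalone demonstration of the arithmetic (not part of the diff; userspace types stand in for the kernel's u16):

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                unsigned tail = 65534;   /* wq->tail, just below the u16 wrap */
                uint16_t wqe_ctr = 3;    /* index reported by the completion */

                /* Same expression as in mlx4_ib_poll_one(): the 16-bit
                 * subtraction yields (uint16_t) (3 - 65534) = 5, the number
                 * of WQEs consumed, despite the counter wrapping. */
                tail += (uint16_t) (wqe_ctr - (uint16_t) tail);

                printf("%u\n", tail);    /* prints 65539; masking with
                                          * (wqe_cnt - 1) indexes the ring */
                return 0;
        }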
@@ -120,6 +120,8 @@ struct mlx4_ib_qp {
         u32                     doorbell_qpn;
         __be32                  sq_signal_bits;
         unsigned                sq_next_wqe;
+        int                     sq_max_wqes_per_wr;
+        int                     sq_spare_wqes;
         struct mlx4_ib_wq       sq;
......
This diff is collapsed.
@@ -38,13 +38,7 @@
 static void *get_wqe(struct mlx4_ib_srq *srq, int n)
 {
-        int offset = n << srq->msrq.wqe_shift;
-
-        if (srq->buf.nbufs == 1)
-                return srq->buf.u.direct.buf + offset;
-        else
-                return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf +
-                        (offset & (PAGE_SIZE - 1));
+        return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift);
 }
 
 static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type)
......
@@ -143,7 +143,7 @@ struct ipoib_rx_buf {
 struct ipoib_tx_buf {
         struct sk_buff *skb;
-        u64             mapping;
+        u64             mapping[MAX_SKB_FRAGS + 1];
 };
 
 struct ib_cm_id;
@@ -296,7 +296,7 @@ struct ipoib_dev_priv {
         struct ipoib_tx_buf *tx_ring;
         unsigned             tx_head;
         unsigned             tx_tail;
-        struct ib_sge        tx_sge;
+        struct ib_sge        tx_sge[MAX_SKB_FRAGS + 1];
         struct ib_send_wr    tx_wr;
         unsigned             tx_outstanding;
......
@@ -634,8 +634,8 @@ static inline int post_send(struct ipoib_dev_priv *priv,
 {
         struct ib_send_wr *bad_wr;
 
-        priv->tx_sge.addr      = addr;
-        priv->tx_sge.length    = len;
+        priv->tx_sge[0].addr   = addr;
+        priv->tx_sge[0].length = len;
 
         priv->tx_wr.wr_id      = wr_id | IPOIB_OP_CM;
@@ -676,7 +676,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
                 return;
         }
 
-        tx_req->mapping = addr;
+        tx_req->mapping[0] = addr;
 
         if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
                                addr, skb->len))) {
@@ -715,7 +715,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
         tx_req = &tx->tx_ring[wr_id];
 
-        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);
+        ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE);
 
         /* FIXME: is this right? Shouldn't we only increment on success? */
         ++dev->stats.tx_packets;
@@ -1110,7 +1110,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
                 while ((int) p->tx_tail - (int) p->tx_head < 0) {
                         tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
-                        ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
+                        ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len,
                                             DMA_TO_DEVICE);
                         dev_kfree_skb_any(tx_req->skb);
                         ++p->tx_tail;
......
@@ -239,6 +239,54 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
                            "for buf %d\n", wr_id);
 }
 
+static int ipoib_dma_map_tx(struct ib_device *ca,
+                            struct ipoib_tx_buf *tx_req)
+{
+        struct sk_buff *skb = tx_req->skb;
+        u64 *mapping = tx_req->mapping;
+        int i;
+
+        mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
+                                       DMA_TO_DEVICE);
+        if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
+                return -EIO;
+
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                mapping[i + 1] = ib_dma_map_page(ca, frag->page,
+                                                 frag->page_offset, frag->size,
+                                                 DMA_TO_DEVICE);
+                if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1])))
+                        goto partial_error;
+        }
+        return 0;
+
+partial_error:
+        ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+        for (; i > 0; --i) {
+                skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];
+                ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE);
+        }
+        return -EIO;
+}
+
+static void ipoib_dma_unmap_tx(struct ib_device *ca,
+                               struct ipoib_tx_buf *tx_req)
+{
+        struct sk_buff *skb = tx_req->skb;
+        u64 *mapping = tx_req->mapping;
+        int i;
+
+        ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
+
+        for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
+                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+                ib_dma_unmap_page(ca, mapping[i + 1], frag->size,
+                                  DMA_TO_DEVICE);
+        }
+}
+
 static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -257,8 +305,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
         tx_req = &priv->tx_ring[wr_id];
 
-        ib_dma_unmap_single(priv->ca, tx_req->mapping,
-                            tx_req->skb->len, DMA_TO_DEVICE);
+        ipoib_dma_unmap_tx(priv->ca, tx_req);
 
         ++dev->stats.tx_packets;
         dev->stats.tx_bytes += tx_req->skb->len;
@@ -341,16 +388,23 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr)
 static inline int post_send(struct ipoib_dev_priv *priv,
                             unsigned int wr_id,
                             struct ib_ah *address, u32 qpn,
-                            u64 addr, int len)
+                            u64 *mapping, int headlen,
+                            skb_frag_t *frags,
+                            int nr_frags)
 {
         struct ib_send_wr *bad_wr;
+        int i;
 
-        priv->tx_sge.addr            = addr;
-        priv->tx_sge.length          = len;
-        priv->tx_wr.wr_id            = wr_id;
-        priv->tx_wr.wr.ud.remote_qpn = qpn;
-        priv->tx_wr.wr.ud.ah         = address;
+        priv->tx_sge[0].addr         = mapping[0];
+        priv->tx_sge[0].length       = headlen;
+        for (i = 0; i < nr_frags; ++i) {
+                priv->tx_sge[i + 1].addr   = mapping[i + 1];
+                priv->tx_sge[i + 1].length = frags[i].size;
+        }
+        priv->tx_wr.num_sge          = nr_frags + 1;
+        priv->tx_wr.wr_id            = wr_id;
+        priv->tx_wr.wr.ud.remote_qpn = qpn;
+        priv->tx_wr.wr.ud.ah         = address;
 
         return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr);
 }
@@ -360,7 +414,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
 {
         struct ipoib_dev_priv *priv = netdev_priv(dev);
         struct ipoib_tx_buf *tx_req;
-        u64 addr;
 
         if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) {
                 ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
@@ -383,20 +436,19 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb,
          */
         tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)];
         tx_req->skb = skb;
-        addr = ib_dma_map_single(priv->ca, skb->data, skb->len,
-                                 DMA_TO_DEVICE);
-        if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
+        if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
                 ++dev->stats.tx_errors;
                 dev_kfree_skb_any(skb);
                 return;
         }
-        tx_req->mapping = addr;
 
         if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1),
-                               address->ah, qpn, addr, skb->len))) {
+                               address->ah, qpn,
+                               tx_req->mapping, skb_headlen(skb),
+                               skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) {
                 ipoib_warn(priv, "post_send failed\n");
                 ++dev->stats.tx_errors;
-                ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
+                ipoib_dma_unmap_tx(priv->ca, tx_req);
                 dev_kfree_skb_any(skb);
         } else {
                 dev->trans_start = jiffies;
@@ -615,10 +667,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush)
                 while ((int) priv->tx_tail - (int) priv->tx_head < 0) {
                         tx_req = &priv->tx_ring[priv->tx_tail &
                                                 (ipoib_sendq_size - 1)];
-                        ib_dma_unmap_single(priv->ca,
-                                            tx_req->mapping,
-                                            tx_req->skb->len,
-                                            DMA_TO_DEVICE);
+                        ipoib_dma_unmap_tx(priv->ca, tx_req);
                         dev_kfree_skb_any(tx_req->skb);
                         ++priv->tx_tail;
                         --priv->tx_outstanding;
......
@@ -965,7 +965,9 @@ static void ipoib_setup(struct net_device *dev)
         dev->addr_len            = INFINIBAND_ALEN;
         dev->type                = ARPHRD_INFINIBAND;
         dev->tx_queue_len        = ipoib_sendq_size * 2;
-        dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
+        dev->features            = (NETIF_F_VLAN_CHALLENGED |
+                                    NETIF_F_LLTX |
+                                    NETIF_F_HIGHDMA);
 
         /* MTU will be reset when mcast join happens */
         dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
......
@@ -157,6 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
         };
 
         int ret, size;
+        int i;
 
         priv->pd = ib_alloc_pd(priv->ca);
         if (IS_ERR(priv->pd)) {
@@ -191,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
         init_attr.send_cq = priv->cq;
         init_attr.recv_cq = priv->cq;
 
+        if (dev->features & NETIF_F_SG)
+                init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1;
+
         priv->qp = ib_create_qp(priv->pd, &init_attr);
         if (IS_ERR(priv->qp)) {
                 printk(KERN_WARNING "%s: failed to create QP\n", ca->name);
@@ -201,11 +205,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
         priv->dev->dev_addr[2] = (priv->qp->qp_num >>  8) & 0xff;
         priv->dev->dev_addr[3] = (priv->qp->qp_num      ) & 0xff;
 
-        priv->tx_sge.lkey       = priv->mr->lkey;
+        for (i = 0; i < MAX_SKB_FRAGS + 1; ++i)
+                priv->tx_sge[i].lkey = priv->mr->lkey;
 
         priv->tx_wr.opcode      = IB_WR_SEND;
-        priv->tx_wr.sg_list     = &priv->tx_sge;
-        priv->tx_wr.num_sge     = 1;
+        priv->tx_wr.sg_list     = priv->tx_sge;
         priv->tx_wr.send_flags  = IB_SEND_SIGNALED;
 
         return 0;
......
@@ -116,40 +116,53 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                 buf->nbufs        = 1;
                 buf->npages       = 1;
                 buf->page_shift   = get_order(size) + PAGE_SHIFT;
-                buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+                buf->direct.buf   = dma_alloc_coherent(&dev->pdev->dev,
                                                        size, &t, GFP_KERNEL);
-                if (!buf->u.direct.buf)
+                if (!buf->direct.buf)
                         return -ENOMEM;
 
-                buf->u.direct.map = t;
+                buf->direct.map = t;
 
                 while (t & ((1 << buf->page_shift) - 1)) {
                         --buf->page_shift;
                         buf->npages *= 2;
                 }
 
-                memset(buf->u.direct.buf, 0, size);
+                memset(buf->direct.buf, 0, size);
         } else {
                 int i;
 
                 buf->nbufs       = (size + PAGE_SIZE - 1) / PAGE_SIZE;
                 buf->npages      = buf->nbufs;
                 buf->page_shift  = PAGE_SHIFT;
-                buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list,
+                buf->page_list   = kzalloc(buf->nbufs * sizeof *buf->page_list,
                                            GFP_KERNEL);
-                if (!buf->u.page_list)
+                if (!buf->page_list)
                         return -ENOMEM;
 
                 for (i = 0; i < buf->nbufs; ++i) {
-                        buf->u.page_list[i].buf =
+                        buf->page_list[i].buf =
                                 dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
                                                    &t, GFP_KERNEL);
-                        if (!buf->u.page_list[i].buf)
+                        if (!buf->page_list[i].buf)
                                 goto err_free;
 
-                        buf->u.page_list[i].map = t;
+                        buf->page_list[i].map = t;
 
-                        memset(buf->u.page_list[i].buf, 0, PAGE_SIZE);
+                        memset(buf->page_list[i].buf, 0, PAGE_SIZE);
                 }
+
+                if (BITS_PER_LONG == 64) {
+                        struct page **pages;
+                        pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL);
+                        if (!pages)
+                                goto err_free;
+                        for (i = 0; i < buf->nbufs; ++i)
+                                pages[i] = virt_to_page(buf->page_list[i].buf);
+                        buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
+                        kfree(pages);
+                        if (!buf->direct.buf)
+                                goto err_free;
+                }
         }
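The new BITS_PER_LONG == 64 block is the heart of the "vmap() kernel queue buffers" change: on 64-bit systems, where kernel virtual address space is plentiful, the physically scattered per-page coherent allocations are also stitched into one virtually contiguous mapping, so offsets into the buffer reduce to a single pointer add (see mlx4_buf_offset() below). The pattern in isolation, as a hedged kernel-context sketch (helper name invented, error handling trimmed):

        #include <linux/mm.h>
        #include <linux/slab.h>
        #include <linux/vmalloc.h>

        /* Sketch: map nbufs page-sized, physically scattered, lowmem
         * buffers at one contiguous kernel virtual address.  Returns
         * NULL on failure; release the mapping with vunmap(). */
        static void *map_contiguous(void **bufs, int nbufs)
        {
                struct page **pages;
                void *va;
                int i;

                pages = kmalloc(sizeof *pages * nbufs, GFP_KERNEL);
                if (!pages)
                        return NULL;
                for (i = 0; i < nbufs; ++i)
                        pages[i] = virt_to_page(bufs[i]);
                va = vmap(pages, nbufs, VM_MAP, PAGE_KERNEL);
                kfree(pages);
                return va;
        }

Keeping this 64-bit only avoids eating into the much smaller 32-bit vmalloc arena; 32-bit kernels keep taking the page_list path.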
@@ -167,15 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
         int i;
 
         if (buf->nbufs == 1)
-                dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf,
-                                  buf->u.direct.map);
+                dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+                                  buf->direct.map);
         else {
+                if (BITS_PER_LONG == 64)
+                        vunmap(buf->direct.buf);
+
                 for (i = 0; i < buf->nbufs; ++i)
-                        if (buf->u.page_list[i].buf)
+                        if (buf->page_list[i].buf)
                                 dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-                                                  buf->u.page_list[i].buf,
-                                                  buf->u.page_list[i].map);
-                kfree(buf->u.page_list);
+                                                  buf->page_list[i].buf,
+                                                  buf->page_list[i].map);
+                kfree(buf->page_list);
         }
 }
 EXPORT_SYMBOL_GPL(mlx4_buf_free);
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
         for (i = 0; i < buf->npages; ++i)
                 if (buf->nbufs == 1)
-                        page_list[i] = buf->u.direct.map + (i << buf->page_shift);
+                        page_list[i] = buf->direct.map + (i << buf->page_shift);
                 else
-                        page_list[i] = buf->u.page_list[i].map;
+                        page_list[i] = buf->page_list[i].map;
 
         err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list);
......
@@ -133,6 +133,11 @@ enum {
         MLX4_STAT_RATE_OFFSET   = 5
 };
 
+static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
+{
+        return (major << 32) | (minor << 16) | subminor;
+}
+
 struct mlx4_caps {
         u64                     fw_ver;
         int                     num_ports;
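mlx4_fw_ver() packs major.minor.subminor into the same 64-bit layout the HCA reports in caps.fw_ver (major in bits 63:32, minor in 31:16, subminor in 15:0), so firmware versions can be ordered with a plain integer comparison. A worked example (userspace rendition, for illustration only):

        #include <stdint.h>
        #include <stdio.h>

        static uint64_t mlx4_fw_ver(uint64_t major, uint64_t minor,
                                    uint64_t subminor)
        {
                return (major << 32) | (minor << 16) | subminor;
        }

        int main(void)
        {
                /* 2.2.232 -> (2 << 32) | (2 << 16) | 232 = 0x2000200e8,
                 * so fw_ver >= mlx4_fw_ver(2, 2, 232) is true exactly
                 * for firmware 2.2.232 and later. */
                printf("%#llx\n", (unsigned long long) mlx4_fw_ver(2, 2, 232));
                return 0;
        }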
@@ -189,10 +194,8 @@ struct mlx4_buf_list {
 };
 
 struct mlx4_buf {
-        union {
-                struct mlx4_buf_list    direct;
-                struct mlx4_buf_list   *page_list;
-        } u;
+        struct mlx4_buf_list    direct;
+        struct mlx4_buf_list   *page_list;
         int                     nbufs;
         int                     npages;
         int                     page_shift;
@@ -308,6 +311,14 @@ struct mlx4_init_port_param {
 int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
                    struct mlx4_buf *buf);
 void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
+static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
+{
+        if (BITS_PER_LONG == 64 || buf->nbufs == 1)
+                return buf->direct.buf + offset;
+        else
+                return buf->page_list[offset >> PAGE_SHIFT].buf +
+                        (offset & (PAGE_SIZE - 1));
+}
 
 int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn);
 void mlx4_pd_free(struct mlx4_dev *dev, u32 pdn);
......
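mlx4_buf_offset() is what the open-coded lookups removed from get_cqe_from_buf() and get_wqe() above collapse into: after the vmap() change, direct.buf is valid whenever BITS_PER_LONG == 64 or the buffer is a single allocation, and only 32-bit multi-page buffers still index page_list. A usage sketch under those assumptions (names illustrative, not from the commit):

        /* Entry n of a queue with a (1 << wqe_shift)-byte stride,
         * regardless of how the underlying buffer is laid out. */
        static void *get_entry(struct mlx4_buf *buf, int n, int wqe_shift)
        {
                return mlx4_buf_offset(buf, n << wqe_shift);
        }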
@@ -154,7 +154,11 @@ struct mlx4_qp_context {
         u32                     reserved5[10];
 };
 
+/* Which firmware version adds support for NEC (NoErrorCompletion) bit */
+#define MLX4_FW_VER_WQE_CTRL_NEC        mlx4_fw_ver(2, 2, 232)
+
 enum {
+        MLX4_WQE_CTRL_NEC       = 1 << 29,
         MLX4_WQE_CTRL_FENCE     = 1 << 6,
         MLX4_WQE_CTRL_CQ_UPDATE = 3 << 2,
         MLX4_WQE_CTRL_SOLICITED = 1 << 1,
......
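The firmware gate and the NEC bit go together: a WQE marked with MLX4_WQE_CTRL_NEC generates no CQE on success, which the multiple-WQ-blocks change relies on for its internal filler WQEs, but only firmware 2.2.232 and later honors the bit. A hedged sketch of the intended check (the control-segment field name is an assumption, not taken from this diff):

        /* Suppress the success completion for this WQE, but only on
         * firmware new enough to understand NoErrorCompletion. */
        if (dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC)
                ctrl->srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_NEC);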
@@ -95,7 +95,15 @@ enum ib_device_cap_flags {
         IB_DEVICE_N_NOTIFY_CQ           = (1<<14),
         IB_DEVICE_ZERO_STAG             = (1<<15),
         IB_DEVICE_SEND_W_INV            = (1<<16),
-        IB_DEVICE_MEM_WINDOW            = (1<<17)
+        IB_DEVICE_MEM_WINDOW            = (1<<17),
+        /*
+         * Devices should set IB_DEVICE_UD_IP_CSUM if they support
+         * insertion of UDP and TCP checksum on outgoing UD IPoIB
+         * messages and can verify the validity of checksum for
+         * incoming messages.  Setting this flag implies that the
+         * IPoIB driver may set NETIF_F_IP_CSUM for datagram mode.
+         */
+        IB_DEVICE_UD_IP_CSUM            = (1<<18),
 };
 
 enum ib_atomic_cap {
@@ -431,6 +439,7 @@ struct ib_wc {
         u8                      sl;
         u8                      dlid_path_bits;
         u8                      port_num;       /* valid only for DR SMPs on switches */
+        int                     csum_ok;
 };
 
 enum ib_cq_notify_flags {
@@ -615,7 +624,8 @@ enum ib_send_flags {
         IB_SEND_FENCE           = 1,
         IB_SEND_SIGNALED        = (1<<1),
         IB_SEND_SOLICITED       = (1<<2),
-        IB_SEND_INLINE          = (1<<3)
+        IB_SEND_INLINE          = (1<<3),
+        IB_SEND_IP_CSUM         = (1<<4)
 };
 
 struct ib_sge {
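IB_SEND_IP_CSUM, IB_DEVICE_UD_IP_CSUM, and ib_wc.csum_ok together form the checksum-offload contract: a ULP sets IB_SEND_IP_CSUM on a send work request to have the HCA insert the TCP/UDP checksum, and on receive trusts csum_ok instead of verifying in software. A hedged sketch of how a consumer such as IPoIB might wire this up (simplified, not the exact driver code; csum_offload_enabled stands for whatever capability check the driver performs):

        /* Transmit: let the HCA fill in the transport checksum. */
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                priv->tx_wr.send_flags |= IB_SEND_IP_CSUM;
        else
                priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

        /* Receive: accept the hardware's verdict and skip software
         * checksumming (csum_offload_enabled is a placeholder). */
        if (csum_offload_enabled && wc->csum_ok)
                skb->ip_summed = CHECKSUM_UNNECESSARY;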
@@ -890,8 +900,6 @@ struct ib_device {
         int                          *pkey_tbl_len;
         int                          *gid_tbl_len;
 
-        u32                           flags;
-
         int                           num_comp_vectors;
 
         struct iw_cm_verbs           *iwcm;
......