Commit 2c104ea6 authored by Erez Shitrit, committed by Doug Ledford

IB/ipoib: Get rid of the tx_outstanding variable in all modes

The first step toward using NAPI in the UD/TX flow is to separate the
two flows, NAPI and xmit, so that no variables are shared between them.

This patch removes the tx_outstanding variable, which was used in both
flows. Instead, the driver uses the two cyclic ring variables tx_head
and tx_tail: tx_head is advanced in the xmit flow, tx_tail in the NAPI
flow, and their difference is the number of outstanding sends.
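
This is the classic single-producer/single-consumer ring count: the
producer owns the head index, the consumer owns the tail index, and
head - tail is the number of in-flight entries. A minimal standalone
sketch of the idea (illustrative C, not the driver's code; the names
are made up):

	#include <stdio.h>

	struct ring {
		unsigned tx_head;	/* advanced by the xmit (producer) path only */
		unsigned tx_tail;	/* advanced by the completion (consumer) path only */
	};

	static unsigned outstanding(const struct ring *r)
	{
		return r->tx_head - r->tx_tail;	/* replaces the shared tx_outstanding */
	}

	int main(void)
	{
		struct ring r = { 0, 0 };

		r.tx_head += 3;		/* three sends posted */
		printf("in flight: %u\n", outstanding(&r));	/* prints 3 */
		r.tx_tail += 2;		/* two completions reaped */
		printf("in flight: %u\n", outstanding(&r));	/* prints 1 */
		return 0;
	}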

Cc: Kamal Heib <kamalh@mellanox.com>
Signed-off-by: Erez Shitrit <erezsh@mellanox.com>
Reviewed-by: Alex Vesker <valex@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 5556176d
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -381,7 +381,6 @@ struct ipoib_dev_priv {
 	unsigned	     tx_tail;
 	struct ib_sge	     tx_sge[MAX_SKB_FRAGS + 1];
 	struct ib_ud_wr      tx_wr;
-	unsigned	     tx_outstanding;
 	struct ib_wc	     send_wc[MAX_SEND_CQE];
 
 	struct ib_recv_wr    rx_wr;
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -769,8 +769,8 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_
 	} else {
 		netif_trans_update(dev);
 		++tx->tx_head;
-
-		if (++priv->tx_outstanding == ipoib_sendq_size) {
+		++priv->tx_head;
+		if ((priv->tx_head - priv->tx_tail) == ipoib_sendq_size) {
 			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
 				  tx->qp->qp_num);
 			netif_stop_queue(dev);
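
Because tx_head and tx_tail are plain unsigned counters that are only
ever subtracted, never masked, the full/empty checks stay correct even
after the counters wrap past UINT_MAX. A small standalone illustration
(assumes the usual 32-bit unsigned int):

	#include <assert.h>

	int main(void)
	{
		unsigned tx_head = 2u;			/* has wrapped past UINT_MAX */
		unsigned tx_tail = 4294967294u;		/* UINT_MAX - 1 completions reaped */

		/* unsigned subtraction is modulo 2^32, so the count is still right */
		assert(tx_head - tx_tail == 4u);
		return 0;
	}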
@@ -814,7 +814,8 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	netif_tx_lock(dev);
 
 	++tx->tx_tail;
-	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	++priv->tx_tail;
+	if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
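
The wake threshold of ipoib_sendq_size >> 1 gives the stop/wake logic
hysteresis: the netdev queue is stopped when the ring fills completely,
but woken only once completions have drained it back to half full, so
the queue does not bounce on and off around a single free slot. A
sketch of the pattern (illustrative names, not driver code):

	enum queue_action { QUEUE_NONE, QUEUE_STOP, QUEUE_WAKE };

	static enum queue_action queue_check(unsigned head, unsigned tail,
					     unsigned sendq_size, int stopped)
	{
		unsigned outstanding = head - tail;

		if (!stopped && outstanding == sendq_size)
			return QUEUE_STOP;	/* ring full: stop the netdev queue */
		if (stopped && outstanding == sendq_size >> 1)
			return QUEUE_WAKE;	/* half drained: safe to resume */
		return QUEUE_NONE;
	}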
@@ -1220,8 +1221,9 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
 		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(tx_req->skb);
 		++p->tx_tail;
+		++priv->tx_tail;
 		netif_tx_lock_bh(p->dev);
-		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+		if (unlikely(priv->tx_head - priv->tx_tail == ipoib_sendq_size >> 1) &&
 		    netif_queue_stopped(p->dev) &&
 		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 			netif_wake_queue(p->dev);
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -406,7 +406,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
 	dev_kfree_skb_any(tx_req->skb);
 
 	++priv->tx_tail;
-	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
+	if (unlikely((priv->tx_head - priv->tx_tail) == ipoib_sendq_size >> 1) &&
 	    netif_queue_stopped(dev) &&
 	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
 		netif_wake_queue(dev);
@@ -611,8 +611,8 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		priv->tx_wr.wr.send_flags |= IB_SEND_IP_CSUM;
 	else
 		priv->tx_wr.wr.send_flags &= ~IB_SEND_IP_CSUM;
-
-	if (++priv->tx_outstanding == ipoib_sendq_size) {
+	/* increase the tx_head after send success, but use it for queue state */
+	if (priv->tx_head - priv->tx_tail == ipoib_sendq_size - 1) {
 		ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n");
 		if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))
 			ipoib_warn(priv, "request notify on send CQ failed\n");
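
Note the UD path compares against ipoib_sendq_size - 1 where the CM
path used ipoib_sendq_size: as the new in-line comment says, tx_head is
only incremented after the post succeeds, so at the time of the check
one more send is about to occupy the ring. A hypothetical, simplified
model of that ordering (not the driver's actual code):

	/* the full-ring check runs before tx_head is advanced, hence the "- 1" */
	static int ud_xmit(unsigned *tx_head, unsigned tx_tail,
			   unsigned sendq_size, int *stop_queue)
	{
		if (*tx_head - tx_tail == sendq_size - 1)
			*stop_queue = 1;	/* the send below takes the last free slot */

		/* ib_post_send() would happen here; on success: */
		++*tx_head;
		return 0;
	}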
@@ -627,7 +627,6 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 	if (unlikely(rc)) {
 		ipoib_warn(priv, "post_send failed, error %d\n", rc);
 		++dev->stats.tx_errors;
-		--priv->tx_outstanding;
 		ipoib_dma_unmap_tx(priv, tx_req);
 		dev_kfree_skb_any(skb);
 		if (netif_queue_stopped(dev))
@@ -640,7 +639,7 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
 		++priv->tx_head;
 	}
 
-	if (unlikely(priv->tx_outstanding > MAX_SEND_CQE))
+	if (unlikely(priv->tx_head - priv->tx_tail > MAX_SEND_CQE))
 		while (poll_tx(priv))
 			; /* nothing */
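
MAX_SEND_CQE caps how many completions may be left unreaped before the
xmit path polls the send CQ synchronously, and the head/tail difference
now drives that check too. For context, a rough sketch of what a helper
like poll_tx() plausibly looks like, built on the ib_poll_cq() verb (an
assumed shape for illustration; the helper itself is not part of this
diff):

	/* reap up to MAX_SEND_CQE send completions and report whether
	 * the CQ may still hold more
	 */
	static int poll_tx(struct ipoib_dev_priv *priv)
	{
		int n, i;

		n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc);
		for (i = 0; i < n; ++i)
			ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i);

		return n == MAX_SEND_CQE;	/* nonzero: worth polling again */
	}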
@@ -773,7 +772,6 @@ int ipoib_ib_dev_stop_default(struct net_device *dev)
 			ipoib_dma_unmap_tx(priv, tx_req);
 			dev_kfree_skb_any(tx_req->skb);
 			++priv->tx_tail;
-			--priv->tx_outstanding;
 		}
 
 		for (i = 0; i < ipoib_recvq_size; ++i) {