Commit 8f6a2b05 authored by Jesse Brandeburg, committed by Jeff Kirsher

i40evf: skb->xmit_more support

Eric added support for skb->xmit_more in i40e; this patch ports that
support to i40evf as well.

Supporting skb->xmit_more in i40evf is straightforward: we need to move
the i40e_maybe_stop_tx() call so that netif_xmit_stopped() is tested
correctly before deciding not to kick the NIC.
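
For reference, the pattern at the heart of this change looks roughly like
the following minimal sketch (illustrative only, not code from this patch:
the demo_ring structure and demo_* helpers are hypothetical stand-ins for
the driver's struct i40e_ring and i40evf_maybe_stop_tx(), and
skb->xmit_more is the field as it existed in kernels of this era):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/io.h>

/* Hypothetical minimal Tx ring, standing in for struct i40e_ring. */
struct demo_ring {
	struct net_device *netdev;
	u8 __iomem *tail;	/* MMIO tail (doorbell) register */
	u16 queue_index;
	u16 unused;		/* free descriptor count (demo bookkeeping) */
	u16 next_to_use;
};

/* Condensed stand-in for the two-level i40evf_maybe_stop_tx() check:
 * stop the subqueue when fewer than @needed descriptors remain, then
 * re-check in case a clean-up path just freed room.
 */
static int demo_maybe_stop_tx(struct demo_ring *ring, int needed)
{
	if (likely(ring->unused >= needed))
		return 0;
	netif_stop_subqueue(ring->netdev, ring->queue_index);
	smp_mb();	/* order the stop against the re-check */
	if (likely(ring->unused < needed))
		return -EBUSY;
	netif_start_subqueue(ring->netdev, ring->queue_index);
	return 0;
}

static void demo_notify_hw(struct demo_ring *ring, struct sk_buff *skb)
{
	/* Run the stop check *before* deciding about the doorbell, so a
	 * just-stopped queue is visible to netif_xmit_stopped() below.
	 */
	demo_maybe_stop_tx(ring, MAX_SKB_FRAGS + 4 + 1);

	/* Defer the tail write only when the stack has promised more
	 * frames (skb->xmit_more) AND the queue is still running; if the
	 * queue just stopped, no further transmit will arrive to flush
	 * the tail, so the NIC must be kicked now.
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(ring->netdev,
						   ring->queue_index)))
		writel(ring->next_to_use, ring->tail);
}

The same guarded writel() appears in i40e_tx_map() in the diff below.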

Change-ID: Idddda6a2e4a7ab335631c91ced51f55b25eb8468
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Daniel Borkmann <dborkman@redhat.com>
Signed-off-by: Jesse Brandeburg <jesse.brandeburg@intel.com>
Tested-by: Jim Young <james.m.young@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 2e4875e3
@@ -1669,6 +1669,47 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
 	return linearize;
 }
 
+/**
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	/* Memory barrier before checking head and tail */
+	smp_mb();
+
+	/* Check again in a case another CPU has just made room available. */
+	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! - use start_queue because it doesn't call schedule */
+	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+	++tx_ring->tx_stats.restart_queue;
+	return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+#ifdef I40E_FCOE
+int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
+static int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
+{
+	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
 /**
  * i40e_tx_map - Build the Tx descriptor
  * @tx_ring: ring to send buffer on
@@ -1806,7 +1847,11 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->next_to_use = i;
 
+	i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
 	/* notify HW of packet */
-	writel(i, tx_ring->tail);
+	if (!skb->xmit_more ||
+	    netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+						   tx_ring->queue_index)))
+		writel(i, tx_ring->tail);
 
 	return;
 
@@ -1828,43 +1873,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	tx_ring->next_to_use = i;
 }
 
-/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	/* Memory barrier before checking head and tail */
-	smp_mb();
-
-	/* Check again in a case another CPU has just made room available. */
-	if (likely(I40E_DESC_UNUSED(tx_ring) < size))
-		return -EBUSY;
-
-	/* A reprieve! - use start_queue because it doesn't call schedule */
-	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-	++tx_ring->tx_stats.restart_queue;
-	return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
-	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
-		return 0;
-	return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
 /**
  * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
  * @skb: send buffer
@@ -1890,7 +1898,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
 	count += TXD_USE_COUNT(skb_headlen(skb));
-	if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+	if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
 		tx_ring->tx_stats.tx_busy++;
 		return 0;
 	}
@@ -1966,8 +1974,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
 		    td_cmd, td_offset);
 
-	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
-
 	return NETDEV_TX_OK;
 
 out_drop: