Commit e486bdfd authored by Alexander Duyck, committed by Jeff Kirsher

i40e/i40evf: Add txring_txq function to match fm10k and ixgbe

This patch adds a txring_txq function which allows us to convert an
i40e_ring/i40evf_ring to its backing netdev_queue structure.  This way we
can avoid having to make a multi-line netdev_get_tx_queue() call at every
spot that needs access to it.
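
For illustration only (not part of the diff below), a minimal sketch of the
pattern this patch introduces; the u16 typedef and the trimmed-down
struct i40e_ring here stand in for the real kernel definitions:

    /* Illustrative sketch only; u16 and the cut-down struct i40e_ring
     * stand in for the real definitions in i40e_txrx.h.
     */
    typedef unsigned short u16;

    struct net_device;
    struct netdev_queue;

    /* Provided by <linux/netdevice.h> in the kernel proper. */
    struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
                                             unsigned int index);

    struct i40e_ring {
        struct net_device *netdev;  /* netdev this ring belongs to */
        u16 queue_index;            /* Tx queue number for this ring */
        /* ... many more members in the real driver ... */
    };

    /* The helper added by this patch: one short call replaces the
     * multi-line netdev_get_tx_queue(tx_ring->netdev,
     * tx_ring->queue_index) expression at every call site.
     */
    static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
    {
        return netdev_get_tx_queue(ring->netdev, ring->queue_index);
    }

Call sites then shrink from:

    netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
                                              tx_ring->queue_index));

to:

    netdev_tx_reset_queue(txring_txq(tx_ring));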

Change-ID: Ic063b71d8b92ea406d2c32e798c8e2b02809d65b
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 64bfd68e
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -584,8 +584,7 @@ void i40e_clean_tx_ring(struct i40e_ring *tx_ring)
 		return;
 
 	/* cleanup Tx queue statistics */
-	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
-						  tx_ring->queue_index));
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 }
 
 /**
@@ -754,8 +753,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		tx_ring->arm_wb = true;
 	}
 
-	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
-						      tx_ring->queue_index),
+	/* notify netdev of completed buffers */
+	netdev_tx_completed_queue(txring_txq(tx_ring),
 				  total_packets, total_bytes);
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -2784,9 +2783,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	/* Algorithm to optimize tail and RS bit setting:
@@ -2811,13 +2808,11 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	 * trigger a force WB.
 	 */
 	if (skb->xmit_more &&
-	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						    tx_ring->queue_index))) {
+	    !netif_xmit_stopped(txring_txq(tx_ring))) {
 		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
 		tail_bump = false;
 	} else if (!skb->xmit_more &&
-		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						       tx_ring->queue_index)) &&
+		   !netif_xmit_stopped(txring_txq(tx_ring)) &&
 		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
 		   (tx_ring->packet_stride < WB_STRIDE) &&
 		   (desc_count < WB_STRIDE)) {
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -463,4 +463,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
 	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
 	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
 }
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
 #endif /* _I40E_TXRX_H_ */
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -103,8 +103,7 @@ void i40evf_clean_tx_ring(struct i40e_ring *tx_ring)
 		return;
 
 	/* cleanup Tx queue statistics */
-	netdev_tx_reset_queue(netdev_get_tx_queue(tx_ring->netdev,
-						  tx_ring->queue_index));
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 }
 
 /**
@@ -273,8 +272,8 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		tx_ring->arm_wb = true;
 	}
 
-	netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
-						      tx_ring->queue_index),
+	/* notify netdev of completed buffers */
+	netdev_tx_completed_queue(txring_txq(tx_ring),
 				  total_packets, total_bytes);
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
@@ -2012,9 +2011,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 
 	tx_ring->next_to_use = i;
 
-	netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-						 tx_ring->queue_index),
-			     first->bytecount);
+	netdev_tx_sent_queue(txring_txq(tx_ring), first->bytecount);
 	i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	/* Algorithm to optimize tail and RS bit setting:
@@ -2039,13 +2036,11 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	 * trigger a force WB.
 	 */
 	if (skb->xmit_more &&
-	    !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						    tx_ring->queue_index))) {
+	    !netif_xmit_stopped(txring_txq(tx_ring))) {
 		tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
 		tail_bump = false;
 	} else if (!skb->xmit_more &&
-		   !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-						       tx_ring->queue_index)) &&
+		   !netif_xmit_stopped(txring_txq(tx_ring)) &&
 		   (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
 		   (tx_ring->packet_stride < WB_STRIDE) &&
 		   (desc_count < WB_STRIDE)) {
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -445,4 +445,13 @@ static inline bool i40e_rx_is_fcoe(u16 ptype)
 	return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
 	       (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
 }
+
+/**
+ * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
+ * @ring: Tx ring to find the netdev equivalent of
+ **/
+static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
+{
+	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
+}
 #endif /* _I40E_TXRX_H_ */