Commit 69077577 authored by Jacob Keller, committed by Jeff Kirsher

i40e: avoid permanent lock of *_PTP_TX_IN_PROGRESS

The i40e driver uses a bit lock to indicate when a Tx timestamp is in
progress to avoid attempting to timestamp multiple packets at once. This
is required because the hardware only has registers to handle one request
at a time.
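For reference, the pattern at work is a single-slot try-lock: the transmit path claims the one outstanding-timestamp slot with a test-and-set style bit operation and releases it with clear_bit_unlock(), as seen in the diff below. A minimal user-space analogue of that pattern, using C11 atomics rather than the kernel bit-lock helpers and with purely illustrative names, looks like this:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One flag standing in for the __I40E_PTP_TX_IN_PROGRESS state bit. */
static atomic_flag tx_tstamp_in_progress = ATOMIC_FLAG_INIT;

/* Claim the single timestamp slot; fails if another packet holds it. */
static bool claim_tstamp_slot(void)
{
	/* test_and_set returns the previous value: false means we got the slot. */
	return !atomic_flag_test_and_set_explicit(&tx_tstamp_in_progress,
						  memory_order_acquire);
}

/* Release the slot so the next timestamp request can proceed. */
static void release_tstamp_slot(void)
{
	atomic_flag_clear_explicit(&tx_tstamp_in_progress,
				   memory_order_release);
}

int main(void)
{
	printf("first claim:   %d\n", claim_tstamp_slot()); /* 1: acquired */
	printf("second claim:  %d\n", claim_tstamp_slot()); /* 0: slot busy */
	release_tstamp_slot();
	printf("after release: %d\n", claim_tstamp_slot()); /* 1: acquired again */
	return 0;
}

The second claim failing while the first is outstanding is exactly the behavior the driver relies on; the bug described below is about the release side never running.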

There is a corner case where we failed to clean up the bit lock after
a failed transmit. This can potentially result in the state bit being
locked forever.

Add some cleanup code to i40e_xmit_frame_ring to check and make sure we
clean up in case of these failures. We also modify i40e_tx_map to return
an error code indicating DMA failure.
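The shape of the fix is therefore to make the mapping step report failure and to route that failure through a cleanup label that releases the bit lock and frees the saved timestamp skb. Continuing the user-space sketch above (again, all names are illustrative stand-ins, not the driver's own symbols), the intended control flow is roughly:

/* Continues the sketch above (claim_tstamp_slot/release_tstamp_slot).
 * A stand-in for i40e_tx_map(): returns 0 on success, -1 on DMA failure.
 */
static int map_for_dma(bool simulate_failure)
{
	return simulate_failure ? -1 : 0;
}

/* A stand-in for i40e_xmit_frame_ring(): every exit path, including the
 * mapping failure, must release the timestamp slot it claimed.
 */
static int xmit_frame(bool want_tstamp, bool dma_fails)
{
	bool have_slot = want_tstamp && claim_tstamp_slot();

	if (map_for_dma(dma_fails))
		goto cleanup_tx_tstamp;

	return 0;	/* success: the slot is released later, on completion */

cleanup_tx_tstamp:
	if (have_slot)
		release_tstamp_slot();	/* undo the claim so the lock cannot leak */
	return 0;	/* a dropped packet still reports "ok" to the stack */
}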
Reported-by: David Mirabito <davidm@metamako.com>
Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent bbc4e7d2
@@ -2932,8 +2932,10 @@ bool __i40e_chk_linearize(struct sk_buff *skb)
  * @hdr_len: size of the packet header
  * @td_cmd: the command field in the descriptor
  * @td_offset: offset for checksum or crc
+ *
+ * Returns 0 on success, -1 on failure to DMA
  **/
-static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
-			       struct i40e_tx_buffer *first, u32 tx_flags,
-			       const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+			      struct i40e_tx_buffer *first, u32 tx_flags,
+			      const u8 hdr_len, u32 td_cmd, u32 td_offset)
 {
@@ -3093,7 +3095,7 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 		mmiowb();
 	}
 
-	return;
+	return 0;
 
 dma_error:
 	dev_info(tx_ring->dev, "TX DMA map failed\n");
@@ -3110,6 +3112,8 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 	}
 
 	tx_ring->next_to_use = i;
+
+	return -1;
 }
 
 /**
@@ -3210,8 +3214,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	 */
 	i40e_atr(tx_ring, skb, tx_flags);
 
-	i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
-		    td_cmd, td_offset);
+	if (i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+			td_cmd, td_offset))
+		goto cleanup_tx_tstamp;
 
 	return NETDEV_TX_OK;
@@ -3219,6 +3224,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 	i40e_trace(xmit_frame_ring_drop, first->skb, tx_ring);
 	dev_kfree_skb_any(first->skb);
 	first->skb = NULL;
+cleanup_tx_tstamp:
+	if (unlikely(tx_flags & I40E_TX_FLAGS_TSYN)) {
+		struct i40e_pf *pf = i40e_netdev_to_pf(tx_ring->netdev);
+
+		dev_kfree_skb_any(pf->ptp_tx_skb);
+		pf->ptp_tx_skb = NULL;
+		clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, pf->state);
+	}
+
 	return NETDEV_TX_OK;
 }