Commit 857942fd authored by Anjali Singhai Jain, committed by Jeff Kirsher

i40e: Fix Rx hash reported to the stack by our driver

If the driver calls skb_set_hash, even with a zero hash, that
indicates to the stack that the hash calculation is offloaded
in hardware, so the stack does not compute a software hash. That
software hash is required for load balancing when the user turns
off rx-hashing on our device.

This patch fixes the path so that we do not call skb_set_hash
if the feature is disabled.

Change-ID: Ic4debfa4ff91b5a72e447348a75768ed7a2d3e1b
Signed-off-by: Anjali Singhai Jain <anjali.singhai@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent b499ffb0
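
To make the reasoning in the commit message concrete, here is a minimal sketch of the
core-stack fallback path it refers to, paraphrased from include/linux/skbuff.h of roughly
this kernel era (not part of this commit; field and helper names such as l4_hash and
sw_hash are assumptions and may differ between kernel versions):

/* Sketch only, not part of this commit: paraphrased stack helper.
 * Field names (l4_hash, sw_hash, hash) are assumptions for this era.
 */
static inline __u32 skb_get_hash(struct sk_buff *skb)
{
        /* The software flow-dissector hash is computed only when no
         * usable hash has been recorded.  A driver that reports an L4
         * hardware hash, even a value of zero, marks the skb as hashed,
         * so this fallback is skipped.
         */
        if (!skb->l4_hash && !skb->sw_hash)
                __skb_get_hash(skb);

        return skb->hash;
}

This is why the patch below folds the skb_set_hash() call into a new i40e_rx_hash()
helper that is gated on the NETIF_F_RXHASH feature flag.
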
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -1422,31 +1422,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-                               union i40e_rx_desc *rx_desc)
-{
-        const __le64 rss_mask =
-                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-        if ((ring->netdev->features & NETIF_F_RXHASH) &&
-            (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-                return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-        else
-                return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
         struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -1463,6 +1444,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
         return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+                                union i40e_rx_desc *rx_desc,
+                                struct sk_buff *skb,
+                                u8 rx_ptype)
+{
+        u32 hash;
+        const __le64 rss_mask =
+                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+        if (ring->netdev->features & NETIF_F_RXHASH)
+                return;
+
+        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+                hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+                skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+        }
+}
+
 /**
  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring: rx ring to clean
@@ -1612,8 +1617,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
                 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
                         i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
                                    I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
@@ -1741,8 +1746,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
                 if (unlikely(rx_status & I40E_RXD_QW1_STATUS_TSYNVALID_MASK)) {
                         i40e_ptp_rx_hwtstamp(vsi->back, skb, (rx_status &
                                    I40E_RXD_QW1_STATUS_TSYNINDX_MASK) >>
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -886,31 +886,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 }
 
 /**
- * i40e_rx_hash - returns the hash value from the Rx descriptor
- * @ring: descriptor ring
- * @rx_desc: specific descriptor
- **/
-static inline u32 i40e_rx_hash(struct i40e_ring *ring,
-                               union i40e_rx_desc *rx_desc)
-{
-        const __le64 rss_mask =
-                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
-                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
-
-        if ((ring->netdev->features & NETIF_F_RXHASH) &&
-            (rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask)
-                return le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
-        else
-                return 0;
-}
-
-/**
- * i40e_ptype_to_hash - get a hash type
+ * i40e_ptype_to_htype - get a hash type
  * @ptype: the ptype value from the descriptor
  *
  * Returns a hash type to be used by skb_set_hash
  **/
-static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
+static inline enum pkt_hash_types i40e_ptype_to_htype(u8 ptype)
 {
         struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype);
 
@@ -927,6 +908,30 @@ static inline enum pkt_hash_types i40e_ptype_to_hash(u8 ptype)
         return PKT_HASH_TYPE_L2;
 }
 
+/**
+ * i40e_rx_hash - set the hash value in the skb
+ * @ring: descriptor ring
+ * @rx_desc: specific descriptor
+ **/
+static inline void i40e_rx_hash(struct i40e_ring *ring,
+                                union i40e_rx_desc *rx_desc,
+                                struct sk_buff *skb,
+                                u8 rx_ptype)
+{
+        u32 hash;
+        const __le64 rss_mask =
+                cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH <<
+                            I40E_RX_DESC_STATUS_FLTSTAT_SHIFT);
+
+        if (ring->netdev->features & NETIF_F_RXHASH)
+                return;
+
+        if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) {
+                hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss);
+                skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype));
+        }
+}
+
 /**
  * i40e_clean_rx_irq_ps - Reclaim resources after receive; packet split
  * @rx_ring: rx ring to clean
@@ -1068,8 +1073,8 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
+
                 /* probably a little skewed due to removing CRC */
                 total_rx_bytes += skb->len;
                 total_rx_packets++;
@@ -1185,8 +1190,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
                         continue;
                 }
 
-                skb_set_hash(skb, i40e_rx_hash(rx_ring, rx_desc),
-                             i40e_ptype_to_hash(rx_ptype));
+                i40e_rx_hash(rx_ring, rx_desc, skb, rx_ptype);
                 /* probably a little skewed due to removing CRC */
                 total_rx_bytes += skb->len;
                 total_rx_packets++;