Commit 866cff06 authored by Alexander Duyck, committed by Jeff Kirsher

igb: Consolidate all of the ring feature flags into a single value

This change moves all of the ring flags into a single value. The advantage
is that there is one central place for all of these flags, and they can all
make use of the set/test bit operations.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 6ad4edfc
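
For context, a minimal sketch of the pattern this commit adopts: the ring feature flags become enum bit indices into a single unsigned long word, which the kernel's generic set_bit()/clear_bit()/test_bit() helpers then manipulate. The struct and names below (demo_ring, demo_configure, hw_is_82576_or_later) are hypothetical simplifications for illustration, not the driver's actual definitions.

/*
 * Sketch only: flags are enum bit *indices* into one unsigned long,
 * manipulated with the generic bitops helpers.  The real struct igb_ring
 * carries many more fields than this simplified stand-in.
 */
#include <linux/bitops.h>
#include <linux/types.h>

enum demo_ring_flags_t {
	DEMO_FLAG_RX_CSUM,	/* bit 0 */
	DEMO_FLAG_RX_SCTP_CSUM,	/* bit 1 */
	DEMO_FLAG_TX_CTX_IDX,	/* bit 2 */
};

struct demo_ring {
	unsigned long flags;	/* single word holding all feature bits */
};

static void demo_configure(struct demo_ring *ring, bool hw_is_82576_or_later)
{
	/* enable rx checksum on every ring */
	set_bit(DEMO_FLAG_RX_CSUM, &ring->flags);

	/* only newer hardware supports SCTP checksum offload */
	if (hw_is_82576_or_later)
		set_bit(DEMO_FLAG_RX_SCTP_CSUM, &ring->flags);

	/* callers test flags the same way, and can clear them again */
	if (!test_bit(DEMO_FLAG_RX_CSUM, &ring->flags))
		clear_bit(DEMO_FLAG_RX_SCTP_CSUM, &ring->flags);
}
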
@@ -237,10 +237,12 @@ struct igb_ring {
 	int numa_node;			/* node to alloc ring memory on */
 };
 
-#define IGB_RING_FLAG_RX_CSUM      0x00000001 /* RX CSUM enabled */
-#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
-
-#define IGB_RING_FLAG_TX_CTX_IDX   0x00000001 /* HW requires context index */
+enum e1000_ring_flags_t {
+	IGB_RING_FLAG_RX_CSUM,
+	IGB_RING_FLAG_RX_SCTP_CSUM,
+	IGB_RING_FLAG_TX_CTX_IDX,
+	IGB_RING_FLAG_TX_DETECT_HANG
+};
 
 #define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
...
@@ -708,7 +708,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->numa_node = adapter->node;
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
-			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
+			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
 		adapter->tx_ring[i] = ring;
 	}
 	/* Restore the adapter's original node */
@@ -732,10 +732,11 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->dev = &adapter->pdev->dev;
 		ring->netdev = adapter->netdev;
 		ring->numa_node = adapter->node;
-		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
+		/* enable rx checksum */
+		set_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags);
 		/* set flag indicating ring supports SCTP checksum offload */
 		if (adapter->hw.mac.type >= e1000_82576)
-			ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
+			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
 		adapter->rx_ring[i] = ring;
 	}
 	/* Restore the adapter's original node */
@@ -1822,9 +1823,11 @@ static int igb_set_features(struct net_device *netdev, u32 features)
 	for (i = 0; i < adapter->num_rx_queues; i++) {
 		if (features & NETIF_F_RXCSUM)
-			adapter->rx_ring[i]->flags |= IGB_RING_FLAG_RX_CSUM;
+			set_bit(IGB_RING_FLAG_RX_CSUM,
+				&adapter->rx_ring[i]->flags);
 		else
-			adapter->rx_ring[i]->flags &= ~IGB_RING_FLAG_RX_CSUM;
+			clear_bit(IGB_RING_FLAG_RX_CSUM,
+				  &adapter->rx_ring[i]->flags);
 	}
 
 	if (changed & NETIF_F_HW_VLAN_RX)
@@ -4035,7 +4038,7 @@ void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
 	type_tucmd |= E1000_TXD_CMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
 
 	/* For 82575, context index must be unique per ring. */
-	if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
+	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		mss_l4len_idx |= tx_ring->reg_idx << 4;
 
 	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
@@ -4202,7 +4205,7 @@ static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
 	/* 82575 requires a unique index per ring if any offload is enabled */
 	if ((tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_VLAN)) &&
-	    (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX))
+	    test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
 		olinfo_status |= tx_ring->reg_idx << 4;
 
 	/* insert L4 checksum */
@@ -5828,7 +5831,7 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
 	skb_checksum_none_assert(skb);
 
 	/* Ignore Checksum bit is set or checksum is disabled through ethtool */
-	if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
+	if (!test_bit(IGB_RING_FLAG_RX_CSUM, &ring->flags) ||
 	    (status_err & E1000_RXD_STAT_IXSM))
 		return;
@@ -5840,8 +5843,8 @@ static inline void igb_rx_checksum(struct igb_ring *ring,
 		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
 		 * packets, (aka let the stack check the crc32c)
 		 */
-		if ((skb->len == 60) &&
-		    (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM)) {
+		if (!((skb->len == 60) &&
+		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
 			u64_stats_update_begin(&ring->rx_syncp);
 			ring->rx_stats.csum_err++;
 			u64_stats_update_end(&ring->rx_syncp);
...