Commit 7d637bcc authored by Alexander Duyck, committed by Jeff Kirsher

ixgbe: add state flags to ring

This change adds a set of state flags to the rings so that each ring can
function independently, allowing features like RSC, packet split, and
TX hang detection to be controlled per ring instead of for the entire device.

This is accomplished by re-purposing the flow director reinit_state member
into a generic per-ring state field, since dedicating an entire unsigned long
to a single bit flag is wasteful.
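
As an illustrative sketch only (not part of this patch): the plain userspace
C below mirrors the pattern the new state field uses -- one unsigned long
bitmap per ring, with each feature bit toggled independently. The helpers
stand in for the kernel's atomic test_bit()/set_bit()/clear_bit(), and all
names here are invented for the example.

/*
 * Illustrative userspace sketch -- not part of the patch. One bitmap
 * per ring replaces adapter-wide feature flags, so RSC, packet split,
 * and TX hang detection can differ between rings.
 */
#include <stdio.h>

enum ring_state_bits {
	TX_FDIR_INIT_DONE,
	TX_DETECT_HANG,
	RX_PS_ENABLED,
	RX_RSC_ENABLED,
};

struct ring {
	unsigned long state;	/* one long holds every feature bit */
};

static int test_state(const struct ring *r, int bit)
{
	return (r->state >> bit) & 1UL;
}

static void set_state(struct ring *r, int bit)
{
	r->state |= 1UL << bit;
}

static void clear_state(struct ring *r, int bit)
{
	r->state &= ~(1UL << bit);
}

int main(void)
{
	struct ring rx0 = { 0 }, rx1 = { 0 };

	/* per-ring configuration: RSC on one ring but not the other */
	set_state(&rx0, RX_RSC_ENABLED);
	set_state(&rx0, RX_PS_ENABLED);
	set_state(&rx1, RX_PS_ENABLED);
	clear_state(&rx1, RX_RSC_ENABLED);

	printf("rx0: rsc=%d ps=%d\n",
	       test_state(&rx0, RX_RSC_ENABLED),
	       test_state(&rx0, RX_PS_ENABLED));
	printf("rx1: rsc=%d ps=%d\n",
	       test_state(&rx1, RX_RSC_ENABLED),
	       test_state(&rx1, RX_PS_ENABLED));
	return 0;
}

In the driver itself the same idea is wrapped in the ring_is_*(),
set_ring_*(), and check_for_tx_hang() macros added in the first hunk below,
so a ring's features can be flipped without touching adapter-wide flags.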
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Ross Brattain <ross.b.brattain@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 33cf09c9
@@ -159,6 +159,31 @@ struct ixgbe_rx_queue_stats {
 	u64 alloc_rx_buff_failed;
 };
 
+enum ixbge_ring_state_t {
+	__IXGBE_TX_FDIR_INIT_DONE,
+	__IXGBE_TX_DETECT_HANG,
+	__IXGBE_RX_PS_ENABLED,
+	__IXGBE_RX_RSC_ENABLED,
+};
+
+#define ring_is_ps_enabled(ring) \
+	test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define set_ring_ps_enabled(ring) \
+	set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define clear_ring_ps_enabled(ring) \
+	clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
+#define check_for_tx_hang(ring) \
+	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define set_check_for_tx_hang(ring) \
+	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define clear_check_for_tx_hang(ring) \
+	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
+#define ring_is_rsc_enabled(ring) \
+	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define set_ring_rsc_enabled(ring) \
+	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
+#define clear_ring_rsc_enabled(ring) \
+	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
 struct ixgbe_ring {
 	void *desc;			/* descriptor ring memory */
 	struct device *dev;		/* device for DMA mapping */
@@ -167,6 +192,7 @@ struct ixgbe_ring {
 		struct ixgbe_tx_buffer *tx_buffer_info;
 		struct ixgbe_rx_buffer *rx_buffer_info;
 	};
+	unsigned long state;
 	u8 atr_sample_rate;
 	u8 atr_count;
 	u16 count;			/* amount of descriptors */
@@ -175,28 +201,25 @@ struct ixgbe_ring {
 	u16 next_to_clean;
 
 	u8 queue_index; /* needed for multiqueue queue management */
-	u8 reg_idx;			/* holds the special value that gets
-					 * the hardware register offset
-					 * associated with this ring, which is
-					 * different for DCB and RSS modes
-					 */
-	u16 work_limit;			/* max work per interrupt */
-#define IXGBE_RING_RX_PS_ENABLED	(u8)(1)
-	u8 flags;			/* per ring feature flags */
 	u8 __iomem *tail;
 
 	unsigned int total_bytes;
 	unsigned int total_packets;
 
+	u16 work_limit;			/* max work per interrupt */
+	u16 reg_idx;			/* holds the special value that gets
+					 * the hardware register offset
+					 * associated with this ring, which is
+					 * different for DCB and RSS modes
+					 */
+
 	struct ixgbe_queue_stats stats;
 	struct u64_stats_sync syncp;
 	union {
 		struct ixgbe_tx_queue_stats tx_stats;
 		struct ixgbe_rx_queue_stats rx_stats;
 	};
-	unsigned long reinit_state;
 	int numa_node;
 	unsigned int size;		/* length in bytes */
 	dma_addr_t dma;			/* phys. address of descriptor ring */
@@ -441,7 +464,6 @@ enum ixbge_state_t {
 	__IXGBE_TESTING,
 	__IXGBE_RESETTING,
 	__IXGBE_DOWN,
-	__IXGBE_FDIR_INIT_DONE,
 	__IXGBE_SFP_MODULE_NOT_FOUND
 };
...
@@ -687,7 +687,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 	/* Detect a transmit hang in hardware, this serializes the
 	 * check with the clearing of time_stamp and movement of eop */
-	adapter->detect_tx_hung = false;
+	clear_check_for_tx_hang(tx_ring);
 	if (tx_ring->tx_buffer_info[eop].time_stamp &&
 	    time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
 	    ixgbe_tx_xon_state(adapter, tx_ring)) {
@@ -786,14 +786,13 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 		}
 	}
 
-	if (adapter->detect_tx_hung) {
-		if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-			/* schedule immediate reset if we believe we hung */
-			e_info(probe, "tx hang %d detected, resetting "
-			       "adapter\n", adapter->tx_timeout_count + 1);
-			ixgbe_tx_timeout(adapter->netdev);
-		}
+	if (check_for_tx_hang(tx_ring) &&
+	    ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+		/* schedule immediate reset if we believe we hung */
+		e_info(probe, "tx hang %d detected, resetting "
+		       "adapter\n", adapter->tx_timeout_count + 1);
+		ixgbe_tx_timeout(adapter->netdev);
 	}
 
 	/* re-arm the interrupt */
 	if (count >= tx_ring->work_limit)
@@ -1084,7 +1083,7 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 			}
 		}
 
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (ring_is_ps_enabled(rx_ring)) {
 			if (!bi->page) {
 				bi->page = netdev_alloc_page(rx_ring->netdev);
 				if (!bi->page) {
@@ -1214,7 +1213,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		(*work_done)++;
 
 		rmb(); /* read descriptor and rx_buffer_info after status DD */
-		if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+		if (ring_is_ps_enabled(rx_ring)) {
 			hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
 			len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
 			       IXGBE_RXDADV_HDRBUFLEN_SHIFT;
@@ -1284,7 +1283,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 		prefetch(next_rxd);
 		cleaned_count++;
 
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
+		if (ring_is_rsc_enabled(rx_ring))
 			rsc_count = ixgbe_get_rsc_count(rx_desc);
 
 		if (rsc_count) {
@@ -1299,7 +1298,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			if (skb->prev)
 				skb = ixgbe_transform_rsc_queue(skb,
 						&(rx_ring->rx_stats.rsc_count));
-			if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+			if (ring_is_rsc_enabled(rx_ring)) {
 				if (IXGBE_RSC_CB(skb)->delay_unmap) {
 					dma_unmap_single(rx_ring->dev,
 							 IXGBE_RSC_CB(skb)->dma,
@@ -1308,7 +1307,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 					IXGBE_RSC_CB(skb)->dma = 0;
 					IXGBE_RSC_CB(skb)->delay_unmap = false;
 				}
-				if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
+				if (ring_is_ps_enabled(rx_ring))
 					rx_ring->rx_stats.rsc_count +=
 						skb_shinfo(skb)->nr_frags;
 				else
@@ -1320,7 +1319,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 			rx_ring->stats.bytes += skb->len;
 			u64_stats_update_end(&rx_ring->syncp);
 		} else {
-			if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+			if (ring_is_ps_enabled(rx_ring)) {
 				rx_buffer_info->skb = next_buffer->skb;
 				rx_buffer_info->dma = next_buffer->dma;
 				next_buffer->skb = skb;
@@ -1782,8 +1781,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
 			for (i = 0; i < adapter->num_tx_queues; i++) {
 				struct ixgbe_ring *tx_ring =
 							adapter->tx_ring[i];
-				if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-						       &tx_ring->reinit_state))
+				if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+						       &tx_ring->state))
 					schedule_work(&adapter->fdir_reinit_task);
 			}
 		}
@@ -2522,7 +2521,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
 	}
 
 	/* reinitialize flowdirector state */
-	set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+	set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
 
 	/* enable queue */
 	txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2632,7 +2631,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
 	srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
 		  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-	if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
 		srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -2727,7 +2726,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 	int rx_buf_len;
 	u16 reg_idx = ring->reg_idx;
 
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+	if (!ring_is_rsc_enabled(ring))
 		return;
 
 	rx_buf_len = ring->rx_buf_len;
@@ -2738,7 +2737,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
 	 * total size of max desc * buf_len is not greater
 	 * than 65535
 	 */
-	if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+	if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
 		rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
@@ -2976,19 +2975,28 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		rx_ring->rx_buf_len = rx_buf_len;
 
 		if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-			rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+			set_ring_ps_enabled(rx_ring);
+		else
+			clear_ring_ps_enabled(rx_ring);
+
+		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+			set_ring_rsc_enabled(rx_ring);
 		else
-			rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+			clear_ring_rsc_enabled(rx_ring);
 
 #ifdef IXGBE_FCOE
 		if (netdev->features & NETIF_F_FCOE_MTU) {
 			struct ixgbe_ring_feature *f;
 			f = &adapter->ring_feature[RING_F_FCOE];
 			if ((i >= f->mask) && (i < f->mask + f->indices)) {
-				rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+				clear_ring_ps_enabled(rx_ring);
 				if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
 					rx_ring->rx_buf_len =
 						IXGBE_FCOE_JUMBO_FRAME_SIZE;
+			} else if (!ring_is_rsc_enabled(rx_ring) &&
+				   !ring_is_ps_enabled(rx_ring)) {
+				rx_ring->rx_buf_len =
+						IXGBE_FCOE_JUMBO_FRAME_SIZE;
 			}
 		}
 #endif /* IXGBE_FCOE */
@@ -5729,8 +5737,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 
 	if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
 		for (i = 0; i < adapter->num_tx_queues; i++)
-			set_bit(__IXGBE_FDIR_INIT_DONE,
-			        &(adapter->tx_ring[i]->reinit_state));
+			set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+			        &(adapter->tx_ring[i]->state));
 	} else {
 		e_err(probe, "failed to finish FDIR re-initialization, "
 		      "ignored adding FDIR ATR filters\n");
@@ -5816,7 +5824,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 				netif_carrier_on(netdev);
 		} else {
 			/* Force detection of hung controller */
-			adapter->detect_tx_hung = true;
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				tx_ring = adapter->tx_ring[i];
+				set_check_for_tx_hang(tx_ring);
+			}
 		}
 	} else {
 		adapter->link_up = false;
@@ -6434,8 +6445,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
 	if (tx_ring->atr_sample_rate) {
 		++tx_ring->atr_count;
 		if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-		    test_bit(__IXGBE_FDIR_INIT_DONE,
-			     &tx_ring->reinit_state)) {
+		    test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+			     &tx_ring->state)) {
 			ixgbe_atr(adapter, skb, tx_ring->queue_index,
 				  tx_flags, protocol);
 			tx_ring->atr_count = 0;
...