Commit 13fde97a authored by Alexander Duyck, committed by Jeff Kirsher

igb: Make Tx budget for NAPI user adjustable

This change makes the NAPI budget limit for transmit adjustable.
Currently it is fixed at 128; once the planned NAPI
changes/improvements that allow adjustability land, it will be
possible to tune the value for optimal performance with
applications such as routing.

v2: remove tie between NAPI and interrupt moderation
    fix work limit define name (s/IXGBE/IGB/)
    Update patch description to better reflect patch
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
parent b64e9dd5
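
For readers skimming the diff below: the core of the change is that the Tx cleanup loop is now bounded by a per-vector work limit instead of sweeping the whole ring, and it reports whether it ran out of budget so the NAPI poll routine can keep polling. A minimal sketch of that pattern (illustrative only; the struct and function names here are placeholders, not the igb code):

    #include <stdbool.h>

    struct tx_ring_sketch {
            unsigned int next_to_clean;
            unsigned int count;        /* number of descriptors in the ring */
            unsigned int work_limit;   /* plays the role of tx_work_limit */
    };

    /* Returns true if all completed work fit within the budget,
     * false if the work limit was exhausted and polling should continue.
     */
    static bool clean_tx_bounded(struct tx_ring_sketch *ring,
                                 bool (*desc_done)(unsigned int idx))
    {
            unsigned int budget = ring->work_limit;
            unsigned int i = ring->next_to_clean;

            for (; budget; budget--) {
                    if (!desc_done(i))      /* hardware has not finished this one */
                            break;
                    /* unmap the buffer, accumulate byte/packet stats, etc. */
                    if (++i == ring->count)
                            i = 0;
            }

            ring->next_to_clean = i;
            return !!budget;                /* leftover budget => fully cleaned */
    }
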
@@ -47,6 +47,7 @@ struct igb_adapter;
 /* TX/RX descriptor defines */
 #define IGB_DEFAULT_TXD 256
+#define IGB_DEFAULT_TX_WORK 128
 #define IGB_MIN_TXD 80
 #define IGB_MAX_TXD 4096
@@ -177,6 +178,7 @@ struct igb_q_vector {
     u32 eims_value;
     u16 cpu;
+    u16 tx_work_limit;
     u16 itr_val;
     u8 set_itr;
@@ -266,6 +268,7 @@ struct igb_adapter {
     u16 rx_itr;
     /* TX */
+    u16 tx_work_limit;
     u32 tx_timeout_count;
     int num_tx_queues;
     struct igb_ring *tx_ring[16];
...
@@ -2011,6 +2011,7 @@ static int igb_set_coalesce(struct net_device *netdev,
     for (i = 0; i < adapter->num_q_vectors; i++) {
         struct igb_q_vector *q_vector = adapter->q_vector[i];
+        q_vector->tx_work_limit = adapter->tx_work_limit;
         if (q_vector->rx_ring)
             q_vector->itr_val = adapter->rx_itr_setting;
         else
...
@@ -136,8 +136,8 @@ static irqreturn_t igb_msix_ring(int irq, void *);
 static void igb_update_dca(struct igb_q_vector *);
 static void igb_setup_dca(struct igb_adapter *);
 #endif /* CONFIG_IGB_DCA */
-static bool igb_clean_tx_irq(struct igb_q_vector *);
 static int igb_poll(struct napi_struct *, int);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
 static bool igb_clean_rx_irq(struct igb_q_vector *, int);
 static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
 static void igb_tx_timeout(struct net_device *);
@@ -1120,6 +1120,7 @@ static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
     q_vector->tx_ring = adapter->tx_ring[ring_idx];
     q_vector->tx_ring->q_vector = q_vector;
     q_vector->itr_val = adapter->tx_itr_setting;
+    q_vector->tx_work_limit = adapter->tx_work_limit;
     if (q_vector->itr_val && q_vector->itr_val <= 3)
         q_vector->itr_val = IGB_START_ITR;
 }
@@ -2388,11 +2389,17 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
     pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+    /* set default ring sizes */
     adapter->tx_ring_count = IGB_DEFAULT_TXD;
     adapter->rx_ring_count = IGB_DEFAULT_RXD;
+    /* set default ITR values */
     adapter->rx_itr_setting = IGB_DEFAULT_ITR;
     adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+    /* set default work limits */
+    adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
     adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
                               VLAN_HLEN;
     adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
@@ -5496,7 +5503,7 @@ static int igb_poll(struct napi_struct *napi, int budget)
         igb_update_dca(q_vector);
 #endif
     if (q_vector->tx_ring)
-        clean_complete = !!igb_clean_tx_irq(q_vector);
+        clean_complete = igb_clean_tx_irq(q_vector);
     if (q_vector->rx_ring)
         clean_complete &= igb_clean_rx_irq(q_vector, budget);
@@ -5578,64 +5585,69 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
     struct igb_adapter *adapter = q_vector->adapter;
     struct igb_ring *tx_ring = q_vector->tx_ring;
-    struct net_device *netdev = tx_ring->netdev;
-    struct e1000_hw *hw = &adapter->hw;
-    struct igb_buffer *buffer_info;
-    union e1000_adv_tx_desc *tx_desc, *eop_desc;
+    struct igb_buffer *tx_buffer;
+    union e1000_adv_tx_desc *tx_desc;
     unsigned int total_bytes = 0, total_packets = 0;
-    unsigned int i, eop, count = 0;
-    bool cleaned = false;
-    i = tx_ring->next_to_clean;
-    eop = tx_ring->buffer_info[i].next_to_watch;
-    eop_desc = IGB_TX_DESC(tx_ring, eop);
-    while ((eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)) &&
-           (count < tx_ring->count)) {
-        rmb(); /* read buffer_info after eop_desc status */
-        for (cleaned = false; !cleaned; count++) {
-            tx_desc = IGB_TX_DESC(tx_ring, i);
-            buffer_info = &tx_ring->buffer_info[i];
-            cleaned = (i == eop);
-            if (buffer_info->skb) {
-                total_bytes += buffer_info->bytecount;
-                /* gso_segs is currently only valid for tcp */
-                total_packets += buffer_info->gso_segs;
-                igb_tx_hwtstamp(q_vector, buffer_info);
-            }
-            igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
-            tx_desc->wb.status = 0;
-            i++;
-            if (i == tx_ring->count)
-                i = 0;
-        }
-        eop = tx_ring->buffer_info[i].next_to_watch;
-        eop_desc = IGB_TX_DESC(tx_ring, eop);
-    }
+    unsigned int budget = q_vector->tx_work_limit;
+    u16 i = tx_ring->next_to_clean;
+    if (test_bit(__IGB_DOWN, &adapter->state))
+        return true;
+    tx_buffer = &tx_ring->buffer_info[i];
+    tx_desc = IGB_TX_DESC(tx_ring, i);
+    for (; budget; budget--) {
+        u16 eop = tx_buffer->next_to_watch;
+        union e1000_adv_tx_desc *eop_desc;
+        eop_desc = IGB_TX_DESC(tx_ring, eop);
+        /* if DD is not set pending work has not been completed */
+        if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+            break;
+        /* prevent any other reads prior to eop_desc being verified */
+        rmb();
+        do {
+            tx_desc->wb.status = 0;
+            if (likely(tx_desc == eop_desc)) {
+                eop_desc = NULL;
+                total_bytes += tx_buffer->bytecount;
+                total_packets += tx_buffer->gso_segs;
+                igb_tx_hwtstamp(q_vector, tx_buffer);
+            }
+            igb_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+            tx_buffer++;
+            tx_desc++;
+            i++;
+            if (unlikely(i == tx_ring->count)) {
+                i = 0;
+                tx_buffer = tx_ring->buffer_info;
+                tx_desc = IGB_TX_DESC(tx_ring, 0);
+            }
+        } while (eop_desc);
+    }
     tx_ring->next_to_clean = i;
-    if (unlikely(count &&
-                 netif_carrier_ok(netdev) &&
-                 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
-        /* Make sure that anybody stopping the queue after this
-         * sees the new next_to_clean.
-         */
-        smp_mb();
-        if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
-            !(test_bit(__IGB_DOWN, &adapter->state))) {
-            netif_wake_subqueue(netdev, tx_ring->queue_index);
-            u64_stats_update_begin(&tx_ring->tx_syncp);
-            tx_ring->tx_stats.restart_queue++;
-            u64_stats_update_end(&tx_ring->tx_syncp);
-        }
-    }
+    u64_stats_update_begin(&tx_ring->tx_syncp);
+    tx_ring->tx_stats.bytes += total_bytes;
+    tx_ring->tx_stats.packets += total_packets;
+    u64_stats_update_end(&tx_ring->tx_syncp);
+    tx_ring->total_bytes += total_bytes;
+    tx_ring->total_packets += total_packets;
     if (tx_ring->detect_tx_hung) {
+        struct e1000_hw *hw = &adapter->hw;
+        u16 eop = tx_ring->buffer_info[i].next_to_watch;
+        union e1000_adv_tx_desc *eop_desc;
+        eop_desc = IGB_TX_DESC(tx_ring, eop);
         /* Detect a transmit hang in hardware, this serializes the
          * check with the clearing of time_stamp and movement of i */
         tx_ring->detect_tx_hung = false;
@@ -5666,16 +5678,34 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
             eop,
             jiffies,
             eop_desc->wb.status);
-            netif_stop_subqueue(netdev, tx_ring->queue_index);
+            netif_stop_subqueue(tx_ring->netdev,
+                                tx_ring->queue_index);
+            /* we are about to reset, no point in enabling stuff */
+            return true;
         }
     }
-    tx_ring->total_bytes += total_bytes;
-    tx_ring->total_packets += total_packets;
-    u64_stats_update_begin(&tx_ring->tx_syncp);
-    tx_ring->tx_stats.bytes += total_bytes;
-    tx_ring->tx_stats.packets += total_packets;
-    u64_stats_update_end(&tx_ring->tx_syncp);
-    return count < tx_ring->count;
+    if (unlikely(total_packets &&
+                 netif_carrier_ok(tx_ring->netdev) &&
+                 igb_desc_unused(tx_ring) >= IGB_TX_QUEUE_WAKE)) {
+        /* Make sure that anybody stopping the queue after this
+         * sees the new next_to_clean.
+         */
+        smp_mb();
+        if (__netif_subqueue_stopped(tx_ring->netdev,
+                                     tx_ring->queue_index) &&
+            !(test_bit(__IGB_DOWN, &adapter->state))) {
+            netif_wake_subqueue(tx_ring->netdev,
+                                tx_ring->queue_index);
+            u64_stats_update_begin(&tx_ring->tx_syncp);
+            tx_ring->tx_stats.restart_queue++;
+            u64_stats_update_end(&tx_ring->tx_syncp);
+        }
+    }
+    return !!budget;
 }
 static inline void igb_rx_checksum(struct igb_ring *ring,
...
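
Taken together with the igb_poll hunk above, the bounded Tx cleanup slots into the usual NAPI pattern: Tx cleanup is limited by the driver's own work limit, Rx cleanup by the NAPI budget, and the poll routine stays in polling mode while either reports unfinished work. A rough, illustrative skeleton of that pattern (example_clean_tx and example_clean_rx are hypothetical helpers, not igb functions):

    #include <linux/netdevice.h>

    static bool example_clean_tx(struct napi_struct *napi);             /* bounded by tx work limit */
    static bool example_clean_rx(struct napi_struct *napi, int budget); /* bounded by NAPI budget */

    static int example_poll(struct napi_struct *napi, int budget)
    {
            bool clean_complete = true;

            clean_complete = example_clean_tx(napi);
            clean_complete &= example_clean_rx(napi, budget);

            /* if either cleanup ran out of budget, ask to be polled again */
            if (!clean_complete)
                    return budget;

            /* all work done: leave polling mode and re-enable interrupts */
            napi_complete(napi);
            return 0;
    }
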