Commit fa71ae27 authored by Alexander Duyck, committed by Jeff Kirsher

ixgbevf: Move Tx clean-up into NAPI context

Currently the VF driver processes all of its transmits in interrupt
context.  This is messy, since Rx is handled entirely in NAPI, and it can
result in interrupts being left disabled.  To resolve this, move all of
the Tx packet processing into NAPI and combine the interrupt and polling
routines into just a pair of functions.
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 6b43c446
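For context, the change converges on the standard combined-queue NAPI pattern.
The sketch below is a minimal illustration, not the driver's literal code: the
example_* names and the stub clean routines are hypothetical stand-ins.  What
it shows is the ordering the patch adopts: Tx rings are cleaned first, in NAPI
context rather than in the ISR; the Rx budget is split across rings so no ring
gets less than 1; and the vector's interrupt is re-armed only once every ring
reports it is fully cleaned.

#include <linux/kernel.h>     /* container_of(), max() */
#include <linux/netdevice.h>  /* struct napi_struct, napi_complete() */

/* Hypothetical stand-ins for the driver's ring and q_vector types. */
struct example_ring {
    struct example_ring *next;    /* singly linked list of rings */
};

struct example_q_vector {
    struct napi_struct napi;
    struct example_ring *tx_ring; /* Tx rings served by this vector */
    struct example_ring *rx_ring; /* Rx rings served by this vector */
    int rx_count;                 /* number of Rx rings in the list */
};

/* Stubs standing in for the real per-ring clean and re-arm routines;
 * each clean routine returns true once its ring is fully cleaned. */
static bool example_clean_tx(struct example_q_vector *qv,
                             struct example_ring *ring) { return true; }
static bool example_clean_rx(struct example_q_vector *qv,
                             struct example_ring *ring,
                             int budget) { return true; }
static void example_irq_enable(struct example_q_vector *qv) { }

static int example_poll(struct napi_struct *napi, int budget)
{
    struct example_q_vector *qv =
        container_of(napi, struct example_q_vector, napi);
    struct example_ring *ring;
    bool clean_complete = true;
    int per_ring_budget;

    /* Tx completion now runs here, in NAPI context, not in the ISR */
    for (ring = qv->tx_ring; ring; ring = ring->next)
        clean_complete &= example_clean_tx(qv, ring);

    /* split the budget fairly across Rx rings, never below 1 per ring */
    per_ring_budget = max(budget / max(qv->rx_count, 1), 1);
    for (ring = qv->rx_ring; ring; ring = ring->next)
        clean_complete &= example_clean_rx(qv, ring, per_ring_budget);

    if (!clean_complete)
        return budget;   /* work remains: stay in polling mode */

    napi_complete(napi);     /* all done: leave polling ... */
    example_irq_enable(qv);  /* ... and re-arm this vector's interrupt */
    return 0;
}

Returning the full budget keeps the vector in polling mode, while returning 0
after napi_complete() hands interrupt re-arming back to the driver; that split
is what lets the patch delete the separate Tx interrupt handler and the VTEICS
re-arm write from ixgbevf_clean_tx_irq below.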
@@ -364,7 +364,6 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
             }
             goto err_tx_ring_setup;
         }
-        tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
     }

     memcpy(rx_ring, adapter->rx_ring,
@@ -380,7 +379,6 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
             }
             goto err_rx_ring_setup;
         }
-        rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
     }

     /*
...
@@ -81,11 +81,6 @@ struct ixgbevf_ring {
               * offset associated with this ring, which is different
               * for DCB and RSS modes */
-    u64 v_idx; /* maps directly to the index for this ring in the hardware
-                * vector array, can also be used for finding the bit in EICR
-                * and friends that represents the vector for this ring */
-
-    u16 work_limit; /* max work per interrupt */
     u16 rx_buf_len;
 };
@@ -140,6 +135,7 @@ struct ixgbevf_q_vector {
     struct ixgbevf_ring_container rx, tx;
     u32 eitr;
     int v_idx; /* vector index in list */
+    char name[IFNAMSIZ + 9];
 };

 /* Helper macros to switch between ints/sec and what the register uses.
@@ -167,9 +163,8 @@ struct ixgbevf_q_vector {
 #define NON_Q_VECTORS (OTHER_VECTOR)

 #define MAX_MSIX_Q_VECTORS 2
-#define MAX_MSIX_COUNT 2

-#define MIN_MSIX_Q_VECTORS 2
+#define MIN_MSIX_Q_VECTORS 1
 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

 /* board specific private data structure */
@@ -179,7 +174,6 @@ struct ixgbevf_adapter {
     u16 bd_number;
     struct work_struct reset_task;
     struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
-    char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];

     /* Interrupt Throttle Rate */
     u32 itr_setting;
@@ -187,6 +181,7 @@ struct ixgbevf_adapter {
     /* TX */
     struct ixgbevf_ring *tx_ring; /* One per active queue */
     int num_tx_queues;
+    u16 tx_itr_setting;
     u64 restart_queue;
     u64 hw_csum_tx_good;
     u64 lsc_int;
@@ -197,6 +192,7 @@ struct ixgbevf_adapter {
     /* RX */
     struct ixgbevf_ring *rx_ring; /* One per active queue */
     int num_rx_queues;
+    u16 rx_itr_setting;
     u64 hw_csum_rx_error;
     u64 hw_rx_no_dma_resources;
     u64 hw_csum_rx_good;
...
@@ -97,7 +97,7 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

 /* forward decls */
-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector);
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector);
 static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
                u32 itr_reg);
@@ -182,14 +182,14 @@ static void ixgbevf_tx_timeout(struct net_device *netdev);
 /**
  * ixgbevf_clean_tx_irq - Reclaim resources after transmit completes
- * @adapter: board private structure
+ * @q_vector: board private structure
  * @tx_ring: tx ring to clean
  **/
-static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
+static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
                  struct ixgbevf_ring *tx_ring)
 {
+    struct ixgbevf_adapter *adapter = q_vector->adapter;
     struct net_device *netdev = adapter->netdev;
-    struct ixgbe_hw *hw = &adapter->hw;
     union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
     struct ixgbevf_tx_buffer *tx_buffer_info;
     unsigned int i, eop, count = 0;
@@ -200,7 +200,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
     eop_desc = IXGBE_TX_DESC_ADV(*tx_ring, eop);

     while ((eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)) &&
-           (count < tx_ring->work_limit)) {
+           (count < tx_ring->count)) {
         bool cleaned = false;
         rmb(); /* read buffer_info after eop_desc */
         /* eop could change between read and DD-check */
@@ -256,18 +256,12 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_adapter *adapter,
         }
     }

-    /* re-arm the interrupt */
-    if ((count >= tx_ring->work_limit) &&
-        (!test_bit(__IXGBEVF_DOWN, &adapter->state))) {
-        IXGBE_WRITE_REG(hw, IXGBE_VTEICS, tx_ring->v_idx);
-    }
-
     u64_stats_update_begin(&tx_ring->syncp);
     tx_ring->total_bytes += total_bytes;
     tx_ring->total_packets += total_packets;
     u64_stats_update_end(&tx_ring->syncp);

-    return count < tx_ring->work_limit;
+    return count < tx_ring->count;
 }

 /**
@@ -402,7 +396,7 @@ static inline void ixgbevf_irq_enable_queues(struct ixgbevf_adapter *adapter,
 static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                  struct ixgbevf_ring *rx_ring,
-                 int *work_done, int work_to_do)
+                 int budget)
 {
     struct ixgbevf_adapter *adapter = q_vector->adapter;
     struct pci_dev *pdev = adapter->pdev;
@@ -411,7 +405,6 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
     struct sk_buff *skb;
     unsigned int i;
     u32 len, staterr;
-    bool cleaned = false;
     int cleaned_count = 0;
     unsigned int total_rx_bytes = 0, total_rx_packets = 0;
@@ -421,13 +414,12 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
     rx_buffer_info = &rx_ring->rx_buffer_info[i];

     while (staterr & IXGBE_RXD_STAT_DD) {
-        if (*work_done >= work_to_do)
+        if (!budget)
             break;
-        (*work_done)++;
+        budget--;

         rmb(); /* read descriptor and rx_buffer_info after status DD */
         len = le16_to_cpu(rx_desc->wb.upper.length);
-        cleaned = true;
         skb = rx_buffer_info->skb;
         prefetch(skb->data - NET_IP_ALIGN);
         rx_buffer_info->skb = NULL;
@@ -510,74 +502,52 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
     rx_ring->total_bytes += total_rx_bytes;
     u64_stats_update_end(&rx_ring->syncp);

-    return cleaned;
-}
-
-/**
- * ixgbevf_clean_rxonly - msix (aka one shot) rx clean routine
- * @napi: napi struct with our devices info in it
- * @budget: amount of work driver is allowed to do this pass, in packets
- *
- * This function is optimized for cleaning one queue only on a single
- * q_vector!!!
- **/
-static int ixgbevf_clean_rxonly(struct napi_struct *napi, int budget)
-{
-    struct ixgbevf_q_vector *q_vector =
-        container_of(napi, struct ixgbevf_q_vector, napi);
-    struct ixgbevf_adapter *adapter = q_vector->adapter;
-    int work_done = 0;
-
-    ixgbevf_clean_rx_irq(q_vector, q_vector->rx.ring, &work_done, budget);
-
-    /* If all Rx work done, exit the polling mode */
-    if (work_done < budget) {
-        napi_complete(napi);
-        if (adapter->itr_setting & 1)
-            ixgbevf_set_itr_msix(q_vector);
-        if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-            ixgbevf_irq_enable_queues(adapter,
-                          1 << q_vector->v_idx);
-    }
-
-    return work_done;
+    return !!budget;
 }

 /**
- * ixgbevf_clean_rxonly_many - msix (aka one shot) rx clean routine
+ * ixgbevf_poll - NAPI polling callback
  * @napi: napi struct with our devices info in it
  * @budget: amount of work driver is allowed to do this pass, in packets
  *
- * This function will clean more than one rx queue associated with a
+ * This function will clean one or more rings associated with a
  * q_vector.
  **/
-static int ixgbevf_clean_rxonly_many(struct napi_struct *napi, int budget)
+static int ixgbevf_poll(struct napi_struct *napi, int budget)
 {
     struct ixgbevf_q_vector *q_vector =
         container_of(napi, struct ixgbevf_q_vector, napi);
     struct ixgbevf_adapter *adapter = q_vector->adapter;
-    struct ixgbevf_ring *rx_ring;
-    int work_done = 0;
+    struct ixgbevf_ring *ring;
+    int per_ring_budget;
+    bool clean_complete = true;
+
+    ixgbevf_for_each_ring(ring, q_vector->tx)
+        clean_complete &= ixgbevf_clean_tx_irq(q_vector, ring);

     /* attempt to distribute budget to each queue fairly, but don't allow
      * the budget to go below 1 because we'll exit polling */
-    budget /= (q_vector->rx.count ?: 1);
-    budget = max(budget, 1);
-
-    ixgbevf_for_each_ring(rx_ring, q_vector->rx)
-        ixgbevf_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
-
-    /* If all Rx work done, exit the polling mode */
-    if (work_done < budget) {
-        napi_complete(napi);
-        if (adapter->itr_setting & 1)
-            ixgbevf_set_itr_msix(q_vector);
-        if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
-            ixgbevf_irq_enable_queues(adapter,
-                          1 << q_vector->v_idx);
-    }
+    if (q_vector->rx.count > 1)
+        per_ring_budget = max(budget/q_vector->rx.count, 1);
+    else
+        per_ring_budget = budget;
+
+    ixgbevf_for_each_ring(ring, q_vector->rx)
+        clean_complete &= ixgbevf_clean_rx_irq(q_vector, ring,
+                               per_ring_budget);
+
+    /* If all work not completed, return budget and keep polling */
+    if (!clean_complete)
+        return budget;
+
+    /* all work done, exit the polling mode */
+    napi_complete(napi);
+    if (adapter->rx_itr_setting & 1)
+        ixgbevf_set_itr(q_vector);
+    if (!test_bit(__IXGBEVF_DOWN, &adapter->state))
+        ixgbevf_irq_enable_queues(adapter,
+                      1 << q_vector->v_idx);

-    return work_done;
+    return 0;
 }
@@ -720,7 +690,7 @@ static void ixgbevf_write_eitr(struct ixgbevf_adapter *adapter, int v_idx,
     IXGBE_WRITE_REG(hw, IXGBE_VTEITR(v_idx), itr_reg);
 }

-static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
+static void ixgbevf_set_itr(struct ixgbevf_q_vector *q_vector)
 {
     struct ixgbevf_adapter *adapter = q_vector->adapter;
     u32 new_itr;
@@ -780,8 +750,7 @@ static void ixgbevf_set_itr_msix(struct ixgbevf_q_vector *q_vector)
 static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
 {
-    struct net_device *netdev = data;
-    struct ixgbevf_adapter *adapter = netdev_priv(netdev);
+    struct ixgbevf_adapter *adapter = data;
     struct ixgbe_hw *hw = &adapter->hw;
     u32 eicr;
     u32 msg;
@@ -821,59 +790,22 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
     return IRQ_HANDLED;
 }

-static irqreturn_t ixgbevf_msix_clean_tx(int irq, void *data)
-{
-    struct ixgbevf_q_vector *q_vector = data;
-    struct ixgbevf_adapter *adapter = q_vector->adapter;
-    struct ixgbevf_ring *tx_ring;
-
-    if (!q_vector->tx.ring)
-        return IRQ_HANDLED;
-
-    ixgbevf_for_each_ring(tx_ring, q_vector->tx) {
-        tx_ring->total_bytes = 0;
-        tx_ring->total_packets = 0;
-        ixgbevf_clean_tx_irq(adapter, tx_ring);
-    }
-
-    if (adapter->itr_setting & 1)
-        ixgbevf_set_itr_msix(q_vector);
-
-    return IRQ_HANDLED;
-}
-
 /**
- * ixgbevf_msix_clean_rx - single unshared vector rx clean (all queues)
+ * ixgbevf_msix_clean_rings - clean rings on a single unshared vector
  * @irq: unused
  * @data: pointer to our q_vector struct for this interrupt vector
  **/
-static irqreturn_t ixgbevf_msix_clean_rx(int irq, void *data)
+static irqreturn_t ixgbevf_msix_clean_rings(int irq, void *data)
 {
     struct ixgbevf_q_vector *q_vector = data;
     struct ixgbevf_adapter *adapter = q_vector->adapter;
     struct ixgbe_hw *hw = &adapter->hw;
-    struct ixgbevf_ring *rx_ring;
-
-    ixgbevf_for_each_ring(rx_ring, q_vector->rx) {
-        rx_ring->total_bytes = 0;
-        rx_ring->total_packets = 0;
-    }
-
-    if (!q_vector->rx.ring)
-        return IRQ_HANDLED;

     /* disable interrupts on this vector only */
     IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, 1 << q_vector->v_idx);
-    napi_schedule(&q_vector->napi);
-
-    return IRQ_HANDLED;
-}
-
-static irqreturn_t ixgbevf_msix_clean_many(int irq, void *data)
-{
-    ixgbevf_msix_clean_rx(irq, data);
-    ixgbevf_msix_clean_tx(irq, data);
+    if (q_vector->rx.ring || q_vector->tx.ring)
+        napi_schedule(&q_vector->napi);

     return IRQ_HANDLED;
 }
@@ -886,7 +818,6 @@ static inline void map_vector_to_rxq(struct ixgbevf_adapter *a, int v_idx,
     a->rx_ring[r_idx].next = q_vector->rx.ring;
     q_vector->rx.ring = &a->rx_ring[r_idx];
     q_vector->rx.count++;
-    a->rx_ring[r_idx].v_idx = 1 << v_idx;
 }

 static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
@@ -897,7 +828,6 @@ static inline void map_vector_to_txq(struct ixgbevf_adapter *a, int v_idx,
     a->tx_ring[t_idx].next = q_vector->tx.ring;
     q_vector->tx.ring = &a->tx_ring[t_idx];
     q_vector->tx.count++;
-    a->tx_ring[t_idx].v_idx = 1 << v_idx;
 }

 /**
@@ -973,37 +903,30 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
 static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
 {
     struct net_device *netdev = adapter->netdev;
-    irqreturn_t (*handler)(int, void *);
-    int i, vector, q_vectors, err;
+    int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+    int vector, err;
     int ri = 0, ti = 0;

-    /* Decrement for Other and TCP Timer vectors */
-    q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
-#define SET_HANDLER(_v) (((_v)->rx.ring && (_v)->tx.ring) \
-                     ? &ixgbevf_msix_clean_many : \
-             (_v)->rx.ring ? &ixgbevf_msix_clean_rx : \
-             (_v)->tx.ring ? &ixgbevf_msix_clean_tx : \
-             NULL)
     for (vector = 0; vector < q_vectors; vector++) {
-        handler = SET_HANDLER(adapter->q_vector[vector]);
-
-        if (handler == &ixgbevf_msix_clean_rx) {
-            sprintf(adapter->name[vector], "%s-%s-%d",
-                netdev->name, "rx", ri++);
-        } else if (handler == &ixgbevf_msix_clean_tx) {
-            sprintf(adapter->name[vector], "%s-%s-%d",
-                netdev->name, "tx", ti++);
-        } else if (handler == &ixgbevf_msix_clean_many) {
-            sprintf(adapter->name[vector], "%s-%s-%d",
-                netdev->name, "TxRx", vector);
+        struct ixgbevf_q_vector *q_vector = adapter->q_vector[vector];
+        struct msix_entry *entry = &adapter->msix_entries[vector];
+
+        if (q_vector->tx.ring && q_vector->rx.ring) {
+            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                 "%s-%s-%d", netdev->name, "TxRx", ri++);
+            ti++;
+        } else if (q_vector->rx.ring) {
+            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                 "%s-%s-%d", netdev->name, "rx", ri++);
+        } else if (q_vector->tx.ring) {
+            snprintf(q_vector->name, sizeof(q_vector->name) - 1,
+                 "%s-%s-%d", netdev->name, "tx", ti++);
         } else {
             /* skip this unused q_vector */
             continue;
         }
-        err = request_irq(adapter->msix_entries[vector].vector,
-                  handler, 0, adapter->name[vector],
-                  adapter->q_vector[vector]);
+        err = request_irq(entry->vector, &ixgbevf_msix_clean_rings, 0,
+                  q_vector->name, q_vector);
         if (err) {
             hw_dbg(&adapter->hw,
                    "request_irq failed for MSIX interrupt "
@@ -1012,9 +935,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
         }
     }

-    sprintf(adapter->name[vector], "%s:mbx", netdev->name);
     err = request_irq(adapter->msix_entries[vector].vector,
-              &ixgbevf_msix_mbx, 0, adapter->name[vector], netdev);
+              &ixgbevf_msix_mbx, 0, netdev->name, adapter);
     if (err) {
         hw_dbg(&adapter->hw,
                "request_irq for msix_mbx failed: %d\n", err);
@@ -1024,9 +946,11 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
     return 0;

 free_queue_irqs:
-    for (i = vector - 1; i >= 0; i--)
-        free_irq(adapter->msix_entries[--vector].vector,
-             &(adapter->q_vector[i]));
+    while (vector) {
+        vector--;
+        free_irq(adapter->msix_entries[vector].vector,
+             adapter->q_vector[vector]);
+    }
     pci_disable_msix(adapter->pdev);
     kfree(adapter->msix_entries);
     adapter->msix_entries = NULL;
@@ -1069,17 +993,20 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
 static void ixgbevf_free_irq(struct ixgbevf_adapter *adapter)
 {
-    struct net_device *netdev = adapter->netdev;
     int i, q_vectors;

     q_vectors = adapter->num_msix_vectors;

     i = q_vectors - 1;

-    free_irq(adapter->msix_entries[i].vector, netdev);
+    free_irq(adapter->msix_entries[i].vector, adapter);
     i--;

     for (; i >= 0; i--) {
+        /* free only the irqs that were actually requested */
+        if (!adapter->q_vector[i]->rx.ring &&
+            !adapter->q_vector[i]->tx.ring)
+            continue;
+
         free_irq(adapter->msix_entries[i].vector,
              adapter->q_vector[i]);
     }
@@ -1317,15 +1244,8 @@ static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
     int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-        struct napi_struct *napi;
         q_vector = adapter->q_vector[q_idx];
-        if (!q_vector->rx.ring)
-            continue;
-        napi = &q_vector->napi;
-        if (q_vector->rx.count > 1)
-            napi->poll = &ixgbevf_clean_rxonly_many;
-
-        napi_enable(napi);
+        napi_enable(&q_vector->napi);
     }
 }
@@ -1337,8 +1257,6 @@ static void ixgbevf_napi_disable_all(struct ixgbevf_adapter *adapter)
     for (q_idx = 0; q_idx < q_vectors; q_idx++) {
         q_vector = adapter->q_vector[q_idx];
-        if (!q_vector->rx.ring)
-            continue;
         napi_disable(&q_vector->napi);
     }
 }
@@ -1703,10 +1621,9 @@ static void ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 {
     int err, vector_threshold;

-    /* We'll want at least 3 (vector_threshold):
-     * 1) TxQ[0] Cleanup
-     * 2) RxQ[0] Cleanup
-     * 3) Other (Link Status Change, etc.)
+    /* We'll want at least 2 (vector_threshold):
+     * 1) TxQ[0] + RxQ[0] handler
+     * 2) Other (Link Status Change, etc.)
      */
     vector_threshold = MIN_MSIX_COUNT;
@@ -1821,10 +1738,12 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
      * It's easy to be greedy for MSI-X vectors, but it really
      * doesn't do us much good if we have a lot more vectors
      * than CPU's. So let's be conservative and only ask for
-     * (roughly) twice the number of vectors as there are CPU's.
+     * (roughly) the same number of vectors as there are CPU's.
+     * The default is to use pairs of vectors.
      */
-    v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
-               (int)(num_online_cpus() * 2)) + NON_Q_VECTORS;
+    v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
+    v_budget = min_t(int, v_budget, num_online_cpus());
+    v_budget += NON_Q_VECTORS;

     /* A failure in MSI-X entry allocation isn't fatal, but it does
      * mean we disable MSI-X capabilities of the adapter. */
@@ -1855,12 +1774,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 {
     int q_idx, num_q_vectors;
     struct ixgbevf_q_vector *q_vector;
-    int napi_vectors;
-    int (*poll)(struct napi_struct *, int);

     num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-    napi_vectors = adapter->num_rx_queues;
-    poll = &ixgbevf_clean_rxonly;

     for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
         q_vector = kzalloc(sizeof(struct ixgbevf_q_vector), GFP_KERNEL);
@@ -1869,9 +1784,8 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
         q_vector->adapter = adapter;
         q_vector->v_idx = q_idx;
         q_vector->eitr = adapter->eitr_param;
-        if (q_idx < napi_vectors)
-            netif_napi_add(adapter->netdev, &q_vector->napi,
-                       (*poll), 64);
+        netif_napi_add(adapter->netdev, &q_vector->napi,
+                   ixgbevf_poll, 64);
         adapter->q_vector[q_idx] = q_vector;
     }
@@ -2272,7 +2186,6 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
     tx_ring->next_to_use = 0;
     tx_ring->next_to_clean = 0;
-    tx_ring->work_limit = tx_ring->count;

     return 0;
 err:
...