Commit 065f5f97 authored by David S. Miller
parents 6d4fa852 0b7f5d0b
@@ -278,8 +278,10 @@ enum ixgbe_ring_f_enum {
 #define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
-        int indices;
-        int mask;
+        u16 limit;      /* upper limit on feature indices */
+        u16 indices;    /* current value of indices */
+        u16 mask;       /* Mask used for feature to ring mapping */
+        u16 offset;     /* offset to start of feature */
 } ____cacheline_internodealigned_in_smp;

 /*
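Note: the repurposed struct splits what used to be one overloaded mask/indices pair into four fields. A standalone sketch of how they are meant to relate, based on how the rest of this commit uses them (ring_in_feature() is a hypothetical helper, but it mirrors the exact test this patch adds in ixgbe_alloc_q_vector() for marking FCoE rings):

    #include <stdint.h>

    struct ring_feature {            /* mirrors struct ixgbe_ring_feature above */
            uint16_t limit;          /* upper bound applied when recounting queues */
            uint16_t indices;        /* rings currently owned by the feature */
            uint16_t mask;           /* kept for hash-style feature-to-ring mapping */
            uint16_t offset;         /* first ring belonging to the feature */
    };

    /* a feature now owns the contiguous ring range [offset, offset + indices) */
    static inline int ring_in_feature(const struct ring_feature *f, uint16_t idx)
    {
            return idx >= f->offset && idx < f->offset + f->indices;
    }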
@@ -315,7 +317,7 @@ struct ixgbe_ring_container {
                           ? 8 : 1)
 #define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS

-/* MAX_MSIX_Q_VECTORS of these are allocated,
+/* MAX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
  */
 struct ixgbe_q_vector {
@@ -401,11 +403,11 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
 #define NON_Q_VECTORS (OTHER_VECTOR)

 #define MAX_MSIX_VECTORS_82599 64
-#define MAX_MSIX_Q_VECTORS_82599 64
+#define MAX_Q_VECTORS_82599 64
 #define MAX_MSIX_VECTORS_82598 18
-#define MAX_MSIX_Q_VECTORS_82598 16
+#define MAX_Q_VECTORS_82598 16

-#define MAX_MSIX_Q_VECTORS MAX_MSIX_Q_VECTORS_82599
+#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
 #define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

 #define MIN_MSIX_Q_VECTORS 1
@@ -496,7 +498,7 @@ struct ixgbe_adapter {
        u32 alloc_rx_page_failed;
        u32 alloc_rx_buff_failed;

-       struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+       struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];

        /* DCB parameters */
        struct ieee_pfc *ixgbe_ieee_pfc;
@@ -507,8 +509,8 @@ struct ixgbe_adapter {
        u8 dcbx_cap;
        enum ixgbe_fc_mode last_lfc_mode;

-       int num_msix_vectors;
-       int max_msix_q_vectors; /* true count of q_vectors for device */
+       int num_q_vectors;      /* current number of q_vectors for device */
+       int max_q_vectors;      /* true count of q_vectors for device */
        struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
        struct msix_entry *msix_entries;
...
@@ -2090,7 +2090,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_q_vector *q_vector;
        int i;
-       int num_vectors;
        u16 tx_itr_param, rx_itr_param;
        bool need_reset = false;
@@ -2126,12 +2125,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
        /* check the old value and enable RSC if necessary */
        need_reset = ixgbe_update_rsc(adapter);

-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       else
-               num_vectors = 1;
-
-       for (i = 0; i < num_vectors; i++) {
+       for (i = 0; i < adapter->num_q_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* tx only */
...
@@ -674,7 +674,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
        if (adapter->ring_feature[RING_F_FCOE].indices) {
                /* Use multiple rx queues for FCoE by redirection table */
                for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
-                       fcoe_i = f->mask + i % f->indices;
+                       fcoe_i = f->offset + i % f->indices;
                        fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
                        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
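Note: the loop above spreads the FCRETA redirection slots round-robin across the FCoE rings starting at f->offset. A standalone illustration with made-up values (IXGBE_FCRETA_SIZE is 8 in this hardware; the fcoe_i &= IXGBE_FCRETA_ENTRY_MASK wrap step is omitted here):

    #include <stdio.h>

    int main(void)
    {
            unsigned int offset = 4, indices = 3, i;

            /* mirrors fcoe_i = f->offset + i % f->indices */
            for (i = 0; i < 8; i++)
                    printf("FCRETA[%u] -> rx ring %u\n", i, offset + i % indices);
            return 0;       /* prints rings 4,5,6,4,5,6,4,5 */
    }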
@@ -683,7 +683,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
        } else {
                /* Use single rx queue for FCoE */
-               fcoe_i = f->mask;
+               fcoe_i = f->offset;
                fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
                IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
                IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
@@ -691,7 +691,7 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
                                (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
        }
        /* send FIP frames to the first FCoE queue */
-       fcoe_i = f->mask;
+       fcoe_i = f->offset;
        fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
        IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
                        IXGBE_ETQS_QUEUE_EN |
@@ -770,7 +770,7 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
        ixgbe_clear_interrupt_scheme(adapter);

        adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
-       adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
+       adapter->ring_feature[RING_F_FCOE].limit = IXGBE_FCRETA_SIZE;
        netdev->features |= NETIF_F_FCOE_CRC;
        netdev->features |= NETIF_F_FSO;
        netdev->features |= NETIF_F_FCOE_MTU;
...
@@ -138,30 +138,6 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
 }

 #endif
-/**
- * ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
- * @adapter: board private structure to initialize
- *
- * Cache the descriptor ring offsets for Flow Director to the assigned rings.
- *
- **/
-static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
-{
-       int i;
-       bool ret = false;
-
-       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-           (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i]->reg_idx = i;
-               ret = true;
-       }
-
-       return ret;
-}
-
 #ifdef IXGBE_FCOE
 /**
  * ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
@@ -180,17 +156,14 @@ static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
                return false;

        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                       ixgbe_cache_ring_fdir(adapter);
-               else
-                       ixgbe_cache_ring_rss(adapter);
+               ixgbe_cache_ring_rss(adapter);

-               fcoe_rx_i = f->mask;
-               fcoe_tx_i = f->mask;
+               fcoe_rx_i = f->offset;
+               fcoe_tx_i = f->offset;
        }
        for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-               adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-               adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+               adapter->rx_ring[f->offset + i]->reg_idx = fcoe_rx_i;
+               adapter->tx_ring[f->offset + i]->reg_idx = fcoe_tx_i;
        }
        return true;
 }
@@ -244,9 +217,6 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
                return;
 #endif /* IXGBE_FCOE */

-       if (ixgbe_cache_ring_fdir(adapter))
-               return;
-
        if (ixgbe_cache_ring_rss(adapter))
                return;
 }
@@ -272,53 +242,39 @@ static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
  * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
  *
  **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
+static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
 {
-       bool ret = false;
-       struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
+       struct ixgbe_ring_feature *f;
+       u16 rss_i;

-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               f->mask = 0xF;
-               adapter->num_rx_queues = f->indices;
-               adapter->num_tx_queues = f->indices;
-               ret = true;
+       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) {
+               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+               return false;
        }

-       return ret;
-}
+       /* set mask for 16 queue limit of RSS */
+       f = &adapter->ring_feature[RING_F_RSS];
+       rss_i = f->limit;

-/**
- * ixgbe_set_fdir_queues - Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-       bool ret = false;
-       struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-
-       f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
-       f_fdir->mask = 0;
+       f->indices = rss_i;
+       f->mask = 0xF;

        /*
-        * Use RSS in addition to Flow Director to ensure the best
+        * Use Flow Director in addition to RSS to ensure the best
         * distribution of flows across cores, even when an FDIR flow
         * isn't matched.
         */
-       if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-           (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-               adapter->num_tx_queues = f_fdir->indices;
-               adapter->num_rx_queues = f_fdir->indices;
-               ret = true;
-       } else {
-               adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
+       if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
+               f = &adapter->ring_feature[RING_F_FDIR];
+
+               f->indices = min_t(u16, num_online_cpus(), f->limit);
+               rss_i = max_t(u16, rss_i, f->indices);
        }
-       return ret;
+
+       adapter->num_rx_queues = rss_i;
+       adapter->num_tx_queues = rss_i;
+
+       return true;
 }

 #ifdef IXGBE_FCOE
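Note: with ixgbe_set_fdir_queues() folded in, one function now picks the queue count: RSS supplies a base from its limit, and if ATR/Flow Director is enabled the count is raised to min(num_online_cpus(), fdir limit). A standalone sketch of that arithmetic (names illustrative):

    #include <stdint.h>

    static uint16_t queue_count(uint16_t rss_limit, uint16_t fdir_limit,
                                uint16_t cpus, int fdir_capable)
    {
            uint16_t rss_i = rss_limit;

            if (fdir_capable) {
                    uint16_t fdir_i = cpus < fdir_limit ? cpus : fdir_limit;

                    if (fdir_i > rss_i)     /* rss_i = max(rss_i, fdir_i) */
                            rss_i = fdir_i;
            }
            return rss_i;   /* becomes both num_rx_queues and num_tx_queues */
    }

For example, on a 6-CPU host with the defaults set later in this commit (RSS limit min(16, cpus) = 6, FDIR limit 64), both paths yield 6 queue pairs.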
@@ -327,10 +283,7 @@ static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
  * @adapter: board private structure to initialize
  *
  * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
+ * Offset is used as the index of the first rx queue used by FCoE.
  **/
 static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
 {
@@ -339,21 +292,18 @@ static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
        if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
                return false;

-       f->indices = min_t(int, num_online_cpus(), f->indices);
+       f->indices = min_t(int, num_online_cpus(), f->limit);

        adapter->num_rx_queues = 1;
        adapter->num_tx_queues = 1;

        if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
                e_info(probe, "FCoE enabled with RSS\n");
-               if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-                       ixgbe_set_fdir_queues(adapter);
-               else
-                       ixgbe_set_rss_queues(adapter);
+               ixgbe_set_rss_queues(adapter);
        }

        /* adding FCoE rx rings to the end */
-       f->mask = adapter->num_rx_queues;
+       f->offset = adapter->num_rx_queues;
        adapter->num_rx_queues += f->indices;
        adapter->num_tx_queues += f->indices;
@@ -388,7 +338,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
 #ifdef IXGBE_FCOE
        /* FCoE enabled queues require special configuration indexed
-        * by feature specific indices and mask. Here we map FCoE
+        * by feature specific indices and offset. Here we map FCoE
         * indices onto the DCB queue pairs allowing FCoE to own
         * configuration later.
         */
@@ -401,7 +351,7 @@ static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
                ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
                tc = prio_tc[adapter->fcoe.up];
                f->indices = dev->tc_to_txq[tc].count;
-               f->mask = dev->tc_to_txq[tc].offset;
+               f->offset = dev->tc_to_txq[tc].offset;
        }
 #endif
@@ -441,9 +391,6 @@ static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
                goto done;

 #endif /* IXGBE_FCOE */
-       if (ixgbe_set_fdir_queues(adapter))
-               goto done;
-
        if (ixgbe_set_rss_queues(adapter))
                goto done;
@@ -507,8 +454,8 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
                 * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
                 * vectors we were allocated.
                 */
-               adapter->num_msix_vectors = min(vectors,
-                                  adapter->max_msix_q_vectors + NON_Q_VECTORS);
+               vectors -= NON_Q_VECTORS;
+               adapter->num_q_vectors = min(vectors, adapter->max_q_vectors);
        }
 }
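Note: num_q_vectors now counts only queue vectors; the NON_Q_VECTORS reservation is subtracted once here, at acquisition time, instead of at every consumer. Under MSI or legacy interrupts the driver sets num_q_vectors = 1 later in this patch, so per-vector loops lose their MSI-X special case. Sketch of the resulting consumer pattern (process_q_vector() is a stand-in name, not a driver function):

    /* valid for MSI-X, MSI and legacy alike after this commit */
    for (i = 0; i < adapter->num_q_vectors; i++)
            process_q_vector(adapter->q_vector[i]);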
@@ -632,8 +579,8 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
                if (adapter->netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
-                       if ((rxr_idx >= f->mask) &&
-                           (rxr_idx < f->mask + f->indices))
+                       if ((rxr_idx >= f->offset) &&
+                           (rxr_idx < f->offset + f->indices))
                                set_bit(__IXGBE_RX_FCOE, &ring->state);
                }
@@ -695,7 +642,7 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
  **/
 static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
 {
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       int q_vectors = adapter->num_q_vectors;
        int rxr_remaining = adapter->num_rx_queues;
        int txr_remaining = adapter->num_tx_queues;
        int rxr_idx = 0, txr_idx = 0, v_idx = 0;
@@ -739,10 +686,12 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
        return 0;

 err_out:
-       while (v_idx) {
-               v_idx--;
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+       adapter->num_q_vectors = 0;
+
+       while (v_idx--)
                ixgbe_free_q_vector(adapter, v_idx);
-       }

        return -ENOMEM;
 }
@@ -757,14 +706,13 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
  **/
 static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
 {
-       int v_idx, q_vectors;
+       int v_idx = adapter->num_q_vectors;

-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       else
-               q_vectors = 1;
+       adapter->num_tx_queues = 0;
+       adapter->num_rx_queues = 0;
+       adapter->num_q_vectors = 0;

-       for (v_idx = 0; v_idx < q_vectors; v_idx++)
+       while (v_idx--)
                ixgbe_free_q_vector(adapter, v_idx);
 }
@@ -844,6 +792,8 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
        if (err)
                return err;

+       adapter->num_q_vectors = 1;
+
        err = pci_enable_msi(adapter->pdev);
        if (!err) {
                adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
...
@@ -993,7 +993,6 @@ static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
-       int num_q_vectors;
        int i;

        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -1002,12 +1001,7 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);

-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
-               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-       else
-               num_q_vectors = 1;
-
-       for (i = 0; i < num_q_vectors; i++) {
+       for (i = 0; i < adapter->num_q_vectors; i++) {
                adapter->q_vector[i]->cpu = -1;
                ixgbe_update_dca(adapter->q_vector[i]);
        }
@@ -1831,11 +1825,9 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector;
-       int q_vectors, v_idx;
+       int v_idx;
        u32 mask;

-       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-
        /* Populate MSIX to EITR Select */
        if (adapter->num_vfs > 32) {
                u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1;
@@ -1846,7 +1838,7 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
         * Populate the IVAR table and set the ITR values to the
         * corresponding register.
         */
-       for (v_idx = 0; v_idx < q_vectors; v_idx++) {
+       for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
                struct ixgbe_ring *ring;
                q_vector = adapter->q_vector[v_idx];
@@ -2410,11 +2402,10 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
        int vector, err;
        int ri = 0, ti = 0;

-       for (vector = 0; vector < q_vectors; vector++) {
+       for (vector = 0; vector < adapter->num_q_vectors; vector++) {
                struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
                struct msix_entry *entry = &adapter->msix_entries[vector];
@@ -2569,30 +2560,28 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter)
 static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
 {
-       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int i, q_vectors;
+       int vector;

-               q_vectors = adapter->num_msix_vectors;
-               i = q_vectors - 1;
-               free_irq(adapter->msix_entries[i].vector, adapter);
-               i--;
+       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
+               free_irq(adapter->pdev->irq, adapter);
+               return;
+       }

-               for (; i >= 0; i--) {
-                       /* free only the irqs that were actually requested */
-                       if (!adapter->q_vector[i]->rx.ring &&
-                           !adapter->q_vector[i]->tx.ring)
-                               continue;
+       for (vector = 0; vector < adapter->num_q_vectors; vector++) {
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+               struct msix_entry *entry = &adapter->msix_entries[vector];

-                       /* clear the affinity_mask in the IRQ descriptor */
-                       irq_set_affinity_hint(adapter->msix_entries[i].vector,
-                                             NULL);
+               /* free only the irqs that were actually requested */
+               if (!q_vector->rx.ring && !q_vector->tx.ring)
+                       continue;

-                       free_irq(adapter->msix_entries[i].vector,
-                                adapter->q_vector[i]);
-               }
-       } else {
-               free_irq(adapter->pdev->irq, adapter);
+               /* clear the affinity_mask in the IRQ descriptor */
+               irq_set_affinity_hint(entry->vector, NULL);
+
+               free_irq(entry->vector, q_vector);
        }
+
+       free_irq(adapter->msix_entries[vector++].vector, adapter);
 }

 /**
@@ -2616,9 +2605,12 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int i;
-               for (i = 0; i < adapter->num_msix_vectors; i++)
-                       synchronize_irq(adapter->msix_entries[i].vector);
+               int vector;
+
+               for (vector = 0; vector < adapter->num_q_vectors; vector++)
+                       synchronize_irq(adapter->msix_entries[vector].vector);
+
+               synchronize_irq(adapter->msix_entries[vector++].vector);
        } else {
                synchronize_irq(adapter->pdev->irq);
        }
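Note: in both rewritten functions the loop walks the queue vectors and then one trailing call handles an extra entry. The layout implied by the code (a sketch of my reading, not a struct in the source):

    /*
     * msix_entries[0 .. num_q_vectors - 1]  queue (Rx/Tx) vectors
     * msix_entries[num_q_vectors]           remaining non-queue vector
     *                                       (the NON_Q_VECTORS slot)
     *
     * hence the trailing free_irq()/synchronize_irq() after each loop;
     * the post-increment of vector there is stylistic, its result is
     * not reused.
     */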
@@ -2855,40 +2847,34 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                    struct ixgbe_ring *rx_ring)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 srrctl;
        u8 reg_idx = rx_ring->reg_idx;

-       switch (adapter->hw.mac.type) {
-       case ixgbe_mac_82598EB: {
-               struct ixgbe_ring_feature *feature = adapter->ring_feature;
-               const int mask = feature[RING_F_RSS].mask;
-               reg_idx = reg_idx & mask;
-       }
-               break;
-       case ixgbe_mac_82599EB:
-       case ixgbe_mac_X540:
-       default:
-               break;
-       }
-
-       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
-
-       srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
-       srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
-       if (adapter->num_vfs)
-               srrctl |= IXGBE_SRRCTL_DROP_EN;
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               u16 mask = adapter->ring_feature[RING_F_RSS].mask;

-       srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
-                 IXGBE_SRRCTL_BSIZEHDR_MASK;
+               /*
+                * if VMDq is not active we must program one srrctl register
+                * per RSS queue since we have enabled RDRXCTL.MVMEN
+                */
+               reg_idx &= mask;
+       }

+       /* configure header buffer length, needed for RSC */
+       srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;

+       /* configure the packet buffer length */
 #if PAGE_SIZE > IXGBE_MAX_RXBUFFER
        srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
        srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #endif

+       /* configure descriptor type */
        srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;

-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
+       IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }

 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
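Note: the rewritten ixgbe_configure_srrctl() composes SRRCTL from scratch instead of read-modify-writing it, so the two mask-clearing steps disappear. For reference, a standalone sketch of the field encoding as I read the shifts in ixgbe_type.h (BSIZEPKT in 1 KB units, header size in 64-byte units landing at bit 8):

    #include <stdio.h>

    #define BSIZEPKT_SHIFT     10   /* packet buffer field: 1 KB units */
    #define BSIZEHDRSIZE_SHIFT  2   /* header field: 64 B units at bit 8 */

    int main(void)
    {
            unsigned int srrctl;

            srrctl  = 256 << BSIZEHDRSIZE_SHIFT;    /* 256 B header buffer */
            srrctl |= 2048 >> BSIZEPKT_SHIFT;       /* 2 KB packet buffer  */
            printf("SRRCTL = 0x%08x\n", srrctl);    /* prints 0x00000402 */
            return 0;
    }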
@@ -3561,33 +3547,17 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
 static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
-       struct ixgbe_q_vector *q_vector;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

-       /* legacy and MSI only use one vector */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
-
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               q_vector = adapter->q_vector[q_idx];
-               napi_enable(&q_vector->napi);
-       }
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+               napi_enable(&adapter->q_vector[q_idx]->napi);
 }

 static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 {
        int q_idx;
-       struct ixgbe_q_vector *q_vector;
-       int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

-       /* legacy and MSI only use one vector */
-       if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
-               q_vectors = 1;
-
-       for (q_idx = 0; q_idx < q_vectors; q_idx++) {
-               q_vector = adapter->q_vector[q_idx];
-               napi_disable(&q_vector->napi);
-       }
+       for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++)
+               napi_disable(&adapter->q_vector[q_idx]->napi);
 }

 #ifdef CONFIG_IXGBE_DCB
@@ -4410,18 +4380,18 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        /* Set capability flags */
        rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
-       adapter->ring_feature[RING_F_RSS].indices = rss;
+       adapter->ring_feature[RING_F_RSS].limit = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
-               adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+               adapter->max_q_vectors = MAX_Q_VECTORS_82598;
                break;
        case ixgbe_mac_X540:
                adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
        case ixgbe_mac_82599EB:
-               adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+               adapter->max_q_vectors = MAX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
                if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
@@ -4429,13 +4399,12 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                /* Flow Director hash filters enabled */
                adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
                adapter->atr_sample_rate = 20;
-               adapter->ring_feature[RING_F_FDIR].indices =
-                                                        IXGBE_MAX_FDIR_INDICES;
+               adapter->ring_feature[RING_F_FDIR].limit =
+                                                        IXGBE_MAX_FDIR_INDICES;
                adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
 #ifdef IXGBE_FCOE
                adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
                adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
-               adapter->ring_feature[RING_F_FCOE].indices = 0;
 #ifdef CONFIG_IXGBE_DCB
                /* Default traffic class to use for FCoE */
                adapter->fcoe.up = IXGBE_FCOE_DEFTC;
@@ -5313,7 +5282,7 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
                        (IXGBE_EICS_TCP_TIMER | IXGBE_EICS_OTHER));
        } else {
                /* get one bit for every active tx/rx interrupt vector */
-               for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
+               for (i = 0; i < adapter->num_q_vectors; i++) {
                        struct ixgbe_q_vector *qv = adapter->q_vector[i];
                        if (qv->rx.ring || qv->tx.ring)
                                eics |= ((u64)1 << i);
@@ -6230,8 +6199,14 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        if (((protocol == htons(ETH_P_FCOE)) ||
             (protocol == htons(ETH_P_FIP))) &&
            (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
-               txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
-               txq += adapter->ring_feature[RING_F_FCOE].mask;
+               struct ixgbe_ring_feature *f;
+
+               f = &adapter->ring_feature[RING_F_FCOE];
+
+               while (txq >= f->indices)
+                       txq -= f->indices;
+               txq += adapter->ring_feature[RING_F_FCOE].offset;
+
                return txq;
        }
 #endif
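Note: the old mapping, txq &= (indices - 1), is only a modulo when indices is a power of two; since indices can now be min(num_online_cpus(), limit) (e.g. 6), the subtract loop computes the remainder for any count. An equivalent sketch:

    /* same result as the while loop above, for any f->indices > 0 */
    txq %= f->indices;
    txq += f->offset;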
@@ -6525,11 +6500,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
        adapter->flags |= IXGBE_FLAG_IN_NETPOLL;
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
-               int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
-               for (i = 0; i < num_q_vectors; i++) {
-                       struct ixgbe_q_vector *q_vector = adapter->q_vector[i];
-                       ixgbe_msix_clean_rings(0, q_vector);
-               }
+               for (i = 0; i < adapter->num_q_vectors; i++)
+                       ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
        } else {
                ixgbe_intr(adapter->pdev->irq, netdev);
        }
...