Commit f24fd89a authored by David S. Miller
parents fb041214 8af3c33f
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
              ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
-             ixgbe_mbx.o ixgbe_x540.o
+             ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o
ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
                             ixgbe_dcb_82599.o ixgbe_dcb_nl.o
...
@@ -101,8 +101,6 @@
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
#define IXGBE_TX_FLAGS_VLAN_SHIFT 16
-#define IXGBE_MAX_RSC_INT_RATE 162760
#define IXGBE_MAX_VF_MC_ENTRIES 30
#define IXGBE_MAX_VF_FUNCTIONS 64
#define IXGBE_MAX_VFTA_ENTRIES 128
@@ -152,6 +150,7 @@ struct ixgbe_tx_buffer {
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
+	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
@@ -207,15 +206,18 @@ enum ixgbe_ring_state_t {
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
+	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
+	struct net_device *netdev;	/* netdev ring belongs to */
+	struct device *dev;		/* device for DMA mapping */
	void *desc;			/* descriptor ring memory */
-	struct device *dev;		/* device for DMA mapping */
-	struct net_device *netdev;	/* netdev ring belongs to */
	union {
		struct ixgbe_tx_buffer *tx_buffer_info;
		struct ixgbe_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	u8 __iomem *tail;
+	dma_addr_t dma;			/* phys. address of descriptor ring */
+	unsigned int size;		/* length in bytes */
	u16 count;			/* amount of descriptors */
@@ -225,17 +227,17 @@ struct ixgbe_ring {
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
+	u16 next_to_use;
+	u16 next_to_clean;
	union {
+		u16 next_to_alloc;
		struct {
			u8 atr_sample_rate;
			u8 atr_count;
		};
-		u16 next_to_alloc;
	};
-	u16 next_to_use;
-	u16 next_to_clean;
	u8 dcb_tc;
	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
@@ -243,9 +245,6 @@ struct ixgbe_ring {
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};
-	unsigned int size;		/* length in bytes */
-	dma_addr_t dma;			/* phys. address of descriptor ring */
-	struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
@@ -437,7 +436,8 @@ struct ixgbe_adapter {
#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5)
#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7)
+#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8)
+#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9)
	/* Tx fast path data */
	int num_tx_queues;
@@ -581,7 +581,9 @@ extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
+#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
+#endif /* IXGBE_FCOE */
extern void ixgbe_up(struct ixgbe_adapter *adapter);
extern void ixgbe_down(struct ixgbe_adapter *adapter);
@@ -606,6 +608,7 @@ extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
					     struct ixgbe_tx_buffer *);
extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
+extern int ixgbe_poll(struct napi_struct *napi, int budget);
extern int ethtool_ioctl(struct ifreq *ifr);
extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
@@ -625,14 +628,16 @@ extern s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
extern void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
						 union ixgbe_atr_input *mask);
extern void ixgbe_set_rx_mode(struct net_device *netdev);
+#ifdef CONFIG_IXGBE_DCB
extern int ixgbe_setup_tc(struct net_device *dev, u8 tc);
+#endif
extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
extern void ixgbe_do_reset(struct net_device *netdev);
#ifdef IXGBE_FCOE
extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
		     struct ixgbe_tx_buffer *first,
-		     u32 tx_flags, u8 *hdr_len);
+		     u8 *hdr_len);
extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
			  union ixgbe_adv_rx_desc *rx_desc,
...
@@ -2137,31 +2137,29 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
-static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
-			     struct ethtool_coalesce *ec)
+static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
-	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
+	/* nothing to do if LRO or RSC are not enabled */
+	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
+	    !(netdev->features & NETIF_F_LRO))
		return false;
-	/* if interrupt rate is too high then disable RSC */
-	if (ec->rx_coalesce_usecs != 1 &&
-	    ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
-		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-			e_info(probe, "rx-usecs set too low, disabling RSC\n");
-			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
-			return true;
-		}
-	} else {
-		/* check the feature flag value and enable RSC if necessary */
-		if ((netdev->features & NETIF_F_LRO) &&
-		    !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
-			e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
-			       ec->rx_coalesce_usecs);
+	/* check the feature flag value and enable RSC if necessary */
+	if (adapter->rx_itr_setting == 1 ||
+	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
+		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+			e_info(probe, "rx-usecs value high enough "
+			       "to re-enable RSC\n");
			return true;
		}
+	/* if interrupt rate is too high then disable RSC */
+	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
+		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
+		e_info(probe, "rx-usecs set too low, disabling RSC\n");
+		return true;
	}
	return false;
}
@@ -2185,9 +2183,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;
-	/* check the old value and enable RSC if necessary */
-	need_reset = ixgbe_update_rsc(adapter, ec);
	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
@@ -2208,6 +2203,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
	else
		tx_itr_param = adapter->tx_itr_setting;
+	/* check the old value and enable RSC if necessary */
+	need_reset = ixgbe_update_rsc(adapter);
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
@@ -2328,6 +2326,48 @@ static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *cmd)
{
cmd->data = 0;
/* if RSS is disabled then report no hashing */
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return 0;
/* Report default options for RSS on ixgbe */
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case IPV4_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case IPV6_FLOW:
cmd->data |= RXH_IP_SRC | RXH_IP_DST;
break;
default:
return -EINVAL;
}
return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
@@ -2349,6 +2389,9 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
+	case ETHTOOL_GRXFH:
+		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
+		break;
	default:
		break;
	}
@@ -2583,6 +2626,111 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
	return err;
}
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
struct ethtool_rxnfc *nfc)
{
u32 flags2 = adapter->flags2;
/*
* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports
*/
if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
RXH_L4_B_0_1 | RXH_L4_B_2_3))
return -EINVAL;
switch (nfc->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
!(nfc->data & RXH_L4_B_0_1) ||
!(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
break;
case UDP_V4_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
break;
default:
return -EINVAL;
}
break;
case UDP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST))
return -EINVAL;
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
break;
default:
return -EINVAL;
}
break;
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
case ESP_V4_FLOW:
case SCTP_V4_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
case ESP_V6_FLOW:
case SCTP_V6_FLOW:
if (!(nfc->data & RXH_IP_SRC) ||
!(nfc->data & RXH_IP_DST) ||
(nfc->data & RXH_L4_B_0_1) ||
(nfc->data & RXH_L4_B_2_3))
return -EINVAL;
break;
default:
return -EINVAL;
}
/* if we changed something we need to update flags */
if (flags2 != adapter->flags2) {
struct ixgbe_hw *hw = &adapter->hw;
u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
e_warn(drv, "enabling UDP RSS: fragmented packets"
" may arrive out of order to the stack above\n");
adapter->flags2 = flags2;
/* Perform hash on these packet types */
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
| IXGBE_MRQC_RSS_FIELD_IPV4_TCP
| IXGBE_MRQC_RSS_FIELD_IPV6
| IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
IXGBE_MRQC_RSS_FIELD_IPV6_UDP);
if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
return 0;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -2595,6 +2743,9 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
+	case ETHTOOL_SRXFH:
+		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
+		break;
	default:
		break;
	}
...
@@ -448,16 +448,15 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @tx_ring: tx desc ring
 * @first: first tx_buffer structure containing skb, tx_flags, and protocol
- * @tx_flags: tx flags
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
- * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
+ * Returns : 0 indicates success, < 0 for error
 */
int ixgbe_fso(struct ixgbe_ring *tx_ring,
	      struct ixgbe_tx_buffer *first,
-	      u32 tx_flags, u8 *hdr_len)
+	      u8 *hdr_len)
{
	struct sk_buff *skb = first->skb;
	struct fc_frame_header *fh;
@@ -539,8 +538,12 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
		first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
					       skb_shinfo(skb)->gso_size);
		first->bytecount += (first->gso_segs - 1) * *hdr_len;
+		first->tx_flags |= IXGBE_TX_FLAGS_FSO;
	}
+	/* set flag indicating FCOE to ixgbe_tx_map call */
+	first->tx_flags |= IXGBE_TX_FLAGS_FCOE;
	/* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
	mss_l4len_idx |= 1 << IXGBE_ADVTXD_IDX_SHIFT;
@@ -550,13 +553,13 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring,
			  sizeof(struct fc_frame_header);
	vlan_macip_lens |= (skb_transport_offset(skb) - 4)
			   << IXGBE_ADVTXD_MACLEN_SHIFT;
-	vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
+	vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
	/* write context desc */
	ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof,
			  IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx);
-	return skb_is_gso(skb);
+	return 0;
}
static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
...
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2012 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include "ixgbe.h"
#include "ixgbe_sriov.h"
/**
* ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for RSS to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return false;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
return true;
}
#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u8 num_tcs = netdev_get_num_tc(dev);
*tx = 0;
*rx = 0;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
*tx = tc << 2;
*rx = tc << 3;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs > 4) {
if (tc < 3) {
*tx = tc << 5;
*rx = tc << 4;
} else if (tc < 5) {
*tx = ((tc + 2) << 4);
*rx = tc << 4;
} else if (tc < num_tcs) {
*tx = ((tc + 8) << 3);
*rx = tc << 4;
}
} else {
*rx = tc << 5;
switch (tc) {
case 0:
*tx = 0;
break;
case 1:
*tx = 64;
break;
case 2:
*tx = 96;
break;
case 3:
*tx = 112;
break;
default:
break;
}
}
break;
default:
break;
}
}
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for DCB to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
int i, j, k;
u8 num_tcs = netdev_get_num_tc(dev);
if (!num_tcs)
return false;
for (i = 0, k = 0; i < num_tcs; i++) {
unsigned int tx_s, rx_s;
u16 count = dev->tc_to_txq[i].count;
ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
for (j = 0; j < count; j++, k++) {
adapter->tx_ring[k]->reg_idx = tx_s + j;
adapter->rx_ring[k]->reg_idx = rx_s + j;
adapter->tx_ring[k]->dcb_tc = i;
adapter->rx_ring[k]->dcb_tc = i;
}
}
return true;
}
#endif
/**
* ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for Flow Director to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
int i;
bool ret = false;
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
return ret;
}
#ifdef IXGBE_FCOE
/**
* ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for FCoE mode to the assigned rings.
*
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
int i;
u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);
fcoe_rx_i = f->mask;
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
return true;
}
#endif /* IXGBE_FCOE */
/**
* ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize
*
* SR-IOV doesn't use any descriptor rings but changes the default if
* no other mapping is used.
*
*/
static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
if (adapter->num_vfs)
return true;
else
return false;
}
/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
* Once we know the feature-set enabled for the device, we'll cache
* the register offset the descriptor ring is assigned to.
*
* Note, the order the various feature calls is important. It must start with
* the "most" features enabled at the same time, then trickle down to the
* least amount of features turned on at once.
**/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
adapter->rx_ring[0]->reg_idx = 0;
adapter->tx_ring[0]->reg_idx = 0;
if (ixgbe_cache_ring_sriov(adapter))
return;
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_cache_ring_dcb(adapter))
return;
#endif
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;
#endif /* IXGBE_FCOE */
if (ixgbe_cache_ring_fdir(adapter))
return;
if (ixgbe_cache_ring_rss(adapter))
return;
}
/**
* ixgbe_set_sriov_queues: Allocate queues for IOV use
* @adapter: board private structure to initialize
*
* IOV doesn't actually use anything, so just NAK the
* request for now and let the other queue routines
* figure out what to do.
*/
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
return false;
}
/**
* ixgbe_set_rss_queues: Allocate queues for RSS
* @adapter: board private structure to initialize
*
* This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
* to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
*
**/
static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
f->mask = 0xF;
adapter->num_rx_queues = f->indices;
adapter->num_tx_queues = f->indices;
ret = true;
}
return ret;
}
/**
* ixgbe_set_fdir_queues: Allocate queues for Flow Director
* @adapter: board private structure to initialize
*
* Flow Director is an advanced Rx filter, attempting to get Rx flows back
* to the original CPU that initiated the Tx session. This runs in addition
* to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
* Rx load across CPUs using RSS.
*
**/
static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
{
bool ret = false;
struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
f_fdir->indices = min_t(int, num_online_cpus(), f_fdir->indices);
f_fdir->mask = 0;
/*
* Use RSS in addition to Flow Director to ensure the best
* distribution of flows across cores, even when an FDIR flow
* isn't matched.
*/
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
adapter->num_tx_queues = f_fdir->indices;
adapter->num_rx_queues = f_fdir->indices;
ret = true;
} else {
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
}
return ret;
}
#ifdef IXGBE_FCOE
/**
* ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
* @adapter: board private structure to initialize
*
* FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
* The ring feature mask is not used as a mask for FCoE, as it can take any 8
* rx queues out of the max number of rx queues, instead, it is used as the
* index of the first rx queue used by FCoE.
*
**/
static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
f->indices = min_t(int, num_online_cpus(), f->indices);
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
e_info(probe, "FCoE enabled with RSS\n");
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_set_fdir_queues(adapter);
else
ixgbe_set_rss_queues(adapter);
}
/* adding FCoE rx rings to the end */
f->mask = adapter->num_rx_queues;
adapter->num_rx_queues += f->indices;
adapter->num_tx_queues += f->indices;
return true;
}
#endif /* IXGBE_FCOE */
/* Artificial max queue cap per traffic class in DCB mode */
#define DCB_QUEUE_CAP 8
#ifdef CONFIG_IXGBE_DCB
static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
{
int per_tc_q, q, i, offset = 0;
struct net_device *dev = adapter->netdev;
int tcs = netdev_get_num_tc(dev);
if (!tcs)
return false;
/* Map queue offset and counts onto allocated tx queues */
per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
q = min_t(int, num_online_cpus(), per_tc_q);
for (i = 0; i < tcs; i++) {
netdev_set_tc_queue(dev, i, q, offset);
offset += q;
}
adapter->num_tx_queues = q * tcs;
adapter->num_rx_queues = q * tcs;
#ifdef IXGBE_FCOE
/* FCoE enabled queues require special configuration indexed
* by feature specific indices and mask. Here we map FCoE
* indices onto the DCB queue pairs allowing FCoE to own
* configuration later.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
u8 prio_tc[MAX_USER_PRIORITY] = {0};
int tc;
struct ixgbe_ring_feature *f =
&adapter->ring_feature[RING_F_FCOE];
ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
tc = prio_tc[adapter->fcoe.up];
f->indices = dev->tc_to_txq[tc].count;
f->mask = dev->tc_to_txq[tc].offset;
}
#endif
return true;
}
#endif
/**
* ixgbe_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
* important, starting with the "most" number of features turned on at once,
* and ending with the smallest set of features. This way large combinations
* can be allocated if they're turned on, and smaller combinations are the
* fallthrough conditions.
*
**/
static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
if (ixgbe_set_sriov_queues(adapter))
goto done;
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_set_dcb_queues(adapter))
goto done;
#endif
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
#endif /* IXGBE_FCOE */
if (ixgbe_set_fdir_queues(adapter))
goto done;
if (ixgbe_set_rss_queues(adapter))
goto done;
/* fallback to base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
done:
if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
(adapter->netdev->reg_state == NETREG_UNREGISTERING))
return 0;
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,
adapter->num_rx_queues);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
int err, vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
* 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
/*
* The more we get, the more we will assign to Tx/Rx Cleanup
* for the separate queues...where Rx Cleanup >= Tx Cleanup.
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
vectors);
if (!err) /* Success in acquiring all requested vectors. */
break;
else if (err < 0)
vectors = 0; /* Nasty failure, quit now */
else /* err == number of vectors we should try again with */
vectors = err;
}
if (vectors < vector_threshold) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
*/
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI-X interrupts\n");
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
/*
* Adjust for only the vectors we'll use, which is minimum
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
adapter->num_msix_vectors = min(vectors,
adapter->max_msix_q_vectors + NON_Q_VECTORS);
}
}
static void ixgbe_add_ring(struct ixgbe_ring *ring,
struct ixgbe_ring_container *head)
{
ring->next = head->ring;
head->ring = ring;
head->count++;
}
/**
* ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
* @v_idx: index of vector in adapter struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
int txr_count, int txr_idx,
int rxr_count, int rxr_idx)
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
int node = -1;
int cpu = -1;
int ring_count, size;
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
if (cpu_online(v_idx)) {
cpu = v_idx;
node = cpu_to_node(cpu);
}
}
/* allocate q_vector and rings */
q_vector = kzalloc_node(size, GFP_KERNEL, node);
if (!q_vector)
q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
else
cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
q_vector->v_idx = v_idx;
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
/* initialize pointer to rings */
ring = q_vector->ring;
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Tx values */
ixgbe_add_ring(ring, &q_vector->tx);
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
txr_idx++;
/* push pointer to next ring */
ring++;
}
while (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Rx values */
ixgbe_add_ring(ring, &q_vector->rx);
/*
* 82599 errata, UDP frames with a 0 checksum
* can be marked as checksum errors.
*/
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
/* update count and index */
rxr_count--;
rxr_idx++;
/* push pointer to next ring */
ring++;
}
return 0;
}
/**
* ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
* This function frees the memory allocated to the q_vector. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
struct ixgbe_ring *ring;
ixgbe_for_each_ring(ring, q_vector->tx)
adapter->tx_ring[ring->queue_index] = NULL;
ixgbe_for_each_ring(ring, q_vector->rx)
adapter->rx_ring[ring->queue_index] = NULL;
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
/*
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
}
/**
* ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
int err;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
q_vectors = 1;
if (q_vectors >= (rxr_remaining + txr_remaining)) {
for (; rxr_remaining; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
err = ixgbe_alloc_q_vector(adapter, v_idx,
0, 0, rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
}
}
for (; q_vectors; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
err = ixgbe_alloc_q_vector(adapter, v_idx,
tqpv, txr_idx,
rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
txr_remaining -= tqpv;
txr_idx += tqpv;
}
return 0;
err_out:
while (v_idx) {
v_idx--;
ixgbe_free_q_vector(adapter, v_idx);
}
return -ENOMEM;
}
/**
* ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
* This function frees the memory allocated to the q_vectors. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
int v_idx, q_vectors;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
q_vectors = 1;
for (v_idx = 0; v_idx < q_vectors; v_idx++)
ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
pci_disable_msi(adapter->pdev);
}
}
/**
* ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
* @adapter: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int err = 0;
int vector, v_budget;
/*
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
* The default is to use pairs of vectors.
*/
v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
* such as RSS and VMDq, we can easily surpass the number of Rx and Tx
* descriptor queues supported by our device. Thus, we cap it off in
* those rare cases where the cpu count also exceeds our vector limit.
*/
v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
ixgbe_acquire_msix_vectors(adapter, v_budget);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
goto out;
}
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
e_err(probe,
"ATR is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 0;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
err = ixgbe_set_num_queues(adapter);
if (err)
return err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
} else {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, "
"falling back to legacy. Error: %d\n", err);
/* reset err */
err = 0;
}
out:
return err;
}
/**
* ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
* @adapter: board private structure to initialize
*
* We determine which interrupt scheme to use based on...
* - Kernel support (MSI, MSI-X)
* - which can be user-defined (via MODULE_PARAM)
* - Hardware queue count (num_*_queues)
* - defined by miscellaneous hardware support/features (RSS, etc.)
**/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
int err;
/* Number of supported queues */
err = ixgbe_set_num_queues(adapter);
if (err)
return err;
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
e_dev_err("Unable to setup interrupt capabilities\n");
goto err_set_interrupt;
}
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
e_dev_err("Unable to allocate memory for queue vectors\n");
goto err_alloc_q_vectors;
}
ixgbe_cache_ring_register(adapter);
e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
adapter->num_rx_queues, adapter->num_tx_queues);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
return err;
}
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
*
* We go through and clear interrupt specific resources and reset the structure
* to pre-load conditions
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
@@ -55,8 +55,13 @@
char ixgbe_driver_name[] = "ixgbe";
static const char ixgbe_driver_string[] =
			      "Intel(R) 10 Gigabit PCI Express Network Driver";
+#ifdef IXGBE_FCOE
char ixgbe_default_device_descr[] =
			      "Intel(R) 10 Gigabit Network Connection";
+#else
+static char ixgbe_default_device_descr[] =
+			      "Intel(R) 10 Gigabit Network Connection";
+#endif
#define MAJ 3
#define MIN 6
#define BUILD 7
@@ -2307,6 +2312,55 @@ static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data)
	return IRQ_HANDLED;
}
/**
* ixgbe_poll - NAPI Rx polling callback
* @napi: structure for representing this polling device
* @budget: how many packets driver is allowed to clean
*
* This function is used for legacy and MSI, NAPI mode
**/
int ixgbe_poll(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring;
int per_ring_budget;
bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_dca(q_vector);
#endif
ixgbe_for_each_ring(ring, q_vector->tx)
clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
if (q_vector->rx.count > 1)
per_ring_budget = max(budget/q_vector->rx.count, 1);
else
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx)
clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
/* all work done, exit the polling mode */
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
return 0;
}
/**
 * ixgbe_request_msix_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
@@ -2807,6 +2861,11 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
		| IXGBE_MRQC_RSS_FIELD_IPV6
		| IXGBE_MRQC_RSS_FIELD_IPV6_TCP;
+	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;
+	if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
+		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;
	IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
}
@@ -4253,55 +4312,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
#endif
}
/**
* ixgbe_poll - NAPI Rx polling callback
* @napi: structure for representing this polling device
* @budget: how many packets driver is allowed to clean
*
* This function is used for legacy and MSI, NAPI mode
**/
static int ixgbe_poll(struct napi_struct *napi, int budget)
{
struct ixgbe_q_vector *q_vector =
container_of(napi, struct ixgbe_q_vector, napi);
struct ixgbe_adapter *adapter = q_vector->adapter;
struct ixgbe_ring *ring;
int per_ring_budget;
bool clean_complete = true;
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
ixgbe_update_dca(q_vector);
#endif
ixgbe_for_each_ring(ring, q_vector->tx)
clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring);
/* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */
if (q_vector->rx.count > 1)
per_ring_budget = max(budget/q_vector->rx.count, 1);
else
per_ring_budget = budget;
ixgbe_for_each_ring(ring, q_vector->rx)
clean_complete &= ixgbe_clean_rx_irq(q_vector, ring,
per_ring_budget);
/* If all work not completed, return budget and keep polling */
if (!clean_complete)
return budget;
/* all work done, exit the polling mode */
napi_complete(napi);
if (adapter->rx_itr_setting & 1)
ixgbe_set_itr(q_vector);
if (!test_bit(__IXGBE_DOWN, &adapter->state))
ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx));
return 0;
}
/**
 * ixgbe_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
@@ -4315,1023 +4325,141 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
}
/**
+ * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * ixgbe_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
+{
+	struct ixgbe_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned int rss;
+#ifdef CONFIG_IXGBE_DCB
+	int j;
+	struct tc_configuration *tc;
+#endif
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->revision_id = pdev->revision;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+	/* Set capability flags */
+	rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
+	adapter->ring_feature[RING_F_RSS].indices = rss;
+	adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
+	switch (hw->mac.type) {
+	case ixgbe_mac_82598EB:
+		if (hw->device_id == IXGBE_DEV_ID_82598AT)
+			adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
+		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
+		break;
+	case ixgbe_mac_X540:
+		adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+	case ixgbe_mac_82599EB:
+		adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
+		adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
+		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
+		if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
+			adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
+		/* Flow Director hash filters enabled */
+		adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
+		adapter->atr_sample_rate = 20;
+		adapter->ring_feature[RING_F_FDIR].indices =
+							 IXGBE_MAX_FDIR_INDICES;
+		adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
+#ifdef IXGBE_FCOE
+		adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
+		adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
+		adapter->ring_feature[RING_F_FCOE].indices = 0;
+#ifdef CONFIG_IXGBE_DCB
+		/* Default traffic class to use for FCoE */
+		adapter->fcoe.up = IXGBE_FCOE_DEFTC;
+#endif
+#endif /* IXGBE_FCOE */
+		break;
+	default:
+		break;
+	}
+	/* n-tuple support exists, always init our spinlock */
+	spin_lock_init(&adapter->fdir_perfect_lock);
+#ifdef CONFIG_IXGBE_DCB
+	switch (hw->mac.type) {
+	case ixgbe_mac_X540:
+		adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
+		adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
+		break;
+	default:
+		adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
+		adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
+		break;
+	}
+	/* Configure DCB traffic classes */
+	for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
+		tc = &adapter->dcb_cfg.tc_config[j];
+		tc->path[DCB_TX_CONFIG].bwg_id = 0;
+		tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
+		tc->path[DCB_RX_CONFIG].bwg_id = 0;
+		tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
+		tc->dcb_pfc = pfc_disabled;
+	}
+	/* Initialize default user to priority mapping, UPx->TC0 */
+	tc = &adapter->dcb_cfg.tc_config[0];
+	tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
+	tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
+	adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
+	adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
+	adapter->dcb_cfg.pfc_mode_enable = false;
+	adapter->dcb_set_bitmap = 0x00;
+	adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
+	ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
+			   MAX_TRAFFIC_CLASS);
+#endif
+	/* default flow control settings */
+	hw->fc.requested_mode = ixgbe_fc_full;
+	hw->fc.current_mode = ixgbe_fc_full;	/* init for ethtool output */
+#ifdef CONFIG_DCB
+	adapter->last_lfc_mode = hw->fc.current_mode;
+#endif
+	ixgbe_pbthresh_setup(adapter);
+	hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
+	hw->fc.send_xon = true;
+	hw->fc.disable_fc_autoneg = false;
+	/* enable itr by default in dynamic mode */
+	adapter->rx_itr_setting = 1;
+	adapter->tx_itr_setting = 1;
+	/* set default ring sizes */
+	adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
+	adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
+	/* set default work limits */
+	adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
+	/* initialize eeprom parameters */
+	if (ixgbe_init_eeprom_params_generic(hw)) {
+		e_dev_err("EEPROM initialization failed\n");
+		return -EIO;
+	}
+	set_bit(__IXGBE_DOWN, &adapter->state);
+	return 0;
+}
- * ixgbe_set_rss_queues: Allocate queues for RSS
- * @adapter: board private structure to initialize
- *
- * This is our "base" multiqueue mode. RSS (Receive Side Scaling) will try
- * to allocate one Rx queue per CPU, and if available, one Tx queue per CPU.
- *
- **/
-static inline bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter)
-{
-	bool ret = false;
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_RSS];
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		f->mask = 0xF;
-		adapter->num_rx_queues = f->indices;
-		adapter->num_tx_queues = f->indices;
-		ret = true;
-	} else {
-		ret = false;
-	}
-	return ret;
-}
-/**
- * ixgbe_set_fdir_queues: Allocate queues for Flow Director
- * @adapter: board private structure to initialize
- *
- * Flow Director is an advanced Rx filter, attempting to get Rx flows back
- * to the original CPU that initiated the Tx session. This runs in addition
- * to RSS, so if a packet doesn't match an FDIR filter, we can still spread the
- * Rx load across CPUs using RSS.
- *
- **/
-static inline bool ixgbe_set_fdir_queues(struct ixgbe_adapter *adapter)
-{
-	bool ret = false;
-	struct ixgbe_ring_feature *f_fdir = &adapter->ring_feature[RING_F_FDIR];
-	f_fdir->indices = min((int)num_online_cpus(), f_fdir->indices);
-	f_fdir->mask = 0;
-	/*
-	 * Use RSS in addition to Flow Director to ensure the best
-	 * distribution of flows across cores, even when an FDIR flow
-	 * isn't matched.
-	 */
-	if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
-	    (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
-		adapter->num_tx_queues = f_fdir->indices;
-		adapter->num_rx_queues = f_fdir->indices;
-		ret = true;
-	} else {
-		adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
-	}
-	return ret;
-}
-#ifdef IXGBE_FCOE
-/**
- * ixgbe_set_fcoe_queues: Allocate queues for Fiber Channel over Ethernet (FCoE)
- * @adapter: board private structure to initialize
- *
- * FCoE RX FCRETA can use up to 8 rx queues for up to 8 different exchanges.
- * The ring feature mask is not used as a mask for FCoE, as it can take any 8
- * rx queues out of the max number of rx queues, instead, it is used as the
- * index of the first rx queue used by FCoE.
- *
- **/
-static inline bool ixgbe_set_fcoe_queues(struct ixgbe_adapter *adapter)
-{
-	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
-	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
-		return false;
-	f->indices = min_t(int, num_online_cpus(), f->indices);
-	adapter->num_rx_queues = 1;
-	adapter->num_tx_queues = 1;
-	if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-		e_info(probe, "FCoE enabled with RSS\n");
-		if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
-			ixgbe_set_fdir_queues(adapter);
-		else
-			ixgbe_set_rss_queues(adapter);
-	}
-	/* adding FCoE rx rings to the end */
-	f->mask = adapter->num_rx_queues;
-	adapter->num_rx_queues += f->indices;
-	adapter->num_tx_queues += f->indices;
-	return true;
-}
-#endif /* IXGBE_FCOE */
-/* Artificial max queue cap per traffic class in DCB mode */
-#define DCB_QUEUE_CAP 8
-#ifdef CONFIG_IXGBE_DCB
-static inline bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter)
-{
-	int per_tc_q, q, i, offset = 0;
-	struct net_device *dev = adapter->netdev;
-	int tcs = netdev_get_num_tc(dev);
-	if (!tcs)
-		return false;
-	/* Map queue offset and counts onto allocated tx queues */
-	per_tc_q = min_t(unsigned int, dev->num_tx_queues / tcs, DCB_QUEUE_CAP);
-	q = min_t(int, num_online_cpus(), per_tc_q);
-	for (i = 0; i < tcs; i++) {
-		netdev_set_tc_queue(dev, i, q, offset);
-		offset += q;
-	}
-	adapter->num_tx_queues = q * tcs;
-	adapter->num_rx_queues = q * tcs;
-#ifdef IXGBE_FCOE
-	/* FCoE enabled queues require special configuration indexed
* by feature specific indices and mask. Here we map FCoE
* indices onto the DCB queue pairs allowing FCoE to own
* configuration later.
*/
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
u8 prio_tc[MAX_USER_PRIORITY] = {0};
int tc;
struct ixgbe_ring_feature *f =
&adapter->ring_feature[RING_F_FCOE];
ixgbe_dcb_unpack_map(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_tc);
tc = prio_tc[adapter->fcoe.up];
f->indices = dev->tc_to_txq[tc].count;
f->mask = dev->tc_to_txq[tc].offset;
}
#endif
return true;
}
#endif
/**
* ixgbe_set_sriov_queues: Allocate queues for IOV use
* @adapter: board private structure to initialize
*
* IOV doesn't actually use anything, so just NAK the
* request for now and let the other queue routines
* figure out what to do.
*/
static inline bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter)
{
return false;
}
/*
* ixgbe_set_num_queues: Allocate queues for device, feature dependent
* @adapter: board private structure to initialize
*
* This is the top level queue allocation routine. The order here is very
* important, starting with the "most" number of features turned on at once,
* and ending with the smallest set of features. This way large combinations
* can be allocated if they're turned on, and smaller combinations are the
* fallthrough conditions.
*
**/
static int ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
{
/* Start with base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
adapter->num_rx_pools = adapter->num_rx_queues;
adapter->num_rx_queues_per_pool = 1;
if (ixgbe_set_sriov_queues(adapter))
goto done;
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_set_dcb_queues(adapter))
goto done;
#endif
#ifdef IXGBE_FCOE
if (ixgbe_set_fcoe_queues(adapter))
goto done;
#endif /* IXGBE_FCOE */
if (ixgbe_set_fdir_queues(adapter))
goto done;
if (ixgbe_set_rss_queues(adapter))
goto done;
/* fallback to base case */
adapter->num_rx_queues = 1;
adapter->num_tx_queues = 1;
done:
if ((adapter->netdev->reg_state == NETREG_UNREGISTERED) ||
(adapter->netdev->reg_state == NETREG_UNREGISTERING))
return 0;
/* Notify the stack of the (possibly) reduced queue counts. */
netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
return netif_set_real_num_rx_queues(adapter->netdev,
adapter->num_rx_queues);
}
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
int err, vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
* 2) Other (Link Status Change, etc.)
*/
vector_threshold = MIN_MSIX_COUNT;
/*
* The more we get, the more we will assign to Tx/Rx Cleanup
* for the separate queues...where Rx Cleanup >= Tx Cleanup.
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
while (vectors >= vector_threshold) {
err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
vectors);
if (!err) /* Success in acquiring all requested vectors. */
break;
else if (err < 0)
vectors = 0; /* Nasty failure, quit now */
else /* err == number of vectors we should try again with */
vectors = err;
}
if (vectors < vector_threshold) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
*/
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI-X interrupts\n");
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else {
adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */
/*
* Adjust for only the vectors we'll use, which is minimum
* of max_msix_q_vectors + NON_Q_VECTORS, or the number of
* vectors we were allocated.
*/
adapter->num_msix_vectors = min(vectors,
adapter->max_msix_q_vectors + NON_Q_VECTORS);
}
}
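The loop above leans on the pci_enable_msix() return convention: 0 means every requested vector was granted, a positive value is the count the platform could actually provide (retry with that), and a negative value is a hard failure. A minimal user-space sketch of the same negotiation pattern follows; request_vectors() and its 14-vector limit are mocks invented for illustration, not driver or PCI core code.
#include <stdio.h>

#define MIN_MSIX_COUNT 2

/* Mock of pci_enable_msix(): pretend the platform can hand out at most
 * 14 vectors. Returns 0 on success, the supported count if the request
 * was too large, or a negative error code. */
static int request_vectors(int requested)
{
	const int available = 14;

	if (requested <= 0)
		return -22;        /* behaves like -EINVAL */
	if (requested > available)
		return available;  /* caller should retry with this count */
	return 0;
}

int main(void)
{
	int vectors = 20;          /* initial, optimistic budget */
	int err;

	while (vectors >= MIN_MSIX_COUNT) {
		err = request_vectors(vectors);
		if (!err)              /* acquired everything we asked for */
			break;
		else if (err < 0)
			vectors = 0;       /* hard failure, give up on MSI-X */
		else
			vectors = err;     /* retry with what is actually available */
	}

	if (vectors < MIN_MSIX_COUNT)
		printf("MSI-X unavailable, fall back to MSI/legacy\n");
	else
		printf("acquired %d MSI-X vectors\n", vectors);
	return 0;
}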
/**
* ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for RSS to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
{
int i;
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
return false;
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
return true;
}
#ifdef CONFIG_IXGBE_DCB
/* ixgbe_get_first_reg_idx - Return first register index associated with ring */
static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc,
unsigned int *tx, unsigned int *rx)
{
struct net_device *dev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw;
u8 num_tcs = netdev_get_num_tc(dev);
*tx = 0;
*rx = 0;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
*tx = tc << 2;
*rx = tc << 3;
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
if (num_tcs > 4) {
if (tc < 3) {
*tx = tc << 5;
*rx = tc << 4;
} else if (tc < 5) {
*tx = ((tc + 2) << 4);
*rx = tc << 4;
} else if (tc < num_tcs) {
*tx = ((tc + 8) << 3);
*rx = tc << 4;
}
} else {
*rx = tc << 5;
switch (tc) {
case 0:
*tx = 0;
break;
case 1:
*tx = 64;
break;
case 2:
*tx = 96;
break;
case 3:
*tx = 112;
break;
default:
break;
}
}
break;
default:
break;
}
}
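To sanity-check the shift arithmetic in ixgbe_get_first_reg_idx() above, the stand-alone program below (an illustrative mirror, not driver code) prints the first Tx/Rx register index per traffic class for the 82599/X540 layouts with 8 and with 4 traffic classes.
#include <stdio.h>

/* Illustrative mirror of the 82599/X540 mapping in ixgbe_get_first_reg_idx(). */
static void first_reg_idx(unsigned int num_tcs, unsigned int tc,
			  unsigned int *tx, unsigned int *rx)
{
	if (num_tcs > 4) {
		if (tc < 3) {
			*tx = tc << 5;		/* 32 Tx queues each for TC0-TC2 */
			*rx = tc << 4;		/* 16 Rx queues per TC */
		} else if (tc < 5) {
			*tx = (tc + 2) << 4;	/* 16 Tx queues each for TC3-TC4 */
			*rx = tc << 4;
		} else {
			*tx = (tc + 8) << 3;	/* 8 Tx queues each for TC5-TC7 */
			*rx = tc << 4;
		}
	} else {
		static const unsigned int tx4[] = { 0, 64, 96, 112 };

		*rx = tc << 5;			/* 32 Rx queues per TC */
		*tx = tx4[tc];
	}
}

int main(void)
{
	unsigned int tc, tx, rx;

	printf("82599/X540, 8 traffic classes:\n");
	for (tc = 0; tc < 8; tc++) {
		first_reg_idx(8, tc, &tx, &rx);
		printf("  tc %u: first tx reg %3u, first rx reg %3u\n", tc, tx, rx);
	}

	printf("82599/X540, 4 traffic classes:\n");
	for (tc = 0; tc < 4; tc++) {
		first_reg_idx(4, tc, &tx, &rx);
		printf("  tc %u: first tx reg %3u, first rx reg %3u\n", tc, tx, rx);
	}
	return 0;
}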
/**
* ixgbe_cache_ring_dcb - Descriptor ring to register mapping for DCB
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for DCB to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
{
struct net_device *dev = adapter->netdev;
int i, j, k;
u8 num_tcs = netdev_get_num_tc(dev);
if (!num_tcs)
return false;
for (i = 0, k = 0; i < num_tcs; i++) {
unsigned int tx_s, rx_s;
u16 count = dev->tc_to_txq[i].count;
ixgbe_get_first_reg_idx(adapter, i, &tx_s, &rx_s);
for (j = 0; j < count; j++, k++) {
adapter->tx_ring[k]->reg_idx = tx_s + j;
adapter->rx_ring[k]->reg_idx = rx_s + j;
adapter->tx_ring[k]->dcb_tc = i;
adapter->rx_ring[k]->dcb_tc = i;
}
}
return true;
}
#endif
/**
* ixgbe_cache_ring_fdir - Descriptor ring to register mapping for Flow Director
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for Flow Director to the assigned rings.
*
**/
static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
{
int i;
bool ret = false;
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
(adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)) {
for (i = 0; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = i;
for (i = 0; i < adapter->num_tx_queues; i++)
adapter->tx_ring[i]->reg_idx = i;
ret = true;
}
return ret;
}
#ifdef IXGBE_FCOE
/**
* ixgbe_cache_ring_fcoe - Descriptor ring to register mapping for the FCoE
* @adapter: board private structure to initialize
*
* Cache the descriptor ring offsets for FCoE mode to the assigned rings.
*
*/
static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
{
struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
int i;
u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
return false;
if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE)
ixgbe_cache_ring_fdir(adapter);
else
ixgbe_cache_ring_rss(adapter);
fcoe_rx_i = f->mask;
fcoe_tx_i = f->mask;
}
for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
}
return true;
}
#endif /* IXGBE_FCOE */
/**
* ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov
* @adapter: board private structure to initialize
*
* SR-IOV doesn't use any descriptor rings but changes the default if
* no other mapping is used.
*
*/
static inline bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter)
{
adapter->rx_ring[0]->reg_idx = adapter->num_vfs * 2;
adapter->tx_ring[0]->reg_idx = adapter->num_vfs * 2;
if (adapter->num_vfs)
return true;
else
return false;
}
/**
* ixgbe_cache_ring_register - Descriptor ring to register mapping
* @adapter: board private structure to initialize
*
* Once we know the feature-set enabled for the device, we'll cache
* the register offset the descriptor ring is assigned to.
*
* Note, the order the various feature calls is important. It must start with
* the "most" features enabled at the same time, then trickle down to the
* least amount of features turned on at once.
**/
static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
{
/* start with default case */
adapter->rx_ring[0]->reg_idx = 0;
adapter->tx_ring[0]->reg_idx = 0;
if (ixgbe_cache_ring_sriov(adapter))
return;
#ifdef CONFIG_IXGBE_DCB
if (ixgbe_cache_ring_dcb(adapter))
return;
#endif
#ifdef IXGBE_FCOE
if (ixgbe_cache_ring_fcoe(adapter))
return;
#endif /* IXGBE_FCOE */
if (ixgbe_cache_ring_fdir(adapter))
return;
if (ixgbe_cache_ring_rss(adapter))
return;
}
/**
* ixgbe_set_interrupt_capability - set MSI-X or MSI if supported
* @adapter: board private structure to initialize
*
* Attempt to configure the interrupts using the best available
* capabilities of the hardware and the kernel.
**/
static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
int err = 0;
int vector, v_budget;
/*
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors
* than CPU's. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPU's.
* The default is to use pairs of vectors.
*/
v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues);
v_budget = min_t(int, v_budget, num_online_cpus());
v_budget += NON_Q_VECTORS;
/*
* At the same time, hardware can only support a maximum of
* hw.mac->max_msix_vectors vectors. With features
* such as RSS and VMDq, we can easily surpass the number of Rx and Tx
* descriptor queues supported by our device. Thus, we cap it off in
* those rare cases where the cpu count also exceeds our vector limit.
*/
v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors);
/* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (adapter->msix_entries) {
for (vector = 0; vector < v_budget; vector++)
adapter->msix_entries[vector].entry = vector;
ixgbe_acquire_msix_vectors(adapter, v_budget);
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
goto out;
}
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
e_err(probe,
"ATR is not supported while multiple "
"queues are disabled. Disabling Flow Director\n");
}
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 0;
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter);
err = ixgbe_set_num_queues(adapter);
if (err)
return err;
err = pci_enable_msi(adapter->pdev);
if (!err) {
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
} else {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
"Unable to allocate MSI interrupt, "
"falling back to legacy. Error: %d\n", err);
/* reset err */
err = 0;
}
out:
return err;
}
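The budget computed above is essentially min(max(rx_queues, tx_queues), online_cpus) + NON_Q_VECTORS, clamped to the MAC's MSI-X limit. A small arithmetic sketch of that calculation; the queue, CPU and limit values are invented examples, and NON_Q_VECTORS is assumed to be 1 here.
#include <stdio.h>

#define NON_Q_VECTORS 1   /* assumption for this sketch: one non-queue vector */

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

int main(void)
{
	int num_rx_queues = 16, num_tx_queues = 16;  /* example values */
	int online_cpus = 8;
	int hw_max_msix = 64;                        /* example MAC limit */
	int v_budget;

	/* roughly one queue pair per CPU, plus the non-queue vector */
	v_budget = max_int(num_rx_queues, num_tx_queues);
	v_budget = min_int(v_budget, online_cpus);
	v_budget += NON_Q_VECTORS;

	/* never ask for more than the hardware advertises */
	v_budget = min_int(v_budget, hw_max_msix);

	printf("MSI-X vectors to request: %d\n", v_budget);  /* prints 9 */
	return 0;
}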
static void ixgbe_add_ring(struct ixgbe_ring *ring,
struct ixgbe_ring_container *head)
{
ring->next = head->ring;
head->ring = ring;
head->count++;
}
/**
* ixgbe_alloc_q_vector - Allocate memory for a single interrupt vector
* @adapter: board private structure to initialize
* @v_idx: index of vector in adapter struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
**/
static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, int v_idx,
int txr_count, int txr_idx,
int rxr_count, int rxr_idx)
{
struct ixgbe_q_vector *q_vector;
struct ixgbe_ring *ring;
int node = -1;
int cpu = -1;
int ring_count, size;
ring_count = txr_count + rxr_count;
size = sizeof(struct ixgbe_q_vector) +
(sizeof(struct ixgbe_ring) * ring_count);
/* customize cpu for Flow Director mapping */
if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) {
if (cpu_online(v_idx)) {
cpu = v_idx;
node = cpu_to_node(cpu);
}
}
/* allocate q_vector and rings */
q_vector = kzalloc_node(size, GFP_KERNEL, node);
if (!q_vector)
q_vector = kzalloc(size, GFP_KERNEL);
if (!q_vector)
return -ENOMEM;
/* setup affinity mask and node */
if (cpu != -1)
cpumask_set_cpu(cpu, &q_vector->affinity_mask);
else
cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
q_vector->numa_node = node;
/* initialize NAPI */
netif_napi_add(adapter->netdev, &q_vector->napi,
ixgbe_poll, 64);
/* tie q_vector and adapter together */
adapter->q_vector[v_idx] = q_vector;
q_vector->adapter = adapter;
q_vector->v_idx = v_idx;
/* initialize work limits */
q_vector->tx.work_limit = adapter->tx_work_limit;
/* initialize pointer to rings */
ring = q_vector->ring;
while (txr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Tx values */
ixgbe_add_ring(ring, &q_vector->tx);
/* apply Tx specific ring traits */
ring->count = adapter->tx_ring_count;
ring->queue_index = txr_idx;
/* assign ring to adapter */
adapter->tx_ring[txr_idx] = ring;
/* update count and index */
txr_count--;
txr_idx++;
/* push pointer to next ring */
ring++;
}
while (rxr_count) {
/* assign generic ring traits */
ring->dev = &adapter->pdev->dev;
ring->netdev = adapter->netdev;
/* configure backlink on ring */
ring->q_vector = q_vector;
/* update q_vector Rx values */
ixgbe_add_ring(ring, &q_vector->rx);
/*
* 82599 errata, UDP frames with a 0 checksum
* can be marked as checksum errors.
*/
if (adapter->hw.mac.type == ixgbe_mac_82599EB)
set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state);
/* apply Rx specific ring traits */
ring->count = adapter->rx_ring_count;
ring->queue_index = rxr_idx;
/* assign ring to adapter */
adapter->rx_ring[rxr_idx] = ring;
/* update count and index */
rxr_count--;
rxr_idx++;
/* push pointer to next ring */
ring++;
}
return 0;
}
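ixgbe_alloc_q_vector() sizes a single allocation to hold the q_vector and all of its rings back to back, then steps a ring pointer through the trailing array. The user-space sketch below shows the same layout trick with a flexible array member; the demo_* struct names are invented for the example.
#include <stdio.h>
#include <stdlib.h>

struct demo_ring {
	int queue_index;
	int count;
};

struct demo_q_vector {
	int v_idx;
	int ring_count;
	struct demo_ring ring[];   /* rings live directly behind the vector */
};

int main(void)
{
	int txr_count = 1, rxr_count = 2;
	int ring_count = txr_count + rxr_count;
	struct demo_q_vector *qv;
	struct demo_ring *ring;
	int i;

	/* one allocation for the vector plus all of its rings */
	qv = malloc(sizeof(*qv) + sizeof(struct demo_ring) * ring_count);
	if (!qv)
		return 1;
	qv->v_idx = 0;
	qv->ring_count = ring_count;

	/* walk the trailing array exactly like the driver walks "ring++" */
	ring = qv->ring;
	for (i = 0; i < ring_count; i++, ring++) {
		ring->queue_index = i;
		ring->count = 512;
	}

	for (i = 0; i < ring_count; i++)
		printf("ring %d starts %ld bytes after its q_vector\n",
		       i, (long)((char *)&qv->ring[i] - (char *)qv));

	free(qv);
	return 0;
}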
/**
* ixgbe_free_q_vector - Free memory allocated for specific interrupt vector
* @adapter: board private structure to initialize
* @v_idx: Index of vector to be freed
*
* This function frees the memory allocated to the q_vector. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx)
{
struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx];
struct ixgbe_ring *ring;
ixgbe_for_each_ring(ring, q_vector->tx)
adapter->tx_ring[ring->queue_index] = NULL;
ixgbe_for_each_ring(ring, q_vector->rx)
adapter->rx_ring[ring->queue_index] = NULL;
adapter->q_vector[v_idx] = NULL;
netif_napi_del(&q_vector->napi);
/*
* ixgbe_get_stats64() might access the rings on this vector,
* we must wait a grace period before freeing it.
*/
kfree_rcu(q_vector, rcu);
}
/**
* ixgbe_alloc_q_vectors - Allocate memory for interrupt vectors
* @adapter: board private structure to initialize
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
**/
static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter)
{
int q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
int rxr_remaining = adapter->num_rx_queues;
int txr_remaining = adapter->num_tx_queues;
int rxr_idx = 0, txr_idx = 0, v_idx = 0;
int err;
/* only one q_vector if MSI-X is disabled. */
if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
q_vectors = 1;
if (q_vectors >= (rxr_remaining + txr_remaining)) {
for (; rxr_remaining; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
err = ixgbe_alloc_q_vector(adapter, v_idx,
0, 0, rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
}
}
for (; q_vectors; v_idx++, q_vectors--) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);
err = ixgbe_alloc_q_vector(adapter, v_idx,
tqpv, txr_idx,
rqpv, rxr_idx);
if (err)
goto err_out;
/* update counts and index */
rxr_remaining -= rqpv;
rxr_idx += rqpv;
txr_remaining -= tqpv;
txr_idx += tqpv;
}
return 0;
err_out:
while (v_idx) {
v_idx--;
ixgbe_free_q_vector(adapter, v_idx);
}
return -ENOMEM;
}
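The distribution loop above hands the remaining rings to the remaining vectors with DIV_ROUND_UP, which keeps per-vector ring counts within one of each other. A minimal sketch of that arithmetic; the queue and vector counts are arbitrary example values.
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int q_vectors = 8;             /* example: 8 MSI-X queue vectors */
	int rxr_remaining = 10;        /* example: 10 Rx rings to place */
	int txr_remaining = 10;        /* example: 10 Tx rings to place */
	int v_idx;

	for (v_idx = 0; q_vectors; v_idx++, q_vectors--) {
		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors);
		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors);

		printf("vector %d: %d Rx ring(s), %d Tx ring(s)\n",
		       v_idx, rqpv, tqpv);

		rxr_remaining -= rqpv;
		txr_remaining -= tqpv;
	}
	return 0;
}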
/**
* ixgbe_free_q_vectors - Free memory allocated for interrupt vectors
* @adapter: board private structure to initialize
*
* This function frees the memory allocated to the q_vectors. In addition if
* NAPI is enabled it will delete any references to the NAPI struct prior
* to freeing the q_vector.
**/
static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter)
{
int v_idx, q_vectors;
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
else
q_vectors = 1;
for (v_idx = 0; v_idx < q_vectors; v_idx++)
ixgbe_free_q_vector(adapter, v_idx);
}
static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter)
{
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED;
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
adapter->flags &= ~IXGBE_FLAG_MSI_ENABLED;
pci_disable_msi(adapter->pdev);
}
}
/**
* ixgbe_init_interrupt_scheme - Determine proper interrupt scheme
* @adapter: board private structure to initialize
*
* We determine which interrupt scheme to use based on...
* - Kernel support (MSI, MSI-X)
* - which can be user-defined (via MODULE_PARAM)
* - Hardware queue count (num_*_queues)
* - defined by miscellaneous hardware support/features (RSS, etc.)
**/
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter)
{
int err;
/* Number of supported queues */
err = ixgbe_set_num_queues(adapter);
if (err)
return err;
err = ixgbe_set_interrupt_capability(adapter);
if (err) {
e_dev_err("Unable to setup interrupt capabilities\n");
goto err_set_interrupt;
}
err = ixgbe_alloc_q_vectors(adapter);
if (err) {
e_dev_err("Unable to allocate memory for queue vectors\n");
goto err_alloc_q_vectors;
}
ixgbe_cache_ring_register(adapter);
e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : "Disabled",
adapter->num_rx_queues, adapter->num_tx_queues);
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
err_alloc_q_vectors:
ixgbe_reset_interrupt_capability(adapter);
err_set_interrupt:
return err;
}
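ixgbe_init_interrupt_scheme() uses the usual kernel unwind pattern: each successful step has a matching teardown that runs in reverse order on a later failure. A compact user-space sketch of that goto-unwind structure; the step_*/undo_* helpers are invented stand-ins for the driver calls.
#include <stdio.h>

/* Invented stand-ins for the real setup/teardown steps. */
static int step_set_queues(void)      { puts("set queues");     return 0; }
static int step_set_interrupts(void)  { puts("set interrupts"); return 0; }
static int step_alloc_vectors(void)   { puts("alloc vectors");  return -1; /* simulate failure */ }
static void undo_set_interrupts(void) { puts("undo interrupts"); }

static int init_scheme(void)
{
	int err;

	err = step_set_queues();
	if (err)
		return err;

	err = step_set_interrupts();
	if (err)
		goto err_set_interrupt;

	err = step_alloc_vectors();
	if (err)
		goto err_alloc_q_vectors;

	return 0;

err_alloc_q_vectors:
	undo_set_interrupts();     /* unwind only what already succeeded */
err_set_interrupt:
	return err;
}

int main(void)
{
	printf("init_scheme() returned %d\n", init_scheme());
	return 0;
}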
/**
* ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
* @adapter: board private structure to clear interrupt scheme on
*
* We go through and clear interrupt specific resources and reset the structure
* to pre-load conditions
**/
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
{
adapter->num_tx_queues = 0;
adapter->num_rx_queues = 0;
ixgbe_free_q_vectors(adapter);
ixgbe_reset_interrupt_capability(adapter);
}
/**
* ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter)
* @adapter: board private structure to initialize
*
* ixgbe_sw_init initializes the Adapter private data structure.
* Fields are initialized based on PCI device information and
* OS network device settings (MTU size).
**/
static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
unsigned int rss;
#ifdef CONFIG_IXGBE_DCB
int j;
struct tc_configuration *tc;
#endif
/* PCI config space info */
hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device;
hw->revision_id = pdev->revision;
hw->subsystem_vendor_id = pdev->subsystem_vendor;
hw->subsystem_device_id = pdev->subsystem_device;
/* Set capability flags */
rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus());
adapter->ring_feature[RING_F_RSS].indices = rss;
adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
if (hw->device_id == IXGBE_DEV_ID_82598AT)
adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
break;
case ixgbe_mac_X540:
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
case ixgbe_mac_82599EB:
adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
/* Flow Director hash filters enabled */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->atr_sample_rate = 20;
adapter->ring_feature[RING_F_FDIR].indices =
IXGBE_MAX_FDIR_INDICES;
adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K;
#ifdef IXGBE_FCOE
adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE;
adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
adapter->ring_feature[RING_F_FCOE].indices = 0;
#ifdef CONFIG_IXGBE_DCB
/* Default traffic class to use for FCoE */
adapter->fcoe.up = IXGBE_FCOE_DEFTC;
#endif
#endif /* IXGBE_FCOE */
break;
default:
break;
}
/* n-tuple support exists, always init our spinlock */
spin_lock_init(&adapter->fdir_perfect_lock);
#ifdef CONFIG_IXGBE_DCB
switch (hw->mac.type) {
case ixgbe_mac_X540:
adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS;
break;
default:
adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS;
adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS;
break;
}
/* Configure DCB traffic classes */
for (j = 0; j < MAX_TRAFFIC_CLASS; j++) {
tc = &adapter->dcb_cfg.tc_config[j];
tc->path[DCB_TX_CONFIG].bwg_id = 0;
tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1);
tc->path[DCB_RX_CONFIG].bwg_id = 0;
tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1);
tc->dcb_pfc = pfc_disabled;
}
/* Initialize default user to priority mapping, UPx->TC0 */
tc = &adapter->dcb_cfg.tc_config[0];
tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100;
adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100;
adapter->dcb_cfg.pfc_mode_enable = false;
adapter->dcb_set_bitmap = 0x00;
adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE;
ixgbe_copy_dcb_cfg(&adapter->dcb_cfg, &adapter->temp_dcb_cfg,
MAX_TRAFFIC_CLASS);
#endif
/* default flow control settings */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
ixgbe_pbthresh_setup(adapter);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
/* enable itr by default in dynamic mode */
adapter->rx_itr_setting = 1;
adapter->tx_itr_setting = 1;
/* set default ring sizes */
adapter->tx_ring_count = IXGBE_DEFAULT_TXD;
adapter->rx_ring_count = IXGBE_DEFAULT_RXD;
/* set default work limits */
adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK;
/* initialize eeprom parameters */
if (ixgbe_init_eeprom_params_generic(hw)) {
e_dev_err("EEPROM initialization failed\n");
return -EIO;
}
set_bit(__IXGBE_DOWN, &adapter->state);
return 0;
}
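One easy-to-miss detail in the DCB defaults above: bwg_percent is 12 + (j & 1), i.e. 12% for even traffic classes and 13% for odd ones, which sums to exactly 100% over the 8 classes. A tiny stand-alone check of that arithmetic, for illustration only.
#include <stdio.h>

int main(void)
{
	int j, total = 0;

	for (j = 0; j < 8; j++)        /* MAX_TRAFFIC_CLASS == 8 */
		total += 12 + (j & 1);     /* 12, 13, 12, 13, ... */

	printf("sum of per-TC bandwidth group percentages: %d%%\n", total); /* 100 */
	return 0;
}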
/** /**
* ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
@@ -6235,7 +5363,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter)
* print link down message * print link down message
* @adapter - pointer to the adapter structure * @adapter - pointer to the adapter structure
**/ **/
static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter* adapter) static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
@@ -6480,41 +5608,32 @@ static void ixgbe_service_timer(unsigned long data)
unsigned long next_event_offset; unsigned long next_event_offset;
bool ready = true; bool ready = true;
#ifdef CONFIG_PCI_IOV /* poll faster when waiting for link */
ready = false; if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
next_event_offset = HZ / 10;
else
next_event_offset = HZ * 2;
#ifdef CONFIG_PCI_IOV
/* /*
* don't bother with SR-IOV VF DMA hang check if there are * don't bother with SR-IOV VF DMA hang check if there are
* no VFs or the link is down * no VFs or the link is down
*/ */
if (!adapter->num_vfs || if (!adapter->num_vfs ||
(adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) { (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE))
ready = true;
goto normal_timer_service; goto normal_timer_service;
}
/* If we have VFs allocated then we must check for DMA hangs */ /* If we have VFs allocated then we must check for DMA hangs */
ixgbe_check_for_bad_vf(adapter); ixgbe_check_for_bad_vf(adapter);
next_event_offset = HZ / 50; next_event_offset = HZ / 50;
adapter->timer_event_accumulator++; adapter->timer_event_accumulator++;
if (adapter->timer_event_accumulator >= 100) { if (adapter->timer_event_accumulator >= 100)
ready = true;
adapter->timer_event_accumulator = 0; adapter->timer_event_accumulator = 0;
}
goto schedule_event;
normal_timer_service:
#endif
/* poll faster when waiting for link */
if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)
next_event_offset = HZ / 10;
else else
next_event_offset = HZ * 2; ready = false;
#ifdef CONFIG_PCI_IOV normal_timer_service:
schedule_event:
#endif #endif
/* Reset the timer */ /* Reset the timer */
mod_timer(&adapter->service_timer, next_event_offset + jiffies); mod_timer(&adapter->service_timer, next_event_offset + jiffies);
@@ -6563,32 +5682,11 @@ static void ixgbe_service_task(struct work_struct *work)
ixgbe_service_event_complete(adapter); ixgbe_service_event_complete(adapter);
} }
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *tx_ring, u32 vlan_macip_lens,
u32 fcoe_sof_eof, u32 type_tucmd, u32 mss_l4len_idx)
{
struct ixgbe_adv_tx_context_desc *context_desc;
u16 i = tx_ring->next_to_use;
context_desc = IXGBE_TX_CTXTDESC(tx_ring, i);
i++;
tx_ring->next_to_use = (i < tx_ring->count) ? i : 0;
/* set bits to identify this as an advanced context descriptor */
type_tucmd |= IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT;
context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
}
static int ixgbe_tso(struct ixgbe_ring *tx_ring, static int ixgbe_tso(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first, struct ixgbe_tx_buffer *first,
u32 tx_flags, __be16 protocol, u8 *hdr_len) u8 *hdr_len)
{ {
struct sk_buff *skb = first->skb; struct sk_buff *skb = first->skb;
int err;
u32 vlan_macip_lens, type_tucmd; u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len; u32 mss_l4len_idx, l4len;
@@ -6596,7 +5694,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
return 0; return 0;
if (skb_header_cloned(skb)) { if (skb_header_cloned(skb)) {
err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
if (err) if (err)
return err; return err;
} }
@@ -6604,7 +5702,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
if (protocol == __constant_htons(ETH_P_IP)) { if (first->protocol == __constant_htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb); struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0; iph->tot_len = 0;
iph->check = 0; iph->check = 0;
@@ -6613,12 +5711,17 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
IPPROTO_TCP, IPPROTO_TCP,
0); 0);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM |
IXGBE_TX_FLAGS_IPV4;
} else if (skb_is_gso_v6(skb)) { } else if (skb_is_gso_v6(skb)) {
ipv6_hdr(skb)->payload_len = 0; ipv6_hdr(skb)->payload_len = 0;
tcp_hdr(skb)->check = tcp_hdr(skb)->check =
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, &ipv6_hdr(skb)->daddr,
0, IPPROTO_TCP, 0); 0, IPPROTO_TCP, 0);
first->tx_flags |= IXGBE_TX_FLAGS_TSO |
IXGBE_TX_FLAGS_CSUM;
} }
/* compute header lengths */ /* compute header lengths */
@@ -6637,17 +5740,16 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
vlan_macip_lens = skb_network_header_len(skb); vlan_macip_lens = skb_network_header_len(skb);
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd,
mss_l4len_idx); mss_l4len_idx);
return 1; return 1;
} }
static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring, static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first, struct ixgbe_tx_buffer *first)
u32 tx_flags, __be16 protocol)
{ {
struct sk_buff *skb = first->skb; struct sk_buff *skb = first->skb;
u32 vlan_macip_lens = 0; u32 vlan_macip_lens = 0;
@@ -6655,12 +5757,12 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
u32 type_tucmd = 0; u32 type_tucmd = 0;
if (skb->ip_summed != CHECKSUM_PARTIAL) { if (skb->ip_summed != CHECKSUM_PARTIAL) {
if (!(tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) &&
!(tx_flags & IXGBE_TX_FLAGS_TXSW)) !(first->tx_flags & IXGBE_TX_FLAGS_TXSW))
return false; return;
} else { } else {
u8 l4_hdr = 0; u8 l4_hdr = 0;
switch (protocol) { switch (first->protocol) {
case __constant_htons(ETH_P_IP): case __constant_htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb); vlan_macip_lens |= skb_network_header_len(skb);
type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4;
@@ -6674,7 +5776,7 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
if (unlikely(net_ratelimit())) { if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev, dev_warn(tx_ring->dev,
"partial checksum but proto=%x!\n", "partial checksum but proto=%x!\n",
skb->protocol); first->protocol);
} }
break; break;
} }
@@ -6698,19 +5800,21 @@ static bool ixgbe_tx_csum(struct ixgbe_ring *tx_ring,
if (unlikely(net_ratelimit())) { if (unlikely(net_ratelimit())) {
dev_warn(tx_ring->dev, dev_warn(tx_ring->dev,
"partial checksum but l4 proto=%x!\n", "partial checksum but l4 proto=%x!\n",
skb->protocol); l4_hdr);
} }
break; break;
} }
/* update TX checksum flag */
first->tx_flags |= IXGBE_TX_FLAGS_CSUM;
} }
/* vlan_macip_lens: MACLEN, VLAN tag */
vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT;
vlan_macip_lens |= tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK;
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0,
type_tucmd, mss_l4len_idx); type_tucmd, mss_l4len_idx);
return (skb->ip_summed == CHECKSUM_PARTIAL);
} }
static __le32 ixgbe_tx_cmd_type(u32 tx_flags) static __le32 ixgbe_tx_cmd_type(u32 tx_flags)
@@ -6775,7 +5879,6 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc,
static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
struct ixgbe_tx_buffer *first, struct ixgbe_tx_buffer *first,
u32 tx_flags,
const u8 hdr_len) const u8 hdr_len)
{ {
dma_addr_t dma; dma_addr_t dma;
@@ -6786,6 +5889,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
unsigned int data_len = skb->data_len; unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb); unsigned int size = skb_headlen(skb);
unsigned int paylen = skb->len - hdr_len; unsigned int paylen = skb->len - hdr_len;
u32 tx_flags = first->tx_flags;
__le32 cmd_type; __le32 cmd_type;
u16 i = tx_ring->next_to_use; u16 i = tx_ring->next_to_use;
@@ -6812,7 +5916,6 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
/* record length, and DMA address */ /* record length, and DMA address */
dma_unmap_len_set(first, len, size); dma_unmap_len_set(first, len, size);
dma_unmap_addr_set(first, dma, dma); dma_unmap_addr_set(first, dma, dma);
first->tx_flags = tx_flags;
tx_desc->read.buffer_addr = cpu_to_le64(dma); tx_desc->read.buffer_addr = cpu_to_le64(dma);
@@ -6921,8 +6024,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring,
} }
static void ixgbe_atr(struct ixgbe_ring *ring, static void ixgbe_atr(struct ixgbe_ring *ring,
struct ixgbe_tx_buffer *first, struct ixgbe_tx_buffer *first)
u32 tx_flags, __be16 protocol)
{ {
struct ixgbe_q_vector *q_vector = ring->q_vector; struct ixgbe_q_vector *q_vector = ring->q_vector;
union ixgbe_atr_hash_dword input = { .dword = 0 }; union ixgbe_atr_hash_dword input = { .dword = 0 };
@@ -6949,9 +6051,9 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
hdr.network = skb_network_header(first->skb); hdr.network = skb_network_header(first->skb);
/* Currently only IPv4/IPv6 with TCP is supported */ /* Currently only IPv4/IPv6 with TCP is supported */
if ((protocol != __constant_htons(ETH_P_IPV6) || if ((first->protocol != __constant_htons(ETH_P_IPV6) ||
hdr.ipv6->nexthdr != IPPROTO_TCP) && hdr.ipv6->nexthdr != IPPROTO_TCP) &&
(protocol != __constant_htons(ETH_P_IP) || (first->protocol != __constant_htons(ETH_P_IP) ||
hdr.ipv4->protocol != IPPROTO_TCP)) hdr.ipv4->protocol != IPPROTO_TCP))
return; return;
@@ -6968,7 +6070,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
/* reset sample count */ /* reset sample count */
ring->atr_count = 0; ring->atr_count = 0;
vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); vlan_id = htons(first->tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
/* /*
* src and dst are inverted, think how the receiver sees them * src and dst are inverted, think how the receiver sees them
@@ -6983,13 +6085,13 @@ static void ixgbe_atr(struct ixgbe_ring *ring,
* since src port and flex bytes occupy the same word XOR them together * since src port and flex bytes occupy the same word XOR them together
* and write the value to source port portion of compressed dword * and write the value to source port portion of compressed dword
*/ */
if (tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN)) if (first->tx_flags & (IXGBE_TX_FLAGS_SW_VLAN | IXGBE_TX_FLAGS_HW_VLAN))
common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q); common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
else else
common.port.src ^= th->dest ^ protocol; common.port.src ^= th->dest ^ first->protocol;
common.port.dst ^= th->source; common.port.dst ^= th->source;
if (protocol == __constant_htons(ETH_P_IP)) { if (first->protocol == __constant_htons(ETH_P_IP)) {
input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
} else { } else {
@@ -7145,43 +6247,36 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
} }
} }
/* record initial flags and protocol */
first->tx_flags = tx_flags;
first->protocol = protocol;
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
/* setup tx offload for FCoE */ /* setup tx offload for FCoE */
if ((protocol == __constant_htons(ETH_P_FCOE)) && if ((protocol == __constant_htons(ETH_P_FCOE)) &&
(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) { (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) {
tso = ixgbe_fso(tx_ring, first, tx_flags, &hdr_len); tso = ixgbe_fso(tx_ring, first, &hdr_len);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
else if (tso)
tx_flags |= IXGBE_TX_FLAGS_FSO |
IXGBE_TX_FLAGS_FCOE;
else
tx_flags |= IXGBE_TX_FLAGS_FCOE;
goto xmit_fcoe; goto xmit_fcoe;
} }
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
/* setup IPv4/IPv6 offloads */ tso = ixgbe_tso(tx_ring, first, &hdr_len);
if (protocol == __constant_htons(ETH_P_IP))
tx_flags |= IXGBE_TX_FLAGS_IPV4;
tso = ixgbe_tso(tx_ring, first, tx_flags, protocol, &hdr_len);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
else if (tso) else if (!tso)
tx_flags |= IXGBE_TX_FLAGS_TSO | IXGBE_TX_FLAGS_CSUM; ixgbe_tx_csum(tx_ring, first);
else if (ixgbe_tx_csum(tx_ring, first, tx_flags, protocol))
tx_flags |= IXGBE_TX_FLAGS_CSUM;
/* add the ATR filter if ATR is on */ /* add the ATR filter if ATR is on */
if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
ixgbe_atr(tx_ring, first, tx_flags, protocol); ixgbe_atr(tx_ring, first);
#ifdef IXGBE_FCOE #ifdef IXGBE_FCOE
xmit_fcoe: xmit_fcoe:
#endif /* IXGBE_FCOE */ #endif /* IXGBE_FCOE */
ixgbe_tx_map(tx_ring, first, tx_flags, hdr_len); ixgbe_tx_map(tx_ring, first, hdr_len);
ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
@@ -7347,8 +6442,8 @@ static void ixgbe_netpoll(struct net_device *netdev)
} }
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
} }
#endif
#endif
static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
struct rtnl_link_stats64 *stats) struct rtnl_link_stats64 *stats)
{ {
@@ -7397,6 +6492,7 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
return stats; return stats;
} }
#ifdef CONFIG_IXGBE_DCB
/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. /* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid.
* #adapter: pointer to ixgbe_adapter * #adapter: pointer to ixgbe_adapter
* @tc: number of traffic classes currently enabled * @tc: number of traffic classes currently enabled
@@ -7433,7 +6529,6 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc)
return; return;
} }
/* ixgbe_setup_tc - routine to configure net_device for multiple traffic /* ixgbe_setup_tc - routine to configure net_device for multiple traffic
* classes. * classes.
* *
@@ -7453,7 +6548,8 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
/* Hardware supports up to 8 traffic classes */ /* Hardware supports up to 8 traffic classes */
if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || if (tc > adapter->dcb_cfg.num_tcs.pg_tcs ||
(hw->mac.type == ixgbe_mac_82598EB && tc < MAX_TRAFFIC_CLASS)) (hw->mac.type == ixgbe_mac_82598EB &&
tc < MAX_TRAFFIC_CLASS))
return -EINVAL; return -EINVAL;
/* Hardware has to reinitialize queues and interrupts to /* Hardware has to reinitialize queues and interrupts to
@@ -7467,7 +6563,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
if (tc) { if (tc) {
netdev_set_num_tc(dev, tc); netdev_set_num_tc(dev, tc);
adapter->last_lfc_mode = adapter->hw.fc.current_mode; adapter->last_lfc_mode = adapter->hw.fc.current_mode;
adapter->flags |= IXGBE_FLAG_DCB_ENABLED; adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
@@ -7475,7 +6570,6 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
adapter->hw.fc.requested_mode = ixgbe_fc_none; adapter->hw.fc.requested_mode = ixgbe_fc_none;
} else { } else {
netdev_reset_tc(dev); netdev_reset_tc(dev);
adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
@@ -7493,6 +6587,7 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc)
return 0; return 0;
} }
#endif /* CONFIG_IXGBE_DCB */
void ixgbe_do_reset(struct net_device *netdev) void ixgbe_do_reset(struct net_device *netdev)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -7504,54 +6599,52 @@ void ixgbe_do_reset(struct net_device *netdev)
} }
static netdev_features_t ixgbe_fix_features(struct net_device *netdev, static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
netdev_features_t data) netdev_features_t features)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
#ifdef CONFIG_DCB #ifdef CONFIG_DCB
if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
data &= ~NETIF_F_HW_VLAN_RX; features &= ~NETIF_F_HW_VLAN_RX;
#endif #endif
/* return error if RXHASH is being enabled when RSS is not supported */ /* return error if RXHASH is being enabled when RSS is not supported */
if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
data &= ~NETIF_F_RXHASH; features &= ~NETIF_F_RXHASH;
/* If Rx checksum is disabled, then RSC/LRO should also be disabled */ /* If Rx checksum is disabled, then RSC/LRO should also be disabled */
if (!(data & NETIF_F_RXCSUM)) if (!(features & NETIF_F_RXCSUM))
data &= ~NETIF_F_LRO; features &= ~NETIF_F_LRO;
/* Turn off LRO if not RSC capable or invalid ITR settings */ /* Turn off LRO if not RSC capable */
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
data &= ~NETIF_F_LRO; features &= ~NETIF_F_LRO;
} else if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
(adapter->rx_itr_setting != 1 &&
adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE)) {
data &= ~NETIF_F_LRO;
e_info(probe, "rx-usecs set too low, not enabling RSC\n");
}
return data; return features;
} }
static int ixgbe_set_features(struct net_device *netdev, static int ixgbe_set_features(struct net_device *netdev,
netdev_features_t data) netdev_features_t features)
{ {
struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_adapter *adapter = netdev_priv(netdev);
netdev_features_t changed = netdev->features ^ data; netdev_features_t changed = netdev->features ^ features;
bool need_reset = false; bool need_reset = false;
/* Make sure RSC matches LRO, reset if change */ /* Make sure RSC matches LRO, reset if change */
if (!!(data & NETIF_F_LRO) != if (!(features & NETIF_F_LRO)) {
!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
case ixgbe_mac_82599EB:
need_reset = true; need_reset = true;
break; adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
default: } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
break; !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
if (adapter->rx_itr_setting == 1 ||
adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
need_reset = true;
} else if ((changed ^ features) & NETIF_F_LRO) {
e_info(probe, "rx-usecs set too low, "
"disabling RSC\n");
} }
} }
@@ -7559,31 +6652,30 @@ static int ixgbe_set_features(struct net_device *netdev,
* Check if Flow Director n-tuple support was enabled or disabled. If * Check if Flow Director n-tuple support was enabled or disabled. If
* the state changed, we need to reset. * the state changed, we need to reset.
*/ */
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) { if (!(features & NETIF_F_NTUPLE)) {
/* turn off ATR, enable perfect filters and reset */ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) {
if (data & NETIF_F_NTUPLE) { /* turn off Flow Director, set ATR and reset */
adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) &&
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
need_reset = true; need_reset = true;
} }
} else if (!(data & NETIF_F_NTUPLE)) {
/* turn off Flow Director, set ATR and reset */
adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
if ((adapter->flags & IXGBE_FLAG_RSS_ENABLED) && } else if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) /* turn off ATR, enable perfect filters and reset */
adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
need_reset = true; need_reset = true;
} }
if (changed & NETIF_F_RXALL) if (changed & NETIF_F_RXALL)
need_reset = true; need_reset = true;
netdev->features = data; netdev->features = features;
if (need_reset) if (need_reset)
ixgbe_do_reset(netdev); ixgbe_do_reset(netdev);
return 0; return 0;
} }
static const struct net_device_ops ixgbe_netdev_ops = { static const struct net_device_ops ixgbe_netdev_ops = {
@@ -7591,7 +6683,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_stop = ixgbe_close, .ndo_stop = ixgbe_close,
.ndo_start_xmit = ixgbe_xmit_frame, .ndo_start_xmit = ixgbe_xmit_frame,
.ndo_select_queue = ixgbe_select_queue, .ndo_select_queue = ixgbe_select_queue,
.ndo_set_rx_mode = ixgbe_set_rx_mode, .ndo_set_rx_mode = ixgbe_set_rx_mode,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = ixgbe_set_mac, .ndo_set_mac_address = ixgbe_set_mac,
.ndo_change_mtu = ixgbe_change_mtu, .ndo_change_mtu = ixgbe_change_mtu,
@@ -7602,10 +6694,12 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
.ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64, .ndo_get_stats64 = ixgbe_get_stats64,
#ifdef CONFIG_IXGBE_DCB
.ndo_setup_tc = ixgbe_setup_tc, .ndo_setup_tc = ixgbe_setup_tc,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ixgbe_netpoll, .ndo_poll_controller = ixgbe_netpoll,
#endif #endif
@@ -7623,7 +6717,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
}; };
static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter, static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
const struct ixgbe_info *ii) const struct ixgbe_info *ii)
{ {
#ifdef CONFIG_PCI_IOV #ifdef CONFIG_PCI_IOV
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
@@ -7904,7 +6998,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) {
e_dev_err("The EEPROM Checksum Is Not Valid\n"); e_dev_err("The EEPROM Checksum Is Not Valid\n");
err = -EIO; err = -EIO;
goto err_eeprom; goto err_sw_init;
} }
memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len);
@@ -7913,11 +7007,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
if (ixgbe_validate_mac_addr(netdev->perm_addr)) { if (ixgbe_validate_mac_addr(netdev->perm_addr)) {
e_dev_err("invalid MAC address\n"); e_dev_err("invalid MAC address\n");
err = -EIO; err = -EIO;
goto err_eeprom; goto err_sw_init;
} }
setup_timer(&adapter->service_timer, &ixgbe_service_timer, setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter); (unsigned long) adapter);
INIT_WORK(&adapter->service_task, ixgbe_service_task); INIT_WORK(&adapter->service_task, ixgbe_service_task);
clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state);
@@ -8005,7 +7099,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
/* reset the hardware with the new settings */ /* reset the hardware with the new settings */
err = hw->mac.ops.start_hw(hw); err = hw->mac.ops.start_hw(hw);
if (err == IXGBE_ERR_EEPROM_VERSION) { if (err == IXGBE_ERR_EEPROM_VERSION) {
/* We are running on a pre-production device, log a warning */ /* We are running on a pre-production device, log a warning */
e_dev_warn("This device is a pre-production adapter/LOM. " e_dev_warn("This device is a pre-production adapter/LOM. "
@@ -8060,7 +7153,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
ixgbe_release_hw_control(adapter); ixgbe_release_hw_control(adapter);
ixgbe_clear_interrupt_scheme(adapter); ixgbe_clear_interrupt_scheme(adapter);
err_sw_init: err_sw_init:
err_eeprom:
if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
ixgbe_disable_sriov(adapter); ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
......