Commit 81a430ac authored by David S. Miller
parents c2ec3ff6 729739b7
@@ -134,6 +134,8 @@
 #define E1000_RCTL_SZ_256      0x00030000 /* rx buffer size 256 */
 #define E1000_RCTL_VFE         0x00040000 /* vlan filter enable */
 #define E1000_RCTL_CFIEN       0x00080000 /* canonical form enable */
+#define E1000_RCTL_DPF         0x00400000 /* Discard Pause Frames */
+#define E1000_RCTL_PMCF        0x00800000 /* pass MAC control frames */
 #define E1000_RCTL_SECRC       0x04000000 /* Strip Ethernet CRC */
 /*
......
@@ -1769,10 +1769,21 @@ static int igb_set_features(struct net_device *netdev,
        netdev_features_t features)
 {
        netdev_features_t changed = netdev->features ^ features;
+       struct igb_adapter *adapter = netdev_priv(netdev);

        if (changed & NETIF_F_HW_VLAN_RX)
                igb_vlan_mode(netdev, features);

+       if (!(changed & NETIF_F_RXALL))
+               return 0;
+
+       netdev->features = features;
+
+       if (netif_running(netdev))
+               igb_reinit_locked(adapter);
+       else
+               igb_reset(adapter);
+
        return 0;
 }
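
The hunk above hinges on one idiom: XOR of the old and new feature masks yields exactly the bits that changed, so the driver only pays for a device reset when NETIF_F_RXALL itself toggled. A minimal standalone sketch of that idiom, with illustrative stand-in names rather than driver code:

#include <stdio.h>

#define F_HW_VLAN_RX (1u << 0)  /* demo stand-ins for NETIF_F_* bits */
#define F_RXALL      (1u << 1)

static void set_features(unsigned int *features, unsigned int wanted)
{
        unsigned int changed = *features ^ wanted; /* only bits that differ */

        if (changed & F_HW_VLAN_RX)
                printf("reprogram VLAN offload\n");

        if (!(changed & F_RXALL))
                return; /* RXALL untouched: no reset required */

        *features = wanted;
        printf("RXALL toggled: reinit (if running) or reset the adapter\n");
}

int main(void)
{
        unsigned int features = F_HW_VLAN_RX;

        set_features(&features, F_HW_VLAN_RX | F_RXALL);
        return 0;
}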
@@ -1954,6 +1965,7 @@ static int __devinit igb_probe(struct pci_dev *pdev,
        /* copy netdev features into list of user selectable features */
        netdev->hw_features |= netdev->features;
+       netdev->hw_features |= NETIF_F_RXALL;

        /* set this bit last since it cannot be part of hw_features */
        netdev->features |= NETIF_F_HW_VLAN_FILTER;
@@ -1964,6 +1976,8 @@ static int __devinit igb_probe(struct pci_dev *pdev,
                            NETIF_F_IPV6_CSUM |
                            NETIF_F_SG;

+       netdev->priv_flags |= IFF_SUPP_NOFCS;
+
        if (pci_using_dac) {
                netdev->features |= NETIF_F_HIGHDMA;
                netdev->vlan_features |= NETIF_F_HIGHDMA;
@@ -3003,6 +3017,22 @@ void igb_setup_rctl(struct igb_adapter *adapter)
                wr32(E1000_QDE, ALL_QUEUES);
        }

+       /* This is useful for sniffing bad packets. */
+       if (adapter->netdev->features & NETIF_F_RXALL) {
+               /* UPE and MPE will be handled by normal PROMISC logic
+                * in e1000e_set_rx_mode */
+               rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+                        E1000_RCTL_BAM | /* RX All Bcast Pkts */
+                        E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+               rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+                         E1000_RCTL_DPF | /* Allow filtered pause */
+                         E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+               /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+                * and that breaks VLANs.
+                */
+       }
+
        wr32(E1000_RCTL, rctl);
 }
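
The RXALL branch above reduces to plain bit arithmetic on the RCTL shadow value. A standalone sketch with the bit values from the defines hunk at the top of this diff; E1000_RCTL_SBP and E1000_RCTL_BAM are not shown in this diff, so their values below are assumptions taken from the usual e1000 register layout:

#include <inttypes.h>
#include <stdio.h>

#define E1000_RCTL_SBP   0x00000004u /* store bad packets (assumed, not in diff) */
#define E1000_RCTL_BAM   0x00008000u /* broadcast accept mode (assumed) */
#define E1000_RCTL_VFE   0x00040000u /* remaining values are from the defines hunk */
#define E1000_RCTL_CFIEN 0x00080000u
#define E1000_RCTL_DPF   0x00400000u
#define E1000_RCTL_PMCF  0x00800000u

int main(void)
{
        uint32_t rctl = E1000_RCTL_VFE | E1000_RCTL_DPF; /* assumed prior state */

        rctl |= (E1000_RCTL_SBP | E1000_RCTL_BAM | E1000_RCTL_PMCF);
        rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_DPF | E1000_RCTL_CFIEN);

        printf("RCTL = 0x%08" PRIx32 "\n", rctl); /* 0x00808004: SBP|BAM|PMCF */
        return 0;
}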
@@ -4293,6 +4323,8 @@ static void igb_tx_map(struct igb_ring *tx_ring,
        /* write last descriptor with RS and EOP bits */
        cmd_type |= cpu_to_le32(size) | cpu_to_le32(IGB_TXD_DCMD);
+       if (unlikely(skb->no_fcs))
+               cmd_type &= ~(cpu_to_le32(E1000_ADVTXD_DCMD_IFCS));
        tx_desc->read.cmd_type_len = cmd_type;

        /* set the timestamp */
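
skb->no_fcs means the socket asked the NIC not to append the Ethernet CRC, so the IFCS ("insert FCS") command bit is masked out of the already little-endian descriptor word. A toy sketch of the masking; the bit value is an assumption, not taken from this diff:

#include <inttypes.h>
#include <stdio.h>

#define ADVTXD_DCMD_IFCS 0x02000000u /* "insert FCS" bit -- assumed value */

int main(void)
{
        uint32_t cmd_type = 0x21000000u | ADVTXD_DCMD_IFCS; /* demo descriptor word */

        cmd_type &= ~ADVTXD_DCMD_IFCS; /* skb->no_fcs: do not append the CRC */
        printf("cmd_type = 0x%08" PRIx32 "\n", cmd_type); /* 0x21000000 */
        return 0;
}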
@@ -6098,8 +6130,9 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, int budget)
                        goto next_desc;
                }

-               if (igb_test_staterr(rx_desc,
-                                    E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+               if (unlikely((igb_test_staterr(rx_desc,
+                                              E1000_RXDEXT_ERR_FRAME_ERR_MASK))
+                            && !(rx_ring->netdev->features & NETIF_F_RXALL))) {
                        dev_kfree_skb_any(skb);
                        goto next_desc;
                }
......
@@ -72,12 +72,6 @@
 /* Supported Rx Buffer Sizes */
 #define IXGBE_RXBUFFER_512   512    /* Used for packet split */
-#define IXGBE_RXBUFFER_2K   2048
-#define IXGBE_RXBUFFER_3K   3072
-#define IXGBE_RXBUFFER_4K   4096
-#define IXGBE_RXBUFFER_7K   7168
-#define IXGBE_RXBUFFER_8K   8192
-#define IXGBE_RXBUFFER_15K  15360
 #define IXGBE_MAX_RXBUFFER  16384  /* largest size for a single descriptor */

 /*
@@ -102,7 +96,6 @@
 #define IXGBE_TX_FLAGS_FCOE            (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_FSO             (u32)(1 << 6)
 #define IXGBE_TX_FLAGS_TXSW            (u32)(1 << 7)
-#define IXGBE_TX_FLAGS_MAPPED_AS_PAGE  (u32)(1 << 8)
 #define IXGBE_TX_FLAGS_VLAN_MASK       0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK  0xe0000000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -156,19 +149,18 @@ struct vf_macvlans {
 struct ixgbe_tx_buffer {
        union ixgbe_adv_tx_desc *next_to_watch;
        unsigned long time_stamp;
-       dma_addr_t dma;
-       u32 length;
-       u32 tx_flags;
        struct sk_buff *skb;
-       u32 bytecount;
-       u16 gso_segs;
+       unsigned int bytecount;
+       unsigned short gso_segs;
+       DEFINE_DMA_UNMAP_ADDR(dma);
+       DEFINE_DMA_UNMAP_LEN(len);
+       u32 tx_flags;
 };

 struct ixgbe_rx_buffer {
        struct sk_buff *skb;
        dma_addr_t dma;
        struct page *page;
-       dma_addr_t page_dma;
        unsigned int page_offset;
 };
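
The rewritten ixgbe_tx_buffer drops its open-coded dma/length fields in favor of the kernel's DEFINE_DMA_UNMAP_ADDR/DEFINE_DMA_UNMAP_LEN helpers, which compile away entirely on architectures that need no unmap bookkeeping. A kernel-style sketch of how such fields are typically set and read back; the demo struct and helpers are hypothetical, not from this commit:

#include <linux/dma-mapping.h>

struct demo_tx_buffer {
        DEFINE_DMA_UNMAP_ADDR(dma);  /* may occupy zero bytes */
        DEFINE_DMA_UNMAP_LEN(len);
};

/* record the mapping at map time ... */
static void demo_record(struct demo_tx_buffer *tx, dma_addr_t addr, size_t size)
{
        dma_unmap_addr_set(tx, dma, addr);
        dma_unmap_len_set(tx, len, size);
}

/* ... and replay it at unmap time */
static void demo_release(struct device *dev, struct demo_tx_buffer *tx)
{
        dma_unmap_single(dev, dma_unmap_addr(tx, dma),
                         dma_unmap_len(tx, len), DMA_TO_DEVICE);
}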
@@ -180,7 +172,6 @@ struct ixgbe_queue_stats {
 struct ixgbe_tx_queue_stats {
        u64 restart_queue;
        u64 tx_busy;
-       u64 completed;
        u64 tx_done_old;
 };
@@ -193,21 +184,15 @@ struct ixgbe_rx_queue_stats {
        u64 csum_err;
 };

-enum ixbge_ring_state_t {
+enum ixgbe_ring_state_t {
        __IXGBE_TX_FDIR_INIT_DONE,
        __IXGBE_TX_DETECT_HANG,
        __IXGBE_HANG_CHECK_ARMED,
-       __IXGBE_RX_PS_ENABLED,
        __IXGBE_RX_RSC_ENABLED,
        __IXGBE_RX_CSUM_UDP_ZERO_ERR,
+       __IXGBE_RX_FCOE_BUFSZ,
 };

-#define ring_is_ps_enabled(ring) \
-       test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define set_ring_ps_enabled(ring) \
-       set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
-#define clear_ring_ps_enabled(ring) \
-       clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
 #define check_for_tx_hang(ring) \
        test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
 #define set_check_for_tx_hang(ring) \
@@ -233,7 +218,6 @@ struct ixgbe_ring {
        u8 __iomem *tail;

        u16 count;                      /* amount of descriptors */
-       u16 rx_buf_len;

        u8 queue_index;                 /* needed for multiqueue queue management */
        u8 reg_idx;                     /* holds the special value that gets
@@ -241,8 +225,13 @@
                                         * associated with this ring, which is
                                         * different for DCB and RSS modes
                                         */
-       u8 atr_sample_rate;
-       u8 atr_count;
+       union {
+               struct {
+                       u8 atr_sample_rate;
+                       u8 atr_count;
+               };
+               u16 next_to_alloc;
+       };

        u16 next_to_use;
        u16 next_to_clean;
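
The union added above costs nothing: the two ATR bytes are only meaningful on Tx rings and next_to_alloc only on Rx rings, so they can share the same two bytes of the structure. A small userspace check of the overlay, using C11 anonymous members and demo types:

#include <stdint.h>
#include <stdio.h>

struct ring_demo {
        union {
                struct {                /* Tx rings: flow director sampling */
                        uint8_t atr_sample_rate;
                        uint8_t atr_count;
                };
                uint16_t next_to_alloc; /* Rx rings: next buffer to allocate */
        };
};

_Static_assert(sizeof(struct ring_demo) == 2, "overlay adds no space");

int main(void)
{
        struct ring_demo r = { .next_to_alloc = 0x0120 };

        /* on a little-endian machine the two bytes alias the u16 */
        printf("sample_rate=0x%02x count=0x%02x\n",
               r.atr_sample_rate, r.atr_count); /* 0x20 and 0x01 on LE */
        return 0;
}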
@@ -287,6 +276,22 @@ struct ixgbe_ring_feature {
        int mask;
 } ____cacheline_internodealigned_in_smp;

+/*
+ * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
+ * this is twice the size of a half page we need to double the page order
+ * for FCoE enabled Rx queues.
+ */
+#if defined(IXGBE_FCOE) && (PAGE_SIZE < 8192)
+static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
+{
+       return test_bit(__IXGBE_RX_FCOE_BUFSZ, &ring->state) ? 1 : 0;
+}
+#else
+#define ixgbe_rx_pg_order(_ring) 0
+#endif
+#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
+#define ixgbe_rx_bufsz(_ring) ((PAGE_SIZE / 2) << ixgbe_rx_pg_order(_ring))
+
 struct ixgbe_ring_container {
        struct ixgbe_ring *ring;        /* pointer to linked list of rings */
        unsigned int total_bytes;       /* total bytes processed this int */
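
Worked numbers for the macros above, assuming 4 KiB pages: order 0 yields half-page 2048-byte buffers, too small for FCoE's 2200-byte minimum, while order 1 doubles both the page allocation and the buffer to 8192/4096 bytes. A standalone arithmetic check:

#include <stdio.h>

#define DEMO_PAGE_SIZE 4096u /* assume 4 KiB pages */

int main(void)
{
        for (unsigned int order = 0; order <= 1; order++)
                printf("order %u: page alloc %5u, rx buffer %4u\n", order,
                       DEMO_PAGE_SIZE << order, (DEMO_PAGE_SIZE / 2) << order);
        return 0; /* order 0 -> 2048-byte buffers; order 1 -> 4096 >= 2200 */
}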
@@ -554,7 +559,7 @@ struct ixgbe_cb {
        };
        dma_addr_t dma;
        u16 append_cnt;
-       bool delay_unmap;
+       bool page_released;
 };
 #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
@@ -625,7 +630,8 @@ extern void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
 extern void ixgbe_do_reset(struct net_device *netdev);
 #ifdef IXGBE_FCOE
 extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
-extern int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+extern int ixgbe_fso(struct ixgbe_ring *tx_ring,
+                    struct ixgbe_tx_buffer *first,
                     u32 tx_flags, u8 *hdr_len);
 extern void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter);
 extern int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
......
@@ -35,6 +35,7 @@
 #include <linux/netdevice.h>
 #include <linux/ethtool.h>
 #include <linux/vmalloc.h>
+#include <linux/highmem.h>
 #include <linux/uaccess.h>

 #include "ixgbe.h"
@@ -1615,7 +1616,6 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
        rx_ring->dev = &adapter->pdev->dev;
        rx_ring->netdev = adapter->netdev;
        rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
-       rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K;

        err = ixgbe_setup_rx_resources(rx_ring);
        if (err) {
@@ -1718,13 +1718,15 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
        frame_size >>= 1;

-       data = rx_buffer->skb->data;
+       data = kmap(rx_buffer->page) + rx_buffer->page_offset;

        if (data[3] != 0xFF ||
            data[frame_size + 10] != 0xBE ||
            data[frame_size + 12] != 0xAF)
                match = false;

+       kunmap(rx_buffer->page);
+
        return match;
 }
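
The loopback check now reads packet data straight out of the receive page rather than an skb, and on 32-bit systems that page may live in highmem with no permanent kernel mapping, hence the new linux/highmem.h include and the kmap/kunmap pair. A kernel-style sketch of the pattern; the helper is hypothetical, not from this commit:

#include <linux/highmem.h>
#include <linux/types.h>

/* check one byte of a (possibly highmem) receive page */
static bool demo_page_byte_is(struct page *page, unsigned int offset, u8 val)
{
        u8 *data = kmap(page);          /* temporary CPU mapping */
        bool match = (data[offset] == val);

        kunmap(page);                   /* 'data' must not be used past this */
        return match;
}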
@@ -1746,17 +1748,22 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
                /* check Rx buffer */
                rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

-               /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
-               dma_unmap_single(rx_ring->dev,
-                                rx_buffer->dma,
-                                rx_ring->rx_buf_len,
-                                DMA_FROM_DEVICE);
-               rx_buffer->dma = 0;
+               /* sync Rx buffer for CPU read */
+               dma_sync_single_for_cpu(rx_ring->dev,
+                                       rx_buffer->dma,
+                                       ixgbe_rx_bufsz(rx_ring),
+                                       DMA_FROM_DEVICE);

                /* verify contents of skb */
                if (ixgbe_check_lbtest_frame(rx_buffer, size))
                        count++;

+               /* sync Rx buffer for device write */
+               dma_sync_single_for_device(rx_ring->dev,
+                                          rx_buffer->dma,
+                                          ixgbe_rx_bufsz(rx_ring),
+                                          DMA_FROM_DEVICE);
+
                /* unmap buffer on Tx side */
                tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
                ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
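
The test path above switches from an unmap/remap cycle per packet to a persistent streaming mapping whose ownership is handed back and forth with sync calls: sync-for-cpu before the CPU reads, sync-for-device before the NIC may write again. A kernel-style sketch of the handoff; the helper is hypothetical, not from this commit:

#include <linux/dma-mapping.h>

static void demo_cpu_peek(struct device *dev, dma_addr_t dma, size_t len)
{
        dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
        /* ... the CPU may now read the buffer contents coherently ... */
        dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
        /* ownership is back with the device; the NIC may DMA into it again */
}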
......
@@ -447,7 +447,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
 /**
  * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
  * @tx_ring: tx desc ring
- * @skb: associated skb
+ * @first: first tx_buffer structure containing skb, tx_flags, and protocol
  * @tx_flags: tx flags
  * @hdr_len: hdr_len to be returned
  *
@@ -455,9 +455,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
  *
  * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
  */
-int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
+int ixgbe_fso(struct ixgbe_ring *tx_ring,
+             struct ixgbe_tx_buffer *first,
              u32 tx_flags, u8 *hdr_len)
 {
+       struct sk_buff *skb = first->skb;
        struct fc_frame_header *fh;
        u32 vlan_macip_lens;
        u32 fcoe_sof_eof = 0;
@@ -530,9 +532,14 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, struct sk_buff *skb,
        *hdr_len = sizeof(struct fcoe_crc_eof);

        /* hdr_len includes fc_hdr if FCoE LSO is enabled */
-       if (skb_is_gso(skb))
-               *hdr_len += (skb_transport_offset(skb) +
-                            sizeof(struct fc_frame_header));
+       if (skb_is_gso(skb)) {
+               *hdr_len += skb_transport_offset(skb) +
+                           sizeof(struct fc_frame_header);
+               /* update gso_segs and bytecount */
+               first->gso_segs = DIV_ROUND_UP(skb->len - *hdr_len,
+                                              skb_shinfo(skb)->gso_size);
+               first->bytecount += (first->gso_segs - 1) * *hdr_len;
+       }

        /* mss_l4len_id: use 1 for FSO as TSO, no need for L4LEN */
        mss_l4len_idx = skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT;
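
The gso_segs/bytecount update above accounts for the FC header being replicated into every segment the hardware emits: payload bytes divide into segments, and each segment beyond the first carries another copy of the header on the wire. Worked numbers under assumed sizes (9014-byte skb, 38-byte replicated header, 2048-byte gso_size):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int skb_len = 9014, hdr_len = 38, gso_size = 2048;
        unsigned int gso_segs = DIV_ROUND_UP(skb_len - hdr_len, gso_size);
        unsigned int bytecount = skb_len + (gso_segs - 1) * hdr_len;

        /* 8976 payload bytes -> 5 segments; header sent 4 extra times */
        printf("segs=%u wire bytes=%u\n", gso_segs, bytecount); /* 5, 9166 */
        return 0;
}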
......