Commit 7b1e46c5 authored by David S. Miller

Merge branch 'ixgbevf'

Aaron Brown says:

====================
Intel Wired LAN Driver Updates

This series contains updates from Emil to ixgbevf.

He cleans up the code by removing the adapter structure as a
parameter from multiple functions in favor of using the ixgbevf_ring
structure, and moves hot-path specific statistics into the ring
structure for anticipated performance gains.
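
As an illustration of that pattern (not the driver's exact code), a
hot-path helper only needs the ring pointer to account packets and
bytes under the ring's u64_stats_sync; the helper name below is
hypothetical:

    /* Minimal sketch: per-ring stat accounting without touching the adapter.
     * ixgbevf_ring_stat_update() is a hypothetical helper, not a driver symbol.
     */
    #include <linux/u64_stats_sync.h>

    static void ixgbevf_ring_stat_update(struct ixgbevf_ring *ring,
                                         unsigned int packets,
                                         unsigned int bytes)
    {
        /* writer side: keeps the 64-bit counters coherent for 32-bit readers */
        u64_stats_update_begin(&ring->syncp);
        ring->stats.packets += packets;
        ring->stats.bytes += bytes;
        u64_stats_update_end(&ring->syncp);
    }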

He also removes the Tx/Rx counters for checksum offload and adds
counters for tx_restart_queue and tx_timeout_count.
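
For context, such a restart counter is typically bumped in the Tx
cleanup path when a previously stopped queue is woken again; a hedged
sketch (the helper name and TX_WAKE_THRESHOLD are assumptions, not
quoted from the driver):

    /* Sketch only: wake a stopped Tx queue once enough descriptors are free
     * and count the event in the ring's tx_stats.
     */
    static void ixgbevf_maybe_wake_tx_queue(struct ixgbevf_ring *tx_ring)
    {
        if (unlikely(ixgbevf_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD &&
                     __netif_subqueue_stopped(tx_ring->netdev,
                                              tx_ring->queue_index))) {
            netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
            ++tx_ring->tx_stats.restart_queue;
        }
    }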

Next he makes it so that the first tx_buffer structure acts as a
central storage location for most of the skb info we are about to
transmit, then takes advantage of the DMA buffer always being
present in the first descriptor and mapped as a single buffer,
allowing a call to dma_unmap_single() which alleviates the need to
check for DMA mapping in ixgbevf_clean_tx_irq().
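
A minimal sketch of the unmap pattern this enables (the helper below is
illustrative, not the driver's ixgbevf_clean_tx_irq()): the skb head is
always recorded in the first buffer via dma_unmap_addr()/dma_unmap_len(),
so cleanup can call dma_unmap_single() for it unconditionally and reserve
dma_unmap_page() for the page fragments.

    /* Sketch only: release the head buffer of a completed transmit. */
    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    static void ixgbevf_unmap_tx_head(struct device *dev,
                                      struct ixgbevf_tx_buffer *tx_buffer)
    {
        /* free the skb, then unmap its linear data as a single buffer */
        dev_kfree_skb_any(tx_buffer->skb);
        dma_unmap_single(dev,
                         dma_unmap_addr(tx_buffer, dma),
                         dma_unmap_len(tx_buffer, len),
                         DMA_TO_DEVICE);
        tx_buffer->skb = NULL;
        dma_unmap_len_set(tx_buffer, len, 0);
    }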

Finally he merges the ixgbevf_tx_map call and the ixgbevf_tx_queue
call into a single function.
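
A heavily simplified, single-descriptor sketch of what such a merged
path looks like (the _sketch name is hypothetical, fragments and error
unwinding are omitted, the ring's DMA device is passed in explicitly,
IXGBEVF_TX_DESC()/IXGBE_ADVTXD_PAYLEN_SHIFT are assumed from the driver
headers, and cmd_type is assumed to already carry the descriptor type
and IXGBE_TXD_CMD end-of-packet bits):

    /* Sketch only: map the skb head and queue one descriptor in a single pass. */
    static void ixgbevf_tx_map_sketch(struct device *dev,
                                      struct ixgbevf_ring *tx_ring,
                                      struct ixgbevf_tx_buffer *first,
                                      u32 cmd_type)
    {
        union ixgbe_adv_tx_desc *tx_desc;
        struct sk_buff *skb = first->skb;
        unsigned int size = skb_headlen(skb);
        unsigned int i = tx_ring->next_to_use;
        dma_addr_t dma;

        dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
            return;

        /* record address/length so cleanup can use dma_unmap_single() */
        dma_unmap_addr_set(first, dma, dma);
        dma_unmap_len_set(first, len, size);

        tx_desc = IXGBEVF_TX_DESC(tx_ring, i);
        tx_desc->read.buffer_addr = cpu_to_le64(dma);
        tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type | size);
        tx_desc->read.olinfo_status =
            cpu_to_le32(size << IXGBE_ADVTXD_PAYLEN_SHIFT);

        /* remember which descriptor signals completion, then bump the tail */
        first->next_to_watch = tx_desc;
        i = (i + 1 == tx_ring->count) ? 0 : i + 1;
        tx_ring->next_to_use = i;

        wmb();  /* descriptor writes must be visible before the doorbell */
        writel(i, tx_ring->tail);
    }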
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 144651d1 29d37fa1
@@ -183,6 +183,7 @@ typedef u32 ixgbe_link_speed;
 #define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */
 #define IXGBE_TXD_CMD_VLE  0x40000000 /* Add VLAN tag */
 #define IXGBE_TXD_STAT_DD  0x00000001 /* Descriptor Done */
+#define IXGBE_TXD_CMD      (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
 
 /* Transmit Descriptor - Advanced */
 union ixgbe_adv_tx_desc {

@@ -77,11 +77,11 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
     {"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
                               stats.saved_reset_vfgotc)},
     {"tx_busy", IXGBEVF_ZSTAT(tx_busy)},
+    {"tx_restart_queue", IXGBEVF_ZSTAT(restart_queue)},
+    {"tx_timeout_count", IXGBEVF_ZSTAT(tx_timeout_count)},
     {"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
                                stats.saved_reset_vfmprc)},
-    {"rx_csum_offload_good", IXGBEVF_ZSTAT(hw_csum_rx_good)},
     {"rx_csum_offload_errors", IXGBEVF_ZSTAT(hw_csum_rx_error)},
-    {"tx_csum_offload_ctxt", IXGBEVF_ZSTAT(hw_csum_tx_good)},
 #ifdef BP_EXTENDED_STATS
     {"rx_bp_poll_yield", IXGBEVF_ZSTAT(bp_rx_yields)},
     {"rx_bp_cleaned", IXGBEVF_ZSTAT(bp_rx_cleaned)},
@@ -305,12 +305,11 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
             /* clone ring and setup updated count */
             tx_ring[i] = *adapter->tx_ring[i];
             tx_ring[i].count = new_tx_count;
-            err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
-            if (!err)
-                continue;
+            err = ixgbevf_setup_tx_resources(&tx_ring[i]);
+            if (err) {
                 while (i) {
                     i--;
-                    ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+                    ixgbevf_free_tx_resources(&tx_ring[i]);
                 }
 
                 vfree(tx_ring);
@@ -319,6 +318,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
                 goto clear_reset;
             }
         }
+    }
 
     if (new_rx_count != adapter->rx_ring_count) {
         rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
@@ -331,12 +331,11 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
             /* clone ring and setup updated count */
             rx_ring[i] = *adapter->rx_ring[i];
             rx_ring[i].count = new_rx_count;
-            err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
-            if (!err)
-                continue;
+            err = ixgbevf_setup_rx_resources(&rx_ring[i]);
+            if (err) {
                 while (i) {
                     i--;
-                    ixgbevf_free_rx_resources(adapter, &rx_ring[i]);
+                    ixgbevf_free_rx_resources(&rx_ring[i]);
                 }
 
                 vfree(rx_ring);
@@ -345,6 +344,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
                 goto clear_reset;
             }
         }
+    }
 
     /* bring interface down to prepare for update */
     ixgbevf_down(adapter);
@@ -352,7 +352,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* Tx */
     if (tx_ring) {
         for (i = 0; i < adapter->num_tx_queues; i++) {
-            ixgbevf_free_tx_resources(adapter, adapter->tx_ring[i]);
+            ixgbevf_free_tx_resources(adapter->tx_ring[i]);
             *adapter->tx_ring[i] = tx_ring[i];
         }
         adapter->tx_ring_count = new_tx_count;
@@ -364,7 +364,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* Rx */
     if (rx_ring) {
         for (i = 0; i < adapter->num_rx_queues; i++) {
-            ixgbevf_free_rx_resources(adapter, adapter->rx_ring[i]);
+            ixgbevf_free_rx_resources(adapter->rx_ring[i]);
             *adapter->rx_ring[i] = rx_ring[i];
         }
         adapter->rx_ring_count = new_rx_count;
@@ -380,7 +380,7 @@ static int ixgbevf_set_ringparam(struct net_device *netdev,
     /* free Tx resources if Rx error is encountered */
     if (tx_ring) {
         for (i = 0; i < adapter->num_tx_queues; i++)
-            ixgbevf_free_tx_resources(adapter, &tx_ring[i]);
+            ixgbevf_free_tx_resources(&tx_ring[i]);
         vfree(tx_ring);
     }
@@ -411,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
         tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
     for (i = 0; i < adapter->num_rx_queues; i++) {
-        rx_yields += adapter->rx_ring[i]->bp_yields;
-        rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
-        rx_yields += adapter->rx_ring[i]->bp_yields;
+        rx_yields += adapter->rx_ring[i]->stats.yields;
+        rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+        rx_yields += adapter->rx_ring[i]->stats.yields;
     }
 
     for (i = 0; i < adapter->num_tx_queues; i++) {
-        tx_yields += adapter->tx_ring[i]->bp_yields;
-        tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
-        tx_yields += adapter->tx_ring[i]->bp_yields;
+        tx_yields += adapter->tx_ring[i]->stats.yields;
+        tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+        tx_yields += adapter->tx_ring[i]->stats.yields;
     }
 
     adapter->bp_rx_yields = rx_yields;

@@ -46,12 +46,15 @@
 /* wrapper around a pointer to a socket buffer,
  * so a DMA handle can be stored along with the buffer */
 struct ixgbevf_tx_buffer {
-    struct sk_buff *skb;
-    dma_addr_t dma;
-    unsigned long time_stamp;
     union ixgbe_adv_tx_desc *next_to_watch;
-    u16 length;
-    u16 mapped_as_page;
+    unsigned long time_stamp;
+    struct sk_buff *skb;
+    unsigned int bytecount;
+    unsigned short gso_segs;
+    __be16 protocol;
+    DEFINE_DMA_UNMAP_ADDR(dma);
+    DEFINE_DMA_UNMAP_LEN(len);
+    u32 tx_flags;
 };
 
 struct ixgbevf_rx_buffer {
@@ -59,6 +62,29 @@ struct ixgbevf_rx_buffer {
     dma_addr_t dma;
 };
 
+struct ixgbevf_stats {
+    u64 packets;
+    u64 bytes;
+#ifdef BP_EXTENDED_STATS
+    u64 yields;
+    u64 misses;
+    u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+    u64 restart_queue;
+    u64 tx_busy;
+    u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+    u64 non_eop_descs;
+    u64 alloc_rx_page_failed;
+    u64 alloc_rx_buff_failed;
+    u64 csum_err;
+};
+
 struct ixgbevf_ring {
     struct ixgbevf_ring *next;
     struct net_device *netdev;
@@ -70,22 +96,19 @@ struct ixgbevf_ring {
     unsigned int next_to_use;
     unsigned int next_to_clean;
-    int queue_index; /* needed for multiqueue queue management */
     union {
         struct ixgbevf_tx_buffer *tx_buffer_info;
         struct ixgbevf_rx_buffer *rx_buffer_info;
     };
-    u64 total_bytes;
-    u64 total_packets;
+    struct ixgbevf_stats stats;
     struct u64_stats_sync syncp;
+    union {
+        struct ixgbevf_tx_queue_stats tx_stats;
+        struct ixgbevf_rx_queue_stats rx_stats;
+    };
     u64 hw_csum_rx_error;
-    u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
-    u64 bp_yields;
-    u64 bp_misses;
-    u64 bp_cleaned;
-#endif
     u8 __iomem *tail;
 
     u16 reg_idx; /* holds the special value that gets the hardware register
@@ -93,6 +116,7 @@ struct ixgbevf_ring {
                   * for DCB and RSS modes */
     u16 rx_buf_len;
+    int queue_index; /* needed for multiqueue queue management */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -123,8 +147,6 @@ struct ixgbevf_ring {
 #define IXGBE_TX_FLAGS_VLAN (u32)(1 << 1)
 #define IXGBE_TX_FLAGS_TSO (u32)(1 << 2)
 #define IXGBE_TX_FLAGS_IPV4 (u32)(1 << 3)
-#define IXGBE_TX_FLAGS_FCOE (u32)(1 << 4)
-#define IXGBE_TX_FLAGS_FSO (u32)(1 << 5)
 #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000
 #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0x0000e000
 #define IXGBE_TX_FLAGS_VLAN_SHIFT 16
@@ -186,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
         q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
         rc = false;
 #ifdef BP_EXTENDED_STATS
-        q_vector->tx.ring->bp_yields++;
+        q_vector->tx.ring->stats.yields++;
 #endif
     } else {
         /* we don't care if someone yielded */
@@ -221,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
         q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
         rc = false;
 #ifdef BP_EXTENDED_STATS
-        q_vector->rx.ring->bp_yields++;
+        q_vector->rx.ring->stats.yields++;
 #endif
     } else {
         /* preserve yield marks */
@@ -314,7 +336,6 @@ static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
 struct ixgbevf_adapter {
     struct timer_list watchdog_timer;
     unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
-    u16 bd_number;
     struct work_struct reset_task;
     struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
@@ -327,25 +348,18 @@ struct ixgbevf_adapter {
     u32 eims_other;
 
     /* TX */
-    struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
     int num_tx_queues;
+    struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
     u64 restart_queue;
-    u64 hw_csum_tx_good;
-    u64 lsc_int;
-    u64 hw_tso_ctxt;
-    u64 hw_tso6_ctxt;
     u32 tx_timeout_count;
 
     /* RX */
-    struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
     int num_rx_queues;
+    struct ixgbevf_ring *rx_ring[MAX_TX_QUEUES]; /* One per active queue */
     u64 hw_csum_rx_error;
     u64 hw_rx_no_dma_resources;
-    u64 hw_csum_rx_good;
     u64 non_eop_descs;
     int num_msix_vectors;
-    struct msix_entry *msix_entries;
     u32 alloc_rx_page_failed;
     u32 alloc_rx_buff_failed;
@@ -357,6 +371,8 @@ struct ixgbevf_adapter {
 #define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 1)
 #define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED (u32)(1 << 2)
 
+    struct msix_entry *msix_entries;
+
     /* OS defined structs */
     struct net_device *netdev;
     struct pci_dev *pdev;
@@ -364,10 +380,12 @@ struct ixgbevf_adapter {
     /* structs defined in ixgbe_vf.h */
     struct ixgbe_hw hw;
     u16 msg_enable;
-    struct ixgbevf_hw_stats stats;
+    u16 bd_number;
     /* Interrupt Throttle Rate */
     u32 eitr_param;
 
+    struct ixgbevf_hw_stats stats;
+
     unsigned long state;
     u64 tx_busy;
     unsigned int tx_ring_count;
@@ -386,9 +404,9 @@ struct ixgbevf_adapter {
     u32 link_speed;
     bool link_up;
 
-    struct work_struct watchdog_task;
     spinlock_t mbx_lock;
+    struct work_struct watchdog_task;
 };
 
 enum ixbgevf_state_t {
@@ -420,10 +438,10 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter);
 void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
 void ixgbevf_reset(struct ixgbevf_adapter *adapter);
 void ixgbevf_set_ethtool_ops(struct net_device *netdev);
-int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_rx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
-void ixgbevf_free_tx_resources(struct ixgbevf_adapter *, struct ixgbevf_ring *);
+int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
+int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
+void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
 int ethtool_ioctl(struct ifreq *ifr);
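
A closing note on the new per-ring counters above: the stats/syncp pair
is meant to be read with the u64_stats fetch helpers so 32-bit readers
see a consistent snapshot. A minimal sketch of the reader side (the
helper name is illustrative, not a driver symbol):

    /* Sketch only: consistent snapshot of one ring's packet/byte counters. */
    #include <linux/u64_stats_sync.h>

    static void ixgbevf_get_ring_stats(const struct ixgbevf_ring *ring,
                                       u64 *packets, u64 *bytes)
    {
        unsigned int start;

        do {
            start = u64_stats_fetch_begin(&ring->syncp);
            *packets = ring->stats.packets;
            *bytes = ring->stats.bytes;
        } while (u64_stats_fetch_retry(&ring->syncp, start));
    }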