Commit e694e964 authored by Alexander Duyck, committed by David S. Miller

igb: place a pointer to the netdev struct in the ring itself

This change adds a pointer to the netdev to the ring itself. The idea is that,
at some point in the future, it will be possible to support multiple netdevs
from a single adapter struct.
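
As a rough illustration of the pattern this enables (a standalone sketch using
made-up stand-in types, not the driver's real struct igb_ring or struct
net_device), once each ring carries its own netdev back-pointer, ring-scoped
code can reach the right net device directly instead of detouring through the
shared adapter struct:

#include <stdio.h>

/* Stand-in for struct net_device, illustration only. */
struct fake_netdev {
	const char *name;
};

/* Stand-in for struct igb_ring; the netdev member mirrors what this patch adds. */
struct fake_ring {
	struct fake_netdev *netdev;   /* back pointer to the owning net device */
	unsigned int queue_index;
};

/* A ring-scoped helper no longer needs the adapter struct (or an extra
 * netdev argument) to identify its device. */
static void report_ring(const struct fake_ring *ring)
{
	printf("queue %u belongs to %s\n", ring->queue_index, ring->netdev->name);
}

int main(void)
{
	struct fake_netdev dev = { .name = "eth0" };
	struct fake_ring tx_ring = { .netdev = &dev, .queue_index = 0 };

	report_ring(&tx_ring);
	return 0;
}

In the patch below this is what lets helpers such as __igb_maybe_stop_tx() and
igb_xmit_frame_ring_adv() drop their struct net_device * parameter.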
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 85ad76b2
@@ -175,9 +175,10 @@ struct igb_q_vector {
 
 struct igb_ring {
 	struct igb_q_vector *q_vector; /* backlink to q_vector */
-	void *desc;                    /* descriptor ring memory */
+	struct net_device *netdev;     /* back pointer to net_device */
 	struct pci_dev *pdev;          /* pci device for dma mapping */
 	dma_addr_t dma;                /* phys address of the ring */
+	void *desc;                    /* descriptor ring memory */
 	unsigned int size;             /* length of desc. ring in bytes */
 	unsigned int count;            /* number of desc. in the ring */
 	u16 next_to_use;
@@ -101,7 +101,6 @@ static void igb_update_phy_info(unsigned long);
 static void igb_watchdog(unsigned long);
 static void igb_watchdog_task(struct work_struct *);
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *,
-					   struct net_device *,
 					   struct igb_ring *);
 static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 				      struct net_device *);
@@ -437,6 +436,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->tx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
 		/* For 82575, context index must be unique per ring. */
 		if (adapter->hw.mac.type == e1000_82575)
 			ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
@@ -447,6 +447,7 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
 		ring->count = adapter->rx_ring_count;
 		ring->queue_index = i;
 		ring->pdev = adapter->pdev;
+		ring->netdev = adapter->netdev;
 		ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
 		ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
 		/* set flag indicating ring supports SCTP checksum offload */
@@ -3550,9 +3551,10 @@ static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
 	mmiowb();
 }
 
-static int __igb_maybe_stop_tx(struct net_device *netdev,
-			       struct igb_ring *tx_ring, int size)
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
+	struct net_device *netdev = tx_ring->netdev;
+
 	netif_stop_subqueue(netdev, tx_ring->queue_index);
 
 	/* Herbert's original patch had:
@@ -3571,19 +3573,17 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
 	return 0;
 }
 
-static int igb_maybe_stop_tx(struct net_device *netdev,
-			     struct igb_ring *tx_ring, int size)
+static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
 {
 	if (igb_desc_unused(tx_ring) >= size)
 		return 0;
-	return __igb_maybe_stop_tx(netdev, tx_ring, size);
+	return __igb_maybe_stop_tx(tx_ring, size);
 }
 
 static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
-					   struct net_device *netdev,
 					   struct igb_ring *tx_ring)
 {
-	struct igb_adapter *adapter = netdev_priv(netdev);
+	struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
 	unsigned int first;
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
@@ -3606,7 +3606,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	 *       + 1 desc for skb->data,
 	 *       + 1 desc for context descriptor,
 	 * otherwise try next time */
-	if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
+	if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
 		/* this is a hard error */
 		return NETDEV_TX_BUSY;
 	}
@@ -3665,7 +3665,7 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
 	igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
 
 	/* Make sure there is space in the ring for the next send. */
-	igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
+	igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
 
 	return NETDEV_TX_OK;
 }
@@ -3684,7 +3684,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
 	 * to a flow. Right now, performance is impacted slightly negatively
 	 * if using multiple tx queues. If the stack breaks away from a
 	 * single qdisc implementation, we can look at this again. */
-	return igb_xmit_frame_ring_adv(skb, netdev, tx_ring);
+	return igb_xmit_frame_ring_adv(skb, tx_ring);
 }
 
 /**
@@ -4667,7 +4667,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct igb_ring *tx_ring = q_vector->tx_ring;
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = tx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;
@@ -4841,8 +4841,8 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 				 int *work_done, int budget)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
-	struct net_device *netdev = adapter->netdev;
 	struct igb_ring *rx_ring = q_vector->rx_ring;
+	struct net_device *netdev = rx_ring->netdev;
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = rx_ring->pdev;
 	union e1000_adv_rx_desc *rx_desc , *next_rxd;
@@ -5018,8 +5018,7 @@ static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
 static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
 				     int cleaned_count)
 {
-	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
-	struct net_device *netdev = adapter->netdev;
+	struct net_device *netdev = rx_ring->netdev;
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_buffer *buffer_info;
 	struct sk_buff *skb;