Commit c1ebf46c authored by David S. Miller

igb: Support netdev_ops->ndo_xmit_flush()

Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4798248e
...@@ -136,6 +136,7 @@ static void igb_update_phy_info(unsigned long); ...@@ -136,6 +136,7 @@ static void igb_update_phy_info(unsigned long);
static void igb_watchdog(unsigned long); static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *); static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *); static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static void igb_xmit_flush(struct net_device *netdev, u16 queue);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev, static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats); struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int); static int igb_change_mtu(struct net_device *, int);
...@@ -2075,6 +2076,7 @@ static const struct net_device_ops igb_netdev_ops = { ...@@ -2075,6 +2076,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_open = igb_open, .ndo_open = igb_open,
.ndo_stop = igb_close, .ndo_stop = igb_close,
.ndo_start_xmit = igb_xmit_frame, .ndo_start_xmit = igb_xmit_frame,
.ndo_xmit_flush = igb_xmit_flush,
.ndo_get_stats64 = igb_get_stats64, .ndo_get_stats64 = igb_get_stats64,
.ndo_set_rx_mode = igb_set_rx_mode, .ndo_set_rx_mode = igb_set_rx_mode,
.ndo_set_mac_address = igb_set_mac, .ndo_set_mac_address = igb_set_mac,
...@@ -4915,13 +4917,6 @@ static void igb_tx_map(struct igb_ring *tx_ring, ...@@ -4915,13 +4917,6 @@ static void igb_tx_map(struct igb_ring *tx_ring,
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
writel(i, tx_ring->tail);
/* we need this if more than one processor can write to our tail
* at a time, it synchronizes IO on IA64/Altix systems
*/
mmiowb();
return; return;
dma_error: dma_error:
...@@ -5057,17 +5052,20 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, ...@@ -5057,17 +5052,20 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
return NETDEV_TX_OK; return NETDEV_TX_OK;
} }
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter, static struct igb_ring *__igb_tx_queue_mapping(struct igb_adapter *adapter, unsigned int r_idx)
struct sk_buff *skb)
{ {
unsigned int r_idx = skb->queue_mapping;
if (r_idx >= adapter->num_tx_queues) if (r_idx >= adapter->num_tx_queues)
r_idx = r_idx % adapter->num_tx_queues; r_idx = r_idx % adapter->num_tx_queues;
return adapter->tx_ring[r_idx]; return adapter->tx_ring[r_idx];
} }
/* Select the Tx ring for an skb from the queue mapping recorded on it. */
static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
						    struct sk_buff *skb)
{
	unsigned int queue = skb->queue_mapping;

	return __igb_tx_queue_mapping(adapter, queue);
}
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
struct net_device *netdev) struct net_device *netdev)
{ {
...@@ -5096,6 +5094,21 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, ...@@ -5096,6 +5094,21 @@ static netdev_tx_t igb_xmit_frame(struct sk_buff *skb,
return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb)); return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
} }
/* ndo_xmit_flush handler: kick the hardware by writing the ring's
 * next_to_use position to its tail register, deferring the MMIO write
 * that igb_tx_map() no longer performs per packet.
 */
static void igb_xmit_flush(struct net_device *netdev, u16 queue)
{
	struct igb_adapter *adapter = netdev_priv(netdev);
	struct igb_ring *tx_ring = __igb_tx_queue_mapping(adapter, queue);

	writel(tx_ring->next_to_use, tx_ring->tail);

	/* Needed when more than one processor can write to our tail at a
	 * time; it synchronizes IO on IA64/Altix systems.
	 */
	mmiowb();
}
/** /**
* igb_tx_timeout - Respond to a Tx Hang * igb_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure * @netdev: network interface device structure
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment