Commit 0d22be52 authored by Jiawen Wu's avatar Jiawen Wu Committed by David S. Miller

net: txgbe: Support Rx and Tx process path

Clean Rx and Tx ring interrupts, process packets in the data path.
Signed-off-by: Jiawen Wu <jiawenwu@trustnetic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 09a50880
...@@ -223,6 +223,10 @@ static void txgbe_up_complete(struct wx *wx) ...@@ -223,6 +223,10 @@ static void txgbe_up_complete(struct wx *wx)
wx_control_hw(wx, true); wx_control_hw(wx, true);
wx_configure_vectors(wx); wx_configure_vectors(wx);
/* make sure to complete pre-operations */
smp_mb__before_atomic();
wx_napi_enable_all(wx);
/* clear any pending interrupts, may auto mask */ /* clear any pending interrupts, may auto mask */
rd32(wx, WX_PX_IC); rd32(wx, WX_PX_IC);
rd32(wx, WX_PX_MISC_IC); rd32(wx, WX_PX_MISC_IC);
...@@ -236,6 +240,10 @@ static void txgbe_up_complete(struct wx *wx) ...@@ -236,6 +240,10 @@ static void txgbe_up_complete(struct wx *wx)
wr32(wx, WX_MAC_WDG_TIMEOUT, reg); wr32(wx, WX_MAC_WDG_TIMEOUT, reg);
reg = rd32(wx, WX_MAC_TX_CFG); reg = rd32(wx, WX_MAC_TX_CFG);
wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G); wr32(wx, WX_MAC_TX_CFG, (reg & ~WX_MAC_TX_CFG_SPEED_MASK) | WX_MAC_TX_CFG_SPEED_10G);
/* enable transmits */
netif_tx_start_all_queues(wx->netdev);
netif_carrier_on(wx->netdev);
} }
static void txgbe_reset(struct wx *wx) static void txgbe_reset(struct wx *wx)
...@@ -268,10 +276,12 @@ static void txgbe_disable_device(struct wx *wx) ...@@ -268,10 +276,12 @@ static void txgbe_disable_device(struct wx *wx)
/* this call also flushes the previous write */ /* this call also flushes the previous write */
wx_disable_rx_queue(wx, wx->rx_ring[i]); wx_disable_rx_queue(wx, wx->rx_ring[i]);
netif_tx_stop_all_queues(netdev);
netif_carrier_off(netdev); netif_carrier_off(netdev);
netif_tx_disable(netdev); netif_tx_disable(netdev);
wx_irq_disable(wx); wx_irq_disable(wx);
wx_napi_disable_all(wx);
if (wx->bus.func < 2) if (wx->bus.func < 2)
wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0); wr32m(wx, TXGBE_MIS_PRB_CTL, TXGBE_MIS_PRB_CTL_LAN_UP(wx->bus.func), 0);
...@@ -300,6 +310,9 @@ static void txgbe_down(struct wx *wx) ...@@ -300,6 +310,9 @@ static void txgbe_down(struct wx *wx)
{ {
txgbe_disable_device(wx); txgbe_disable_device(wx);
txgbe_reset(wx); txgbe_reset(wx);
wx_clean_all_tx_rings(wx);
wx_clean_all_rx_rings(wx);
} }
/** /**
...@@ -381,10 +394,21 @@ static int txgbe_open(struct net_device *netdev) ...@@ -381,10 +394,21 @@ static int txgbe_open(struct net_device *netdev)
if (err) if (err)
goto err_free_isb; goto err_free_isb;
/* Notify the stack of the actual queue counts. */
err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues);
if (err)
goto err_free_irq;
err = netif_set_real_num_rx_queues(netdev, wx->num_rx_queues);
if (err)
goto err_free_irq;
txgbe_up_complete(wx); txgbe_up_complete(wx);
return 0; return 0;
err_free_irq:
wx_free_irq(wx);
err_free_isb: err_free_isb:
wx_free_isb_resources(wx); wx_free_isb_resources(wx);
err_reset: err_reset:
...@@ -403,8 +427,6 @@ static int txgbe_open(struct net_device *netdev) ...@@ -403,8 +427,6 @@ static int txgbe_open(struct net_device *netdev)
static void txgbe_close_suspend(struct wx *wx) static void txgbe_close_suspend(struct wx *wx)
{ {
txgbe_disable_device(wx); txgbe_disable_device(wx);
wx_free_irq(wx);
wx_free_resources(wx); wx_free_resources(wx);
} }
...@@ -461,19 +483,14 @@ static void txgbe_shutdown(struct pci_dev *pdev) ...@@ -461,19 +483,14 @@ static void txgbe_shutdown(struct pci_dev *pdev)
} }
} }
static netdev_tx_t txgbe_xmit_frame(struct sk_buff *skb,
struct net_device *netdev)
{
return NETDEV_TX_OK;
}
static const struct net_device_ops txgbe_netdev_ops = { static const struct net_device_ops txgbe_netdev_ops = {
.ndo_open = txgbe_open, .ndo_open = txgbe_open,
.ndo_stop = txgbe_close, .ndo_stop = txgbe_close,
.ndo_start_xmit = txgbe_xmit_frame, .ndo_start_xmit = wx_xmit_frame,
.ndo_set_rx_mode = wx_set_rx_mode, .ndo_set_rx_mode = wx_set_rx_mode,
.ndo_validate_addr = eth_validate_addr, .ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = wx_set_mac, .ndo_set_mac_address = wx_set_mac,
.ndo_get_stats64 = wx_get_stats64,
}; };
/** /**
...@@ -647,6 +664,8 @@ static int txgbe_probe(struct pci_dev *pdev, ...@@ -647,6 +664,8 @@ static int txgbe_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, wx); pci_set_drvdata(pdev, wx);
netif_tx_stop_all_queues(netdev);
/* calculate the expected PCIe bandwidth required for optimal /* calculate the expected PCIe bandwidth required for optimal
* performance. Note that some older parts will never have enough * performance. Note that some older parts will never have enough
* bandwidth due to being older generation PCIe parts. We clamp these * bandwidth due to being older generation PCIe parts. We clamp these
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment