Commit 85b430b4 authored by Alexander Duyck, committed by David S. Miller

igb: move the tx and rx ring specific config into separate functions

This change makes the Tx and Rx configuration a bit cleaner by breaking the
ring-specific configuration out of the generic Rx and Tx configuration paths.
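
As an illustration of the split (a sketch only, not part of this patch;
igb_reinit_tx_queue() is a hypothetical caller), the per-ring setup can now be
reused without rerunning the whole Tx path:

	/* hypothetical example, not in this patch: bring up a single Tx queue
	 * by reusing the per-ring helper added below.
	 */
	static void igb_reinit_tx_queue(struct igb_adapter *adapter, int queue)
	{
		if (queue < adapter->num_tx_queues)
			igb_configure_tx_ring(adapter, &adapter->tx_ring[queue]);
	}
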
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 7d95b717
@@ -87,9 +87,13 @@ struct vf_data_storage {
  * descriptors until either it has this many to write back, or the
  * ITR timer expires.
  */
-#define IGB_RX_PTHRESH                     16
+#define IGB_RX_PTHRESH                     (hw->mac.type <= e1000_82576 ? 16 : 8)
 #define IGB_RX_HTHRESH                     8
 #define IGB_RX_WTHRESH                     1
+#define IGB_TX_PTHRESH                     8
+#define IGB_TX_HTHRESH                     1
+#define IGB_TX_WTHRESH                     ((hw->mac.type == e1000_82576 && \
+                                             adapter->msix_entries) ? 0 : 16)
 
 /* this is the size past which hardware will drop packets when setting LPE=0 */
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
...
@@ -90,6 +90,7 @@ static int igb_open(struct net_device *);
 static int igb_close(struct net_device *);
 static void igb_configure_tx(struct igb_adapter *);
 static void igb_configure_rx(struct igb_adapter *);
+static void igb_setup_tctl(struct igb_adapter *);
 static void igb_setup_rctl(struct igb_adapter *);
 static void igb_clean_all_tx_rings(struct igb_adapter *);
 static void igb_clean_all_rx_rings(struct igb_adapter *);
@@ -1101,8 +1102,10 @@ static void igb_configure(struct igb_adapter *adapter)
 	igb_restore_vlan(adapter);
-	igb_configure_tx(adapter);
+	igb_setup_tctl(adapter);
 	igb_setup_rctl(adapter);
+
+	igb_configure_tx(adapter);
 	igb_configure_rx(adapter);
 
 	igb_rx_fifo_flush_82575(&adapter->hw);
@@ -2069,48 +2072,15 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
 }
 
 /**
- * igb_configure_tx - Configure transmit Unit after Reset
- * @adapter: board private structure
- *
- * Configure the Tx unit of the MAC after a reset.
+ * igb_setup_tctl - configure the transmit control registers
+ * @adapter: Board private structure
  **/
-static void igb_configure_tx(struct igb_adapter *adapter)
+static void igb_setup_tctl(struct igb_adapter *adapter)
 {
-	u64 tdba;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 tctl;
-	u32 txdctl, txctrl;
-	int i, j;
-
-	for (i = 0; i < adapter->num_tx_queues; i++) {
-		struct igb_ring *ring = &adapter->tx_ring[i];
-		j = ring->reg_idx;
-		wr32(E1000_TDLEN(j),
-		     ring->count * sizeof(union e1000_adv_tx_desc));
-		tdba = ring->dma;
-		wr32(E1000_TDBAL(j),
-		     tdba & 0x00000000ffffffffULL);
-		wr32(E1000_TDBAH(j), tdba >> 32);
-
-		ring->head = E1000_TDH(j);
-		ring->tail = E1000_TDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-		txdctl = rd32(E1000_TXDCTL(j));
-		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
-		wr32(E1000_TXDCTL(j), txdctl);
-
-		/* Turn off Relaxed Ordering on head write-backs.  The
-		 * writebacks MUST be delivered in order or it will
-		 * completely screw up our bookeeping.
-		 */
-		txctrl = rd32(E1000_DCA_TXCTRL(j));
-		txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
-		wr32(E1000_DCA_TXCTRL(j), txctrl);
-	}
-
-	/* disable queue 0 to prevent tail bump w/o re-configuration */
-	if (adapter->vfs_allocated_count)
-		wr32(E1000_TXDCTL(0), 0);
+
+	/* disable queue 0 which is enabled by default on 82575 and 82576 */
+	wr32(E1000_TXDCTL(0), 0);
 
 	/* Program the Transmit Control Register */
@@ -2121,15 +2091,70 @@ static void igb_configure_tx(struct igb_adapter *adapter)
 
 	igb_config_collision_dist(hw);
 
-	/* Setup Transmit Descriptor Settings for eop descriptor */
-	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
-
 	/* Enable transmits */
 	tctl |= E1000_TCTL_EN;
 
 	wr32(E1000_TCTL, tctl);
 }
 
+/**
+ * igb_configure_tx_ring - Configure transmit ring after Reset
+ * @adapter: board private structure
+ * @ring: tx ring to configure
+ *
+ * Configure a transmit ring after a reset.
+ **/
+static void igb_configure_tx_ring(struct igb_adapter *adapter,
+				  struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txdctl;
+	u64 tdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+
+	/* disable the queue */
+	txdctl = rd32(E1000_TXDCTL(reg_idx));
+	wr32(E1000_TXDCTL(reg_idx),
+	     txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
+	wrfl();
+	mdelay(10);
+
+	wr32(E1000_TDLEN(reg_idx),
+	     ring->count * sizeof(union e1000_adv_tx_desc));
+	wr32(E1000_TDBAL(reg_idx),
+	     tdba & 0x00000000ffffffffULL);
+	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+	ring->head = E1000_TDH(reg_idx);
+	ring->tail = E1000_TDT(reg_idx);
+	writel(0, hw->hw_addr + ring->tail);
+	writel(0, hw->hw_addr + ring->head);
+
+	txdctl |= IGB_TX_PTHRESH;
+	txdctl |= IGB_TX_HTHRESH << 8;
+	txdctl |= IGB_TX_WTHRESH << 16;
+	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+	wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ * igb_configure_tx - Configure transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
+}
+
 /**
  * igb_setup_rx_resources - allocate Rx resources (Descriptors)
  * @adapter: board private structure
@@ -2333,6 +2358,49 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
 	wr32(E1000_VT_CTL, vtctl);
 }
 
+/**
+ * igb_configure_rx_ring - Configure a receive ring after Reset
+ * @adapter: board private structure
+ * @ring: receive ring to be configured
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx_ring(struct igb_adapter *adapter,
+				  struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+	u32 rxdctl;
+
+	/* disable the queue */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	wr32(E1000_RXDCTL(reg_idx),
+	     rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
+
+	/* Set DMA base address registers */
+	wr32(E1000_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+	wr32(E1000_RDLEN(reg_idx),
+	     ring->count * sizeof(union e1000_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->head = E1000_RDH(reg_idx);
+	ring->tail = E1000_RDT(reg_idx);
+	writel(0, hw->hw_addr + ring->head);
+	writel(0, hw->hw_addr + ring->tail);
+
+	/* enable receive descriptor fetching */
+	rxdctl = rd32(E1000_RXDCTL(reg_idx));
+	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+	rxdctl &= 0xFFF00000;
+	rxdctl |= IGB_RX_PTHRESH;
+	rxdctl |= IGB_RX_HTHRESH << 8;
+	rxdctl |= IGB_RX_WTHRESH << 16;
+	wr32(E1000_RXDCTL(reg_idx), rxdctl);
+}
+
 /**
  * igb_configure_rx - Configure receive Unit after Reset
  * @adapter: board private structure
@@ -2341,10 +2409,8 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
  **/
 static void igb_configure_rx(struct igb_adapter *adapter)
 {
-	u64 rdba;
 	struct e1000_hw *hw = &adapter->hw;
 	u32 rctl, rxcsum;
-	u32 rxdctl;
 	int i;
 
 	/* disable receives while setting up the descriptors */
@@ -2358,29 +2424,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)
 
 	/* Setup the HW Rx Head and Tail Descriptor Pointers and
 	 * the Base and Length of the Rx Descriptor Ring */
-	for (i = 0; i < adapter->num_rx_queues; i++) {
-		struct igb_ring *ring = &adapter->rx_ring[i];
-		int j = ring->reg_idx;
-		rdba = ring->dma;
-		wr32(E1000_RDBAL(j),
-		     rdba & 0x00000000ffffffffULL);
-		wr32(E1000_RDBAH(j), rdba >> 32);
-		wr32(E1000_RDLEN(j),
-		     ring->count * sizeof(union e1000_adv_rx_desc));
-
-		ring->head = E1000_RDH(j);
-		ring->tail = E1000_RDT(j);
-		writel(0, hw->hw_addr + ring->tail);
-		writel(0, hw->hw_addr + ring->head);
-
-		rxdctl = rd32(E1000_RXDCTL(j));
-		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
-		rxdctl &= 0xFFF00000;
-		rxdctl |= IGB_RX_PTHRESH;
-		rxdctl |= IGB_RX_HTHRESH << 8;
-		rxdctl |= IGB_RX_WTHRESH << 16;
-		wr32(E1000_RXDCTL(j), rxdctl);
-	}
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
 
 	if (adapter->num_rx_queues > 1) {
 		u32 random[10];
...