Commit 316ad4be authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-03-13

This series contains updates to ixgbe and ixgbevf.

Don adds additional support for the X550 MAC types, which require extra
steps around enabling and disabling Rx.  He also cleans up a variable
type inconsistency.
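
As a rough illustration of the shape of that change (not the driver code
itself): Rx enable and disable are routed through per-MAC function
pointers, so X550 can add its firmware handshake while older MACs keep a
plain RXCTRL register write.  A minimal userspace C sketch of the pattern
follows; the struct layout, the RXCTRL_RXEN value, and the firmware stub
are illustrative stand-ins, not the real ixgbe definitions.

    #include <stdbool.h>
    #include <stdio.h>

    #define RXCTRL_RXEN 0x1u              /* stand-in for IXGBE_RXCTRL_RXEN */

    struct hw;

    struct mac_ops {                      /* stand-in for ixgbe_mac_operations */
            void (*enable_rx)(struct hw *hw);
            void (*disable_rx)(struct hw *hw);
    };

    struct hw {
            unsigned int rxctrl;          /* pretend RXCTRL register */
            bool fw_ok;                   /* does the firmware command succeed? */
            struct mac_ops ops;
    };

    /* generic path: flip RXEN directly, as 82598/82599/X540 do */
    static void enable_rx_generic(struct hw *hw)  { hw->rxctrl |= RXCTRL_RXEN; }
    static void disable_rx_generic(struct hw *hw) { hw->rxctrl &= ~RXCTRL_RXEN; }

    /* X550-style path: ask firmware to stop Rx, fall back to a register write */
    static void disable_rx_x550(struct hw *hw)
    {
            if (hw->fw_ok) {
                    printf("firmware DISABLE_RXEN command issued\n");
                    hw->rxctrl &= ~RXCTRL_RXEN;
            } else {
                    disable_rx_generic(hw);
            }
    }

    int main(void)
    {
            struct hw x550 = {
                    .rxctrl = RXCTRL_RXEN,
                    .fw_ok  = true,
                    .ops    = { .enable_rx  = enable_rx_generic,
                                .disable_rx = disable_rx_x550 },
            };

            /* callers no longer poke RXCTRL directly; they go through the ops */
            x550.ops.disable_rx(&x550);
            x550.ops.enable_rx(&x550);
            printf("RXEN now %s\n", (x550.rxctrl & RXCTRL_RXEN) ? "set" : "clear");
            return 0;
    }

The real series adds .enable_rx/.disable_rx callbacks to struct
ixgbe_mac_operations and converts the direct IXGBE_RXCTRL writes to use
them, as the hunks below show.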

I provide a patch to allow relaxed ordering to remain enabled on SPARC
architectures.  I also clean up ixgbevf whitespace and code comments to
align the driver with the networking coding standard, and lastly replace
uses of memcpy() where ether_addr_copy() could have been used.
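
On the last point, ether_addr_copy() copies exactly ETH_ALEN (6) bytes
between byte arrays, so it replaces open-coded memcpy(dst, src, ETH_ALEN)
calls one for one; the in-kernel helper may also use wider aligned stores
when both pointers allow it.  A small userspace approximation, with a
made-up address and a stand-in for the kernel helper:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6    /* octets in one Ethernet address */

    /* userspace stand-in for the kernel's ether_addr_copy() helper */
    static void ether_addr_copy(unsigned char *dst, const unsigned char *src)
    {
            memcpy(dst, src, ETH_ALEN);
    }

    int main(void)
    {
            const unsigned char perm_addr[ETH_ALEN] = {
                    0x00, 0x1b, 0x21, 0xab, 0xcd, 0xef
            };
            unsigned char dev_addr[ETH_ALEN];

            /* before: memcpy(dev_addr, perm_addr, ETH_ALEN);  after: */
            ether_addr_copy(dev_addr, perm_addr);

            printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
                   dev_addr[0], dev_addr[1], dev_addr[2],
                   dev_addr[3], dev_addr[4], dev_addr[5]);
            return 0;
    }

Call sites simply swap the three-argument memcpy() for the two-argument
helper; nothing else about them needs to change.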

Alex removes some dead code as part of the ixgbe cleanup patch.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6922022a 0d8bb414
...@@ -613,7 +613,6 @@ struct ixgbe_adapter { ...@@ -613,7 +613,6 @@ struct ixgbe_adapter {
#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) #define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4)
#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) #define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) #define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7)
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) #define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) #define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) #define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10)
......
...@@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) ...@@ -171,17 +171,21 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
* @hw: pointer to hardware structure * @hw: pointer to hardware structure
* *
* Starts the hardware using the generic start_hw function. * Starts the hardware using the generic start_hw function.
* Disables relaxed ordering Then set pcie completion timeout * Disables relaxed ordering for archs other than SPARC
* Then set pcie completion timeout
* *
**/ **/
static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
{ {
#ifndef CONFIG_SPARC
u32 regval; u32 regval;
u32 i; u32 i;
#endif
s32 ret_val; s32 ret_val;
ret_val = ixgbe_start_hw_generic(hw); ret_val = ixgbe_start_hw_generic(hw);
#ifndef CONFIG_SPARC
/* Disable relaxed ordering */ /* Disable relaxed ordering */
for (i = 0; ((i < hw->mac.max_tx_queues) && for (i = 0; ((i < hw->mac.max_tx_queues) &&
(i < IXGBE_DCA_MAX_QUEUES_82598)); i++) { (i < IXGBE_DCA_MAX_QUEUES_82598)); i++) {
...@@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) ...@@ -197,7 +201,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
} }
#endif
if (ret_val) if (ret_val)
return ret_val; return ret_val;
...@@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = { ...@@ -1193,6 +1197,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.init_thermal_sensor_thresh = NULL, .init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_read = &prot_autoc_read_generic,
.prot_autoc_write = &prot_autoc_write_generic, .prot_autoc_write = &prot_autoc_write_generic,
.enable_rx = &ixgbe_enable_rx_generic,
.disable_rx = &ixgbe_disable_rx_generic,
}; };
static struct ixgbe_eeprom_operations eeprom_ops_82598 = { static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
......
...@@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) ...@@ -1977,7 +1977,10 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
*/ */
hw->mac.ops.disable_rx_buff(hw); hw->mac.ops.disable_rx_buff(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
else
hw->mac.ops.disable_rx(hw);
hw->mac.ops.enable_rx_buff(hw); hw->mac.ops.enable_rx_buff(hw);
...@@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = { ...@@ -2336,6 +2339,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.prot_autoc_read = &prot_autoc_read_82599, .prot_autoc_read = &prot_autoc_read_82599,
.prot_autoc_write = &prot_autoc_write_82599, .prot_autoc_write = &prot_autoc_write_82599,
.enable_rx = &ixgbe_enable_rx_generic,
.disable_rx = &ixgbe_disable_rx_generic,
}; };
static struct ixgbe_eeprom_operations eeprom_ops_82599 = { static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
......
...@@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) ...@@ -312,7 +312,6 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
{ {
u32 i; u32 i;
u32 regval;
/* Clear the rate limiters */ /* Clear the rate limiters */
for (i = 0; i < hw->mac.max_tx_queues; i++) { for (i = 0; i < hw->mac.max_tx_queues; i++) {
...@@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) ...@@ -321,20 +320,25 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
} }
IXGBE_WRITE_FLUSH(hw); IXGBE_WRITE_FLUSH(hw);
#ifndef CONFIG_SPARC
/* Disable relaxed ordering */ /* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) { for (i = 0; i < hw->mac.max_tx_queues; i++) {
u32 regval;
regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i)); regval = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(i));
regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; regval &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval); IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(i), regval);
} }
for (i = 0; i < hw->mac.max_rx_queues; i++) { for (i = 0; i < hw->mac.max_rx_queues; i++) {
u32 regval;
regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); regval = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN | regval &= ~(IXGBE_DCA_RXCTRL_DATA_WRO_EN |
IXGBE_DCA_RXCTRL_HEAD_WRO_EN); IXGBE_DCA_RXCTRL_HEAD_WRO_EN);
IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
} }
#endif
return 0; return 0;
} }
...@@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) ...@@ -703,7 +707,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw)
hw->adapter_stopped = true; hw->adapter_stopped = true;
/* Disable the receive unit */ /* Disable the receive unit */
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); hw->mac.ops.disable_rx(hw);
/* Clear interrupt mask to stop interrupts from being generated */ /* Clear interrupt mask to stop interrupts from being generated */
IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK);
...@@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) ...@@ -2639,7 +2643,10 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw)
**/ **/
s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval)
{ {
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); if (regval & IXGBE_RXCTRL_RXEN)
hw->mac.ops.enable_rx(hw);
else
hw->mac.ops.disable_rx(hw);
return 0; return 0;
} }
...@@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) ...@@ -3850,3 +3857,44 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
return 0; return 0;
} }
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
if (hw->mac.type != ixgbe_mac_82598EB) {
u32 pfdtxgswc;
pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
hw->mac.set_lben = true;
} else {
hw->mac.set_lben = false;
}
}
rxctrl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
}
}
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw)
{
u32 rxctrl;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN));
if (hw->mac.type != ixgbe_mac_82598EB) {
if (hw->mac.set_lben) {
u32 pfdtxgswc;
pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN;
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
hw->mac.set_lben = false;
}
}
}
...@@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, ...@@ -130,6 +130,8 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
void ixgbe_disable_rx_generic(struct ixgbe_hw *hw);
void ixgbe_enable_rx_generic(struct ixgbe_hw *hw);
#define IXGBE_FAILED_READ_REG 0xffffffffU #define IXGBE_FAILED_READ_REG 0xffffffffU
#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU #define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
......
...@@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) ...@@ -1637,9 +1637,7 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
/* shut down the DMA engines now so they can be reinitialized later */ /* shut down the DMA engines now so they can be reinitialized later */
/* first Rx */ /* first Rx */
reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); hw->mac.ops.disable_rx(hw);
reg_ctl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
ixgbe_disable_rx_queue(adapter, rx_ring); ixgbe_disable_rx_queue(adapter, rx_ring);
/* now Tx */ /* now Tx */
...@@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) ...@@ -1670,6 +1668,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{ {
struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
struct ixgbe_hw *hw = &adapter->hw;
u32 rctl, reg_data; u32 rctl, reg_data;
int ret_val; int ret_val;
int err; int err;
...@@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) ...@@ -1713,14 +1712,16 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
goto err_nomem; goto err_nomem;
} }
rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); hw->mac.ops.disable_rx(hw);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN);
ixgbe_configure_rx_ring(adapter, rx_ring); ixgbe_configure_rx_ring(adapter, rx_ring);
rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
rctl |= IXGBE_RXCTRL_DMBYPS;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);
hw->mac.ops.enable_rx(hw);
return 0; return 0;
err_nomem: err_nomem:
......
...@@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, ...@@ -1619,14 +1619,10 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct ixgbe_adapter *adapter = q_vector->adapter;
if (ixgbe_qv_busy_polling(q_vector)) if (ixgbe_qv_busy_polling(q_vector))
netif_receive_skb(skb); netif_receive_skb(skb);
else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL))
napi_gro_receive(&q_vector->napi, skb);
else else
netif_rx(skb); napi_gro_receive(&q_vector->napi, skb);
} }
/** /**
...@@ -3705,8 +3701,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) ...@@ -3705,8 +3701,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
u32 rxctrl, rfctl; u32 rxctrl, rfctl;
/* disable receives while setting up the descriptors */ /* disable receives while setting up the descriptors */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); hw->mac.ops.disable_rx(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
ixgbe_setup_psrtype(adapter); ixgbe_setup_psrtype(adapter);
ixgbe_setup_rdrxctl(adapter); ixgbe_setup_rdrxctl(adapter);
...@@ -3731,6 +3726,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) ...@@ -3731,6 +3726,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter)
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]);
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
/* disable drop enable for 82598 parts */ /* disable drop enable for 82598 parts */
if (hw->mac.type == ixgbe_mac_82598EB) if (hw->mac.type == ixgbe_mac_82598EB)
rxctrl |= IXGBE_RXCTRL_DMBYPS; rxctrl |= IXGBE_RXCTRL_DMBYPS;
...@@ -5014,7 +5010,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ...@@ -5014,7 +5010,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
struct net_device *upper; struct net_device *upper;
struct list_head *iter; struct list_head *iter;
u32 rxctrl;
int i; int i;
/* signal that we are down to the interrupt handler */ /* signal that we are down to the interrupt handler */
...@@ -5022,8 +5017,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) ...@@ -5022,8 +5017,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
return; /* do nothing if already down */ return; /* do nothing if already down */
/* disable receives */ /* disable receives */
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); hw->mac.ops.disable_rx(hw);
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
/* disable all enabled rx queues */ /* disable all enabled rx queues */
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
...@@ -6174,7 +6168,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) ...@@ -6174,7 +6168,6 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter)
/* Cause software interrupt to ensure rings are cleaned */ /* Cause software interrupt to ensure rings are cleaned */
ixgbe_irq_rearm_queues(adapter, eics); ixgbe_irq_rearm_queues(adapter, eics);
} }
/** /**
...@@ -7507,14 +7500,9 @@ static void ixgbe_netpoll(struct net_device *netdev) ...@@ -7507,14 +7500,9 @@ static void ixgbe_netpoll(struct net_device *netdev)
if (test_bit(__IXGBE_DOWN, &adapter->state)) if (test_bit(__IXGBE_DOWN, &adapter->state))
return; return;
adapter->flags |= IXGBE_FLAG_IN_NETPOLL; /* loop through and schedule all active queues */
if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
for (i = 0; i < adapter->num_q_vectors; i++) for (i = 0; i < adapter->num_q_vectors; i++)
ixgbe_msix_clean_rings(0, adapter->q_vector[i]); ixgbe_msix_clean_rings(0, adapter->q_vector[i]);
} else {
ixgbe_intr(adapter->pdev->irq, netdev);
}
adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL;
} }
#endif #endif
......
...@@ -2462,8 +2462,8 @@ struct ixgbe_hic_read_shadow_ram { ...@@ -2462,8 +2462,8 @@ struct ixgbe_hic_read_shadow_ram {
struct ixgbe_hic_write_shadow_ram { struct ixgbe_hic_write_shadow_ram {
union ixgbe_hic_hdr2 hdr; union ixgbe_hic_hdr2 hdr;
u32 address; __be32 address;
u16 length; __be16 length;
u16 pad2; u16 pad2;
u16 data; u16 data;
u16 pad3; u16 pad3;
...@@ -3067,6 +3067,8 @@ struct ixgbe_mac_operations { ...@@ -3067,6 +3067,8 @@ struct ixgbe_mac_operations {
s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8);
s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); s32 (*get_thermal_sensor_data)(struct ixgbe_hw *);
s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw);
void (*disable_rx)(struct ixgbe_hw *hw);
void (*enable_rx)(struct ixgbe_hw *hw);
void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int);
/* DMA Coalescing */ /* DMA Coalescing */
...@@ -3137,6 +3139,7 @@ struct ixgbe_mac_info { ...@@ -3137,6 +3139,7 @@ struct ixgbe_mac_info {
u8 flags; u8 flags;
u8 san_mac_rar_index; u8 san_mac_rar_index;
struct ixgbe_thermal_sensor_data thermal_sensor_data; struct ixgbe_thermal_sensor_data thermal_sensor_data;
bool set_lben;
}; };
struct ixgbe_phy_info { struct ixgbe_phy_info {
......
...@@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = { ...@@ -820,6 +820,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.init_thermal_sensor_thresh = NULL, .init_thermal_sensor_thresh = NULL,
.prot_autoc_read = &prot_autoc_read_generic, .prot_autoc_read = &prot_autoc_read_generic,
.prot_autoc_write = &prot_autoc_write_generic, .prot_autoc_write = &prot_autoc_write_generic,
.enable_rx = &ixgbe_enable_rx_generic,
.disable_rx = &ixgbe_disable_rx_generic,
}; };
static struct ixgbe_eeprom_operations eeprom_ops_X540 = { static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
......
...@@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) ...@@ -557,6 +557,47 @@ static s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw)
return status; return status;
} }
/** ixgbe_disable_rx_x550 - Disable RX unit
* @hw: pointer to hardware structure
*
* Disables the Rx DMA unit for x550
**/
static void ixgbe_disable_rx_x550(struct ixgbe_hw *hw)
{
u32 rxctrl, pfdtxgswc;
s32 status;
struct ixgbe_hic_disable_rxen fw_cmd;
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
hw->mac.set_lben = true;
} else {
hw->mac.set_lben = false;
}
fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD;
fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN;
fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;
fw_cmd.port_number = (u8)hw->bus.lan_id;
status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd,
sizeof(struct ixgbe_hic_disable_rxen),
IXGBE_HI_COMMAND_TIMEOUT, true);
/* If we fail - disable RX using register write */
if (status) {
rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
if (rxctrl & IXGBE_RXCTRL_RXEN) {
rxctrl &= ~IXGBE_RXCTRL_RXEN;
IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
}
}
}
}
/** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash /** ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash
* @hw: pointer to hardware structure * @hw: pointer to hardware structure
* *
...@@ -1306,8 +1347,8 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) ...@@ -1306,8 +1347,8 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
* @enable: enable or disable switch for Ethertype anti-spoofing * @enable: enable or disable switch for Ethertype anti-spoofing
* @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing
**/ **/
void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw,
int vf) bool enable, int vf)
{ {
int vf_target_reg = vf >> 3; int vf_target_reg = vf >> 3;
int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT;
...@@ -1366,6 +1407,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable, ...@@ -1366,6 +1407,8 @@ void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, bool enable,
.init_thermal_sensor_thresh = NULL, \ .init_thermal_sensor_thresh = NULL, \
.prot_autoc_read = &prot_autoc_read_generic, \ .prot_autoc_read = &prot_autoc_read_generic, \
.prot_autoc_write = &prot_autoc_write_generic, \ .prot_autoc_write = &prot_autoc_write_generic, \
.enable_rx = &ixgbe_enable_rx_generic, \
.disable_rx = &ixgbe_disable_rx_x550, \
static struct ixgbe_mac_operations mac_ops_X550 = { static struct ixgbe_mac_operations mac_ops_X550 = {
X550_COMMON_MAC X550_COMMON_MAC
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2012 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -182,7 +181,7 @@ typedef u32 ixgbe_link_speed; ...@@ -182,7 +181,7 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ #define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */
#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ #define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */
#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ #define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */
#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ #define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor ext (0 = legacy) */
#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ #define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */
#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ #define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */
#define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS) #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | IXGBE_TXD_CMD_RS)
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2014 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { ...@@ -100,6 +99,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Register test (offline)",
"Link test (on/offline)" "Link test (on/offline)"
}; };
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) #define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev, static int ixgbevf_get_settings(struct net_device *netdev,
...@@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev, ...@@ -120,6 +120,7 @@ static int ixgbevf_get_settings(struct net_device *netdev,
if (link_up) { if (link_up) {
__u32 speed = SPEED_10000; __u32 speed = SPEED_10000;
switch (link_speed) { switch (link_speed) {
case IXGBE_LINK_SPEED_10GB_FULL: case IXGBE_LINK_SPEED_10GB_FULL:
speed = SPEED_10000; speed = SPEED_10000;
...@@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev, ...@@ -145,12 +146,14 @@ static int ixgbevf_get_settings(struct net_device *netdev,
static u32 ixgbevf_get_msglevel(struct net_device *netdev) static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
return adapter->msg_enable; return adapter->msg_enable;
} }
static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data) static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data; adapter->msg_enable = data;
} }
...@@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev, ...@@ -185,7 +188,8 @@ static void ixgbevf_get_regs(struct net_device *netdev,
/* Interrupt */ /* Interrupt */
/* don't read EICR because it can clear interrupt causes, instead /* don't read EICR because it can clear interrupt causes, instead
* read EICS which is a shadow but doesn't clear EICR */ * read EICS which is a shadow but doesn't clear EICR
*/
regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS); regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS); regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
...@@ -404,7 +408,7 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev, ...@@ -404,7 +408,7 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data) struct ethtool_stats *stats, u64 *data)
{ {
struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_adapter *adapter = netdev_priv(netdev);
char *base = (char *) adapter; char *base = (char *)adapter;
int i; int i;
#ifdef BP_EXTENDED_STATS #ifdef BP_EXTENDED_STATS
u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0, u64 rx_yields = 0, rx_cleaned = 0, rx_missed = 0,
...@@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) ...@@ -594,8 +598,7 @@ static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
} }
test = reg_test_vf; test = reg_test_vf;
/* /* Perform the register test, looping through the test table
* Perform the register test, looping through the test table
* until we either fail or reach the null entry. * until we either fail or reach the null entry.
*/ */
while (test->reg) { while (test->reg) {
...@@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev, ...@@ -670,7 +673,8 @@ static void ixgbevf_diag_test(struct net_device *netdev,
hw_dbg(&adapter->hw, "offline testing starting\n"); hw_dbg(&adapter->hw, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't /* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */ * interfere with test result
*/
if (ixgbevf_link_test(adapter, &data[1])) if (ixgbevf_link_test(adapter, &data[1]))
eth_test->flags |= ETH_TEST_FL_FAILED; eth_test->flags |= ETH_TEST_FL_FAILED;
...@@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev, ...@@ -724,7 +728,7 @@ static int ixgbevf_get_coalesce(struct net_device *netdev,
else else
ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2; ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
/* if in mixed tx/rx queues per vector mode, report only rx settings */ /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
return 0; return 0;
...@@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, ...@@ -745,12 +749,11 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
int num_vectors, i; int num_vectors, i;
u16 tx_itr_param, rx_itr_param; u16 tx_itr_param, rx_itr_param;
/* don't accept tx specific changes if we've got mixed RxTx vectors */ /* don't accept Tx specific changes if we've got mixed RxTx vectors */
if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count if (adapter->q_vector[0]->tx.count &&
&& ec->tx_coalesce_usecs) adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
return -EINVAL; return -EINVAL;
if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
(ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
return -EINVAL; return -EINVAL;
...@@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, ...@@ -765,7 +768,6 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
else else
rx_itr_param = adapter->rx_itr_setting; rx_itr_param = adapter->rx_itr_setting;
if (ec->tx_coalesce_usecs > 1) if (ec->tx_coalesce_usecs > 1)
adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2; adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
else else
...@@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev, ...@@ -781,10 +783,10 @@ static int ixgbevf_set_coalesce(struct net_device *netdev,
for (i = 0; i < num_vectors; i++) { for (i = 0; i < num_vectors; i++) {
q_vector = adapter->q_vector[i]; q_vector = adapter->q_vector[i];
if (q_vector->tx.count && !q_vector->rx.count) if (q_vector->tx.count && !q_vector->rx.count)
/* tx only */ /* Tx only */
q_vector->itr = tx_itr_param; q_vector->itr = tx_itr_param;
else else
/* rx only or mixed */ /* Rx only or mixed */
q_vector->itr = rx_itr_param; q_vector->itr = rx_itr_param;
ixgbevf_write_eitr(q_vector); ixgbevf_write_eitr(q_vector);
} }
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2014 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -51,7 +50,8 @@ ...@@ -51,7 +50,8 @@
#define DESC_NEEDED (MAX_SKB_FRAGS + 4) #define DESC_NEEDED (MAX_SKB_FRAGS + 4)
/* wrapper around a pointer to a socket buffer, /* wrapper around a pointer to a socket buffer,
* so a DMA handle can be stored along with the buffer */ * so a DMA handle can be stored along with the buffer
*/
struct ixgbevf_tx_buffer { struct ixgbevf_tx_buffer {
union ixgbe_adv_tx_desc *next_to_watch; union ixgbe_adv_tx_desc *next_to_watch;
unsigned long time_stamp; unsigned long time_stamp;
...@@ -132,9 +132,10 @@ struct ixgbevf_ring { ...@@ -132,9 +132,10 @@ struct ixgbevf_ring {
u8 __iomem *tail; u8 __iomem *tail;
struct sk_buff *skb; struct sk_buff *skb;
u16 reg_idx; /* holds the special value that gets the hardware register /* holds the special value that gets the hardware register offset
* offset associated with this ring, which is different * associated with this ring, which is different for DCB and RSS modes
* for DCB and RSS modes */ */
u16 reg_idx;
int queue_index; /* needed for multiqueue queue management */ int queue_index; /* needed for multiqueue queue management */
}; };
...@@ -186,9 +187,10 @@ struct ixgbevf_ring_container { ...@@ -186,9 +187,10 @@ struct ixgbevf_ring_container {
*/ */
struct ixgbevf_q_vector { struct ixgbevf_q_vector {
struct ixgbevf_adapter *adapter; struct ixgbevf_adapter *adapter;
u16 v_idx; /* index of q_vector within array, also used for /* index of q_vector within array, also used for finding the bit in
* finding the bit in EICR and friends that * EICR and friends that represents the vector for this ring
* represents the vector for this ring */ */
u16 v_idx;
u16 itr; /* Interrupt throttle rate written to EITR */ u16 itr; /* Interrupt throttle rate written to EITR */
struct napi_struct napi; struct napi_struct napi;
struct ixgbevf_ring_container rx, tx; struct ixgbevf_ring_container rx, tx;
...@@ -203,15 +205,17 @@ struct ixgbevf_q_vector { ...@@ -203,15 +205,17 @@ struct ixgbevf_q_vector {
#define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED) #define IXGBEVF_QV_LOCKED (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ #define IXGBEVF_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */
#define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ #define IXGBEVF_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */
#define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | IXGBEVF_QV_STATE_POLL_YIELD) #define IXGBEVF_QV_YIELD (IXGBEVF_QV_STATE_NAPI_YIELD | \
#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | IXGBEVF_QV_STATE_POLL_YIELD) IXGBEVF_QV_STATE_POLL_YIELD)
#define IXGBEVF_QV_USER_PEND (IXGBEVF_QV_STATE_POLL | \
IXGBEVF_QV_STATE_POLL_YIELD)
spinlock_t lock; spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
}; };
#ifdef CONFIG_NET_RX_BUSY_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
{ {
spin_lock_init(&q_vector->lock); spin_lock_init(&q_vector->lock);
q_vector->state = IXGBEVF_QV_STATE_IDLE; q_vector->state = IXGBEVF_QV_STATE_IDLE;
} }
...@@ -220,6 +224,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector) ...@@ -220,6 +224,7 @@ static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
{ {
int rc = true; int rc = true;
spin_lock_bh(&q_vector->lock); spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_LOCKED) { if (q_vector->state & IXGBEVF_QV_LOCKED) {
WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI); WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
...@@ -240,6 +245,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector) ...@@ -240,6 +245,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
{ {
int rc = false; int rc = false;
spin_lock_bh(&q_vector->lock); spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL | WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
IXGBEVF_QV_STATE_NAPI_YIELD)); IXGBEVF_QV_STATE_NAPI_YIELD));
...@@ -256,6 +262,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector) ...@@ -256,6 +262,7 @@ static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
{ {
int rc = true; int rc = true;
spin_lock_bh(&q_vector->lock); spin_lock_bh(&q_vector->lock);
if ((q_vector->state & IXGBEVF_QV_LOCKED)) { if ((q_vector->state & IXGBEVF_QV_LOCKED)) {
q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD; q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
...@@ -275,6 +282,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector) ...@@ -275,6 +282,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
{ {
int rc = false; int rc = false;
spin_lock_bh(&q_vector->lock); spin_lock_bh(&q_vector->lock);
WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI)); WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_NAPI));
...@@ -297,6 +305,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector) ...@@ -297,6 +305,7 @@ static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
{ {
int rc = true; int rc = true;
spin_lock_bh(&q_vector->lock); spin_lock_bh(&q_vector->lock);
if (q_vector->state & IXGBEVF_QV_OWNED) if (q_vector->state & IXGBEVF_QV_OWNED)
rc = false; rc = false;
...@@ -307,8 +316,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector) ...@@ -307,8 +316,7 @@ static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
#endif /* CONFIG_NET_RX_BUSY_POLL */ #endif /* CONFIG_NET_RX_BUSY_POLL */
/* /* microsecond values for various ITR rates shifted by 2 to fit itr register
* microsecond values for various ITR rates shifted by 2 to fit itr register
* with the first 3 bits reserved 0 * with the first 3 bits reserved 0
*/ */
#define IXGBE_MIN_RSC_ITR 24 #define IXGBE_MIN_RSC_ITR 24
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2014 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -25,7 +24,6 @@ ...@@ -25,7 +24,6 @@
*******************************************************************************/ *******************************************************************************/
/****************************************************************************** /******************************************************************************
Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code
******************************************************************************/ ******************************************************************************/
...@@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) ...@@ -170,12 +168,13 @@ u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg)
* @direction: 0 for Rx, 1 for Tx, -1 for other causes * @direction: 0 for Rx, 1 for Tx, -1 for other causes
* @queue: queue to map the corresponding interrupt to * @queue: queue to map the corresponding interrupt to
* @msix_vector: the vector to map to the corresponding queue * @msix_vector: the vector to map to the corresponding queue
*/ **/
static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
u8 queue, u8 msix_vector) u8 queue, u8 msix_vector)
{ {
u32 ivar, index; u32 ivar, index;
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
if (direction == -1) { if (direction == -1) {
/* other causes */ /* other causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL; msix_vector |= IXGBE_IVAR_ALLOC_VAL;
...@@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction, ...@@ -184,7 +183,7 @@ static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
ivar |= msix_vector; ivar |= msix_vector;
IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar); IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
} else { } else {
/* tx or rx causes */ /* Tx or Rx causes */
msix_vector |= IXGBE_IVAR_ALLOC_VAL; msix_vector |= IXGBE_IVAR_ALLOC_VAL;
index = ((16 * (queue & 1)) + (8 * direction)); index = ((16 * (queue & 1)) + (8 * direction));
ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1)); ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(queue >> 1));
...@@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector, ...@@ -458,11 +457,12 @@ static void ixgbevf_rx_skb(struct ixgbevf_q_vector *q_vector,
napi_gro_receive(&q_vector->napi, skb); napi_gro_receive(&q_vector->napi, skb);
} }
/* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum /**
* ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
* @ring: structure containig ring specific data * @ring: structure containig ring specific data
* @rx_desc: current Rx descriptor being processed * @rx_desc: current Rx descriptor being processed
* @skb: skb currently being received and modified * @skb: skb currently being received and modified
*/ **/
static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb)
...@@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, ...@@ -492,7 +492,8 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
} }
/* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor /**
* ixgbevf_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on * @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor * @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated * @skb: pointer to current skb being populated
...@@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring, ...@@ -500,7 +501,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
* This function checks the ring, descriptor, and packet information in * This function checks the ring, descriptor, and packet information in
* order to populate the checksum, VLAN, protocol, and other fields within * order to populate the checksum, VLAN, protocol, and other fields within
* the skb. * the skb.
*/ **/
static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring, static void ixgbevf_process_skb_fields(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb)
...@@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, ...@@ -647,7 +648,8 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
} }
} }
/* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail /**
* ixgbevf_pull_tail - ixgbevf specific version of skb_pull_tail
* @rx_ring: rx descriptor ring packet is being transacted on * @rx_ring: rx descriptor ring packet is being transacted on
* @skb: pointer to current skb being adjusted * @skb: pointer to current skb being adjusted
* *
...@@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring, ...@@ -657,7 +659,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
* that allow for significant optimizations versus the standard function. * that allow for significant optimizations versus the standard function.
* As a result we can do things like drop a frag and maintain an accurate * As a result we can do things like drop a frag and maintain an accurate
* truesize for the skb. * truesize for the skb.
*/ **/
static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
struct sk_buff *skb) struct sk_buff *skb)
{ {
...@@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, ...@@ -686,7 +688,8 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
skb->tail += pull_len; skb->tail += pull_len;
} }
/* ixgbevf_cleanup_headers - Correct corrupted or empty headers /**
* ixgbevf_cleanup_headers - Correct corrupted or empty headers
* @rx_ring: rx descriptor ring packet is being transacted on * @rx_ring: rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor * @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being fixed * @skb: pointer to current skb being fixed
...@@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring, ...@@ -702,7 +705,7 @@ static void ixgbevf_pull_tail(struct ixgbevf_ring *rx_ring,
* it is large enough to qualify as a valid Ethernet frame. * it is large enough to qualify as a valid Ethernet frame.
* *
* Returns true if an error was encountered and skb was freed. * Returns true if an error was encountered and skb was freed.
*/ **/
static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
struct sk_buff *skb) struct sk_buff *skb)
...@@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring, ...@@ -729,12 +732,13 @@ static bool ixgbevf_cleanup_headers(struct ixgbevf_ring *rx_ring,
return false; return false;
} }
/* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring /**
* ixgbevf_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on * @rx_ring: rx descriptor ring to store buffers on
* @old_buff: donor buffer to have page reused * @old_buff: donor buffer to have page reused
* *
* Synchronizes page for reuse by the adapter * Synchronizes page for reuse by the adapter
*/ **/
static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring, static void ixgbevf_reuse_rx_page(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *old_buff) struct ixgbevf_rx_buffer *old_buff)
{ {
...@@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) ...@@ -764,7 +768,8 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc; return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
} }
/* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff /**
* ixgbevf_add_rx_frag - Add contents of Rx buffer to sk_buff
* @rx_ring: rx descriptor ring to transact packets on * @rx_ring: rx descriptor ring to transact packets on
* @rx_buffer: buffer containing page to add * @rx_buffer: buffer containing page to add
* @rx_desc: descriptor containing length of buffer written by hardware * @rx_desc: descriptor containing length of buffer written by hardware
...@@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page) ...@@ -777,7 +782,7 @@ static inline bool ixgbevf_page_is_reserved(struct page *page)
* *
* The function will then update the page offset if necessary and return * The function will then update the page offset if necessary and return
* true if the buffer can be reused by the adapter. * true if the buffer can be reused by the adapter.
*/ **/
static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring, static bool ixgbevf_add_rx_frag(struct ixgbevf_ring *rx_ring,
struct ixgbevf_rx_buffer *rx_buffer, struct ixgbevf_rx_buffer *rx_buffer,
union ixgbe_adv_rx_desc *rx_desc, union ixgbe_adv_rx_desc *rx_desc,
...@@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ...@@ -1016,7 +1021,8 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
#endif #endif
/* attempt to distribute budget to each queue fairly, but don't allow /* attempt to distribute budget to each queue fairly, but don't allow
* the budget to go below 1 because we'll exit polling */ * the budget to go below 1 because we'll exit polling
*/
if (q_vector->rx.count > 1) if (q_vector->rx.count > 1)
per_ring_budget = max(budget/q_vector->rx.count, 1); per_ring_budget = max(budget/q_vector->rx.count, 1);
else else
...@@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) ...@@ -1049,7 +1055,7 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget)
/** /**
* ixgbevf_write_eitr - write VTEITR register in hardware specific way * ixgbevf_write_eitr - write VTEITR register in hardware specific way
* @q_vector: structure containing interrupt and ring information * @q_vector: structure containing interrupt and ring information
*/ **/
void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
{ {
struct ixgbevf_adapter *adapter = q_vector->adapter; struct ixgbevf_adapter *adapter = q_vector->adapter;
...@@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector) ...@@ -1057,8 +1063,7 @@ void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector)
int v_idx = q_vector->v_idx; int v_idx = q_vector->v_idx;
u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR; u32 itr_reg = q_vector->itr & IXGBE_MAX_EITR;
/* /* set the WDIS bit to not clear the timer bits and cause an
* set the WDIS bit to not clear the timer bits and cause an
* immediate assertion of the interrupt * immediate assertion of the interrupt
*/ */
itr_reg |= IXGBE_EITR_CNT_WDIS; itr_reg |= IXGBE_EITR_CNT_WDIS;
...@@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ...@@ -1115,12 +1120,12 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
adapter->eims_enable_mask = 0; adapter->eims_enable_mask = 0;
/* /* Populate the IVAR table and set the ITR values to the
* Populate the IVAR table and set the ITR values to the
* corresponding register. * corresponding register.
*/ */
for (v_idx = 0; v_idx < q_vectors; v_idx++) { for (v_idx = 0; v_idx < q_vectors; v_idx++) {
struct ixgbevf_ring *ring; struct ixgbevf_ring *ring;
q_vector = adapter->q_vector[v_idx]; q_vector = adapter->q_vector[v_idx];
ixgbevf_for_each_ring(ring, q_vector->rx) ixgbevf_for_each_ring(ring, q_vector->rx)
...@@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter) ...@@ -1130,13 +1135,13 @@ static void ixgbevf_configure_msix(struct ixgbevf_adapter *adapter)
ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx); ixgbevf_set_ivar(adapter, 1, ring->reg_idx, v_idx);
if (q_vector->tx.ring && !q_vector->rx.ring) { if (q_vector->tx.ring && !q_vector->rx.ring) {
/* tx only vector */ /* Tx only vector */
if (adapter->tx_itr_setting == 1) if (adapter->tx_itr_setting == 1)
q_vector->itr = IXGBE_10K_ITR; q_vector->itr = IXGBE_10K_ITR;
else else
q_vector->itr = adapter->tx_itr_setting; q_vector->itr = adapter->tx_itr_setting;
} else { } else {
/* rx or rx/tx vector */ /* Rx or Rx/Tx vector */
if (adapter->rx_itr_setting == 1) if (adapter->rx_itr_setting == 1)
q_vector->itr = IXGBE_20K_ITR; q_vector->itr = IXGBE_20K_ITR;
else else
...@@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector, ...@@ -1187,7 +1192,7 @@ static void ixgbevf_update_itr(struct ixgbevf_q_vector *q_vector,
if (packets == 0) if (packets == 0)
return; return;
/* simple throttlerate management /* simple throttle rate management
* 0-20MB/s lowest (100000 ints/s) * 0-20MB/s lowest (100000 ints/s)
* 20-100MB/s low (20000 ints/s) * 20-100MB/s low (20000 ints/s)
* 100-1249MB/s bulk (8000 ints/s) * 100-1249MB/s bulk (8000 ints/s)
...@@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) ...@@ -1330,8 +1335,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
/* /* The ideal configuration...
* The ideal configuration...
* We have enough vectors to map one per queue. * We have enough vectors to map one per queue.
*/ */
if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) { if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
...@@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter) ...@@ -1343,8 +1347,7 @@ static int ixgbevf_map_rings_to_vectors(struct ixgbevf_adapter *adapter)
goto out; goto out;
} }
/* /* If we don't have enough vectors for a 1-to-1
* If we don't have enough vectors for a 1-to-1
* mapping, we'll have to group them so there are * mapping, we'll have to group them so there are
* multiple queues per vector. * multiple queues per vector.
*/ */
...@@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) ...@@ -1406,8 +1409,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
q_vector->name, q_vector); q_vector->name, q_vector);
if (err) { if (err) {
hw_dbg(&adapter->hw, hw_dbg(&adapter->hw,
"request_irq failed for MSIX interrupt " "request_irq failed for MSIX interrupt Error: %d\n",
"Error: %d\n", err); err);
goto free_queue_irqs; goto free_queue_irqs;
} }
} }
...@@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter) ...@@ -1415,8 +1418,8 @@ static int ixgbevf_request_msix_irqs(struct ixgbevf_adapter *adapter)
err = request_irq(adapter->msix_entries[vector].vector, err = request_irq(adapter->msix_entries[vector].vector,
&ixgbevf_msix_other, 0, netdev->name, adapter); &ixgbevf_msix_other, 0, netdev->name, adapter);
if (err) { if (err) {
hw_dbg(&adapter->hw, hw_dbg(&adapter->hw, "request_irq for msix_other failed: %d\n",
"request_irq for msix_other failed: %d\n", err); err);
goto free_queue_irqs; goto free_queue_irqs;
} }
...@@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter) ...@@ -1448,6 +1451,7 @@ static inline void ixgbevf_reset_q_vectors(struct ixgbevf_adapter *adapter)
for (i = 0; i < q_vectors; i++) { for (i = 0; i < q_vectors; i++) {
struct ixgbevf_q_vector *q_vector = adapter->q_vector[i]; struct ixgbevf_q_vector *q_vector = adapter->q_vector[i];
q_vector->rx.ring = NULL; q_vector->rx.ring = NULL;
q_vector->tx.ring = NULL; q_vector->tx.ring = NULL;
q_vector->rx.count = 0; q_vector->rx.count = 0;
...@@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter) ...@@ -1469,8 +1473,7 @@ static int ixgbevf_request_irq(struct ixgbevf_adapter *adapter)
err = ixgbevf_request_msix_irqs(adapter); err = ixgbevf_request_msix_irqs(adapter);
if (err) if (err)
hw_dbg(&adapter->hw, hw_dbg(&adapter->hw, "request_irq failed, Error %d\n", err);
"request_irq failed, Error %d\n", err);
return err; return err;
} }
...@@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter, ...@@ -1659,7 +1662,7 @@ static void ixgbevf_disable_rx_queue(struct ixgbevf_adapter *adapter,
/* write value back with RXDCTL.ENABLE bit cleared */ /* write value back with RXDCTL.ENABLE bit cleared */
IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl); IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
/* the hardware may take up to 100us to really disable the rx queue */ /* the hardware may take up to 100us to really disable the Rx queue */
do { do {
udelay(10); udelay(10);
rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx)); rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
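The hunk above shows only the start of the wait; the remainder of the function is the usual bounded-poll pattern, roughly as sketched below. The loop bound (IXGBEVF_MAX_RX_DESC_POLL) and the error message are assumptions for illustration, not quoted from the patch.

	u32 rxdctl;
	int wait_loop = IXGBEVF_MAX_RX_DESC_POLL;	/* assumed poll budget */

	do {
		udelay(10);
		rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(reg_idx));
	} while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));

	/* if the enable bit never cleared, log it rather than spin forever */
	if (!wait_loop)
		pr_err("RXDCTL.ENABLE queue %d not cleared while polling\n",
		       reg_idx);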
...@@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter) ...@@ -1786,7 +1789,8 @@ static void ixgbevf_configure_rx(struct ixgbevf_adapter *adapter)
ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN); ixgbevf_rlpml_set_vf(hw, netdev->mtu + ETH_HLEN + ETH_FCS_LEN);
/* Setup the HW Rx Head and Tail Descriptor Pointers and /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */ * the Base and Length of the Rx Descriptor Ring
*/
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]); ixgbevf_configure_rx_ring(adapter, adapter->rx_ring[i]);
} }
...@@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev) ...@@ -1858,14 +1862,14 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
if (!netdev_uc_empty(netdev)) { if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha; struct netdev_hw_addr *ha;
netdev_for_each_uc_addr(ha, netdev) { netdev_for_each_uc_addr(ha, netdev) {
hw->mac.ops.set_uc_addr(hw, ++count, ha->addr); hw->mac.ops.set_uc_addr(hw, ++count, ha->addr);
udelay(200); udelay(200);
} }
} else { } else {
/* /* If the list is empty then send message to PF driver to
* If the list is empty then send message to PF driver to * clear all MAC VLANs on this VF.
* clear all macvlans on this VF.
*/ */
hw->mac.ops.set_uc_addr(hw, 0, NULL); hw->mac.ops.set_uc_addr(hw, 0, NULL);
} }
...@@ -2184,7 +2188,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter) ...@@ -2184,7 +2188,7 @@ void ixgbevf_down(struct ixgbevf_adapter *adapter)
if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state)) if (test_and_set_bit(__IXGBEVF_DOWN, &adapter->state))
return; /* do nothing if already down */ return; /* do nothing if already down */
/* disable all enabled rx queues */ /* disable all enabled Rx queues */
for (i = 0; i < adapter->num_rx_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]); ixgbevf_disable_rx_queue(adapter, adapter->rx_ring[i]);
...@@ -2406,8 +2410,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) ...@@ -2406,8 +2410,7 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
int err = 0; int err = 0;
int vector, v_budget; int vector, v_budget;
/* /* It's easy to be greedy for MSI-X vectors, but it really
* It's easy to be greedy for MSI-X vectors, but it really
* doesn't do us much good if we have a lot more vectors * doesn't do us much good if we have a lot more vectors
* than CPUs. So let's be conservative and only ask for * than CPUs. So let's be conservative and only ask for
* (roughly) the same number of vectors as there are CPUs. * (roughly) the same number of vectors as there are CPUs.
...@@ -2418,7 +2421,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter) ...@@ -2418,7 +2421,8 @@ static int ixgbevf_set_interrupt_capability(struct ixgbevf_adapter *adapter)
v_budget += NON_Q_VECTORS; v_budget += NON_Q_VECTORS;
/* A failure in MSI-X entry allocation isn't fatal, but it does /* A failure in MSI-X entry allocation isn't fatal, but it does
* mean we disable MSI-X capabilities of the adapter. */ * mean we disable MSI-X capabilities of the adapter.
*/
adapter->msix_entries = kcalloc(v_budget, adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL); sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) { if (!adapter->msix_entries) {
...@@ -2544,8 +2548,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) ...@@ -2544,8 +2548,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
err = ixgbevf_alloc_q_vectors(adapter); err = ixgbevf_alloc_q_vectors(adapter);
if (err) { if (err) {
hw_dbg(&adapter->hw, "Unable to allocate memory for queue " hw_dbg(&adapter->hw, "Unable to allocate memory for queue vectors\n");
"vectors\n");
goto err_alloc_q_vectors; goto err_alloc_q_vectors;
} }
...@@ -2555,8 +2558,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter) ...@@ -2555,8 +2558,7 @@ static int ixgbevf_init_interrupt_scheme(struct ixgbevf_adapter *adapter)
goto err_alloc_queues; goto err_alloc_queues;
} }
hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, " hw_dbg(&adapter->hw, "Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n",
"Tx Queue count = %u\n",
(adapter->num_rx_queues > 1) ? "Enabled" : (adapter->num_rx_queues > 1) ? "Enabled" :
"Disabled", adapter->num_rx_queues, adapter->num_tx_queues); "Disabled", adapter->num_rx_queues, adapter->num_tx_queues);
...@@ -2600,7 +2602,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter) ...@@ -2600,7 +2602,6 @@ static void ixgbevf_clear_interrupt_scheme(struct ixgbevf_adapter *adapter)
/** /**
* ixgbevf_sw_init - Initialize general software structures * ixgbevf_sw_init - Initialize general software structures
* (struct ixgbevf_adapter)
* @adapter: board private structure to initialize * @adapter: board private structure to initialize
* *
* ixgbevf_sw_init initializes the Adapter private data structure. * ixgbevf_sw_init initializes the Adapter private data structure.
...@@ -2615,7 +2616,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter) ...@@ -2615,7 +2616,6 @@ static int ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
int err; int err;
/* PCI config space info */ /* PCI config space info */
hw->vendor_id = pdev->vendor; hw->vendor_id = pdev->vendor;
hw->device_id = pdev->device; hw->device_id = pdev->device;
hw->revision_id = pdev->revision; hw->revision_id = pdev->revision;
...@@ -2758,14 +2758,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter) ...@@ -2758,14 +2758,15 @@ static void ixgbevf_reset_subtask(struct ixgbevf_adapter *adapter)
ixgbevf_reinit_locked(adapter); ixgbevf_reinit_locked(adapter);
} }
/* ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts /**
* @adapter - pointer to the device adapter structure * ixgbevf_check_hang_subtask - check for hung queues and dropped interrupts
* @adapter: pointer to the device adapter structure
* *
* This function serves two purposes. First it strobes the interrupt lines * This function serves two purposes. First it strobes the interrupt lines
* in order to make certain interrupts are occurring. Secondly it sets the * in order to make certain interrupts are occurring. Secondly it sets the
* bits needed to check for TX hangs. As a result we should immediately * bits needed to check for TX hangs. As a result we should immediately
* determine if a hang has occurred. * determine if a hang has occurred.
*/ **/
static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
{ {
struct ixgbe_hw *hw = &adapter->hw; struct ixgbe_hw *hw = &adapter->hw;
...@@ -2783,7 +2784,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) ...@@ -2783,7 +2784,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
set_check_for_tx_hang(adapter->tx_ring[i]); set_check_for_tx_hang(adapter->tx_ring[i]);
} }
/* get one bit for every active tx/rx interrupt vector */ /* get one bit for every active Tx/Rx interrupt vector */
for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) { for (i = 0; i < adapter->num_msix_vectors - NON_Q_VECTORS; i++) {
struct ixgbevf_q_vector *qv = adapter->q_vector[i]; struct ixgbevf_q_vector *qv = adapter->q_vector[i];
...@@ -2797,7 +2798,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter) ...@@ -2797,7 +2798,7 @@ static void ixgbevf_check_hang_subtask(struct ixgbevf_adapter *adapter)
/** /**
* ixgbevf_watchdog_update_link - update the link status * ixgbevf_watchdog_update_link - update the link status
* @adapter - pointer to the device adapter structure * @adapter: pointer to the device adapter structure
**/ **/
static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
{ {
...@@ -2825,7 +2826,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter) ...@@ -2825,7 +2826,7 @@ static void ixgbevf_watchdog_update_link(struct ixgbevf_adapter *adapter)
/** /**
* ixgbevf_watchdog_link_is_up - update netif_carrier status and * ixgbevf_watchdog_link_is_up - update netif_carrier status and
* print link up message * print link up message
* @adapter - pointer to the device adapter structure * @adapter: pointer to the device adapter structure
**/ **/
static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
{ {
...@@ -2850,7 +2851,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter) ...@@ -2850,7 +2851,7 @@ static void ixgbevf_watchdog_link_is_up(struct ixgbevf_adapter *adapter)
/** /**
* ixgbevf_watchdog_link_is_down - update netif_carrier status and * ixgbevf_watchdog_link_is_down - update netif_carrier status and
* print link down message * print link down message
* @adapter - pointer to the adapter structure * @adapter: pointer to the adapter structure
**/ **/
static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter) static void ixgbevf_watchdog_link_is_down(struct ixgbevf_adapter *adapter)
{ {
...@@ -2956,7 +2957,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter) ...@@ -2956,7 +2957,7 @@ static void ixgbevf_free_all_tx_resources(struct ixgbevf_adapter *adapter)
/** /**
* ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors) * ixgbevf_setup_tx_resources - allocate Tx resources (Descriptors)
* @tx_ring: tx descriptor ring (for a specific queue) to setup * @tx_ring: Tx descriptor ring (for a specific queue) to setup
* *
* Return 0 on success, negative on failure * Return 0 on success, negative on failure
**/ **/
...@@ -2983,8 +2984,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring) ...@@ -2983,8 +2984,7 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_ring *tx_ring)
err: err:
vfree(tx_ring->tx_buffer_info); vfree(tx_ring->tx_buffer_info);
tx_ring->tx_buffer_info = NULL; tx_ring->tx_buffer_info = NULL;
hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit " hw_dbg(&adapter->hw, "Unable to allocate memory for the transmit descriptor ring\n");
"descriptor ring\n");
return -ENOMEM; return -ENOMEM;
} }
...@@ -3006,8 +3006,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) ...@@ -3006,8 +3006,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]); err = ixgbevf_setup_tx_resources(adapter->tx_ring[i]);
if (!err) if (!err)
continue; continue;
hw_dbg(&adapter->hw, hw_dbg(&adapter->hw, "Allocation for Tx Queue %u failed\n", i);
"Allocation for Tx Queue %u failed\n", i);
break; break;
} }
...@@ -3016,7 +3015,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) ...@@ -3016,7 +3015,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter)
/** /**
* ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors) * ixgbevf_setup_rx_resources - allocate Rx resources (Descriptors)
* @rx_ring: rx descriptor ring (for a specific queue) to setup * @rx_ring: Rx descriptor ring (for a specific queue) to setup
* *
* Returns 0 on success, negative on failure * Returns 0 on success, negative on failure
**/ **/
...@@ -3065,8 +3064,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter) ...@@ -3065,8 +3064,7 @@ static int ixgbevf_setup_all_rx_resources(struct ixgbevf_adapter *adapter)
err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]); err = ixgbevf_setup_rx_resources(adapter->rx_ring[i]);
if (!err) if (!err)
continue; continue;
hw_dbg(&adapter->hw, hw_dbg(&adapter->hw, "Allocation for Rx Queue %u failed\n", i);
"Allocation for Rx Queue %u failed\n", i);
break; break;
} }
return err; return err;
...@@ -3136,11 +3134,11 @@ static int ixgbevf_open(struct net_device *netdev) ...@@ -3136,11 +3134,11 @@ static int ixgbevf_open(struct net_device *netdev)
if (hw->adapter_stopped) { if (hw->adapter_stopped) {
ixgbevf_reset(adapter); ixgbevf_reset(adapter);
/* if adapter is still stopped then PF isn't up and /* if adapter is still stopped then PF isn't up and
* the vf can't start. */ * the VF can't start.
*/
if (hw->adapter_stopped) { if (hw->adapter_stopped) {
err = IXGBE_ERR_MBX; err = IXGBE_ERR_MBX;
pr_err("Unable to start - perhaps the PF Driver isn't " pr_err("Unable to start - perhaps the PF Driver isn't up yet\n");
"up yet\n");
goto err_setup_reset; goto err_setup_reset;
} }
} }
...@@ -3163,8 +3161,7 @@ static int ixgbevf_open(struct net_device *netdev) ...@@ -3163,8 +3161,7 @@ static int ixgbevf_open(struct net_device *netdev)
ixgbevf_configure(adapter); ixgbevf_configure(adapter);
/* /* Map the Tx/Rx rings to the vectors we were allotted.
* Map the Tx/Rx rings to the vectors we were allotted.
* if request_irq will be called in this function map_rings * if request_irq will be called in this function map_rings
* must be called *before* up_complete * must be called *before* up_complete
*/ */
...@@ -3288,6 +3285,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, ...@@ -3288,6 +3285,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
if (first->protocol == htons(ETH_P_IP)) { if (first->protocol == htons(ETH_P_IP)) {
struct iphdr *iph = ip_hdr(skb); struct iphdr *iph = ip_hdr(skb);
iph->tot_len = 0; iph->tot_len = 0;
iph->check = 0; iph->check = 0;
tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
...@@ -3313,7 +3311,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring, ...@@ -3313,7 +3311,7 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
*hdr_len += l4len; *hdr_len += l4len;
*hdr_len = skb_transport_offset(skb) + l4len; *hdr_len = skb_transport_offset(skb) + l4len;
/* update gso size and bytecount with header size */ /* update GSO size and bytecount with header size */
first->gso_segs = skb_shinfo(skb)->gso_segs; first->gso_segs = skb_shinfo(skb)->gso_segs;
first->bytecount += (first->gso_segs - 1) * *hdr_len; first->bytecount += (first->gso_segs - 1) * *hdr_len;
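As a worked example of that bytecount adjustment (illustrative numbers, not from the patch): with *hdr_len = 54 bytes of headers, an MSS of 1448 and 7240 bytes of TCP payload, gso_segs = 5 and skb->len = 7294, so bytecount becomes 7294 + (5 - 1) * 54 = 7510, exactly the 5 * 54 + 7240 bytes that reach the wire once the header is replicated for each segment.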
...@@ -3343,6 +3341,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring, ...@@ -3343,6 +3341,7 @@ static void ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
if (skb->ip_summed == CHECKSUM_PARTIAL) { if (skb->ip_summed == CHECKSUM_PARTIAL) {
u8 l4_hdr = 0; u8 l4_hdr = 0;
switch (first->protocol) { switch (first->protocol) {
case htons(ETH_P_IP): case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb); vlan_macip_lens |= skb_network_header_len(skb);
...@@ -3405,7 +3404,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags) ...@@ -3405,7 +3404,7 @@ static __le32 ixgbevf_tx_cmd_type(u32 tx_flags)
IXGBE_ADVTXD_DCMD_IFCS | IXGBE_ADVTXD_DCMD_IFCS |
IXGBE_ADVTXD_DCMD_DEXT); IXGBE_ADVTXD_DCMD_DEXT);
/* set HW vlan bit if vlan is present */ /* set HW VLAN bit if VLAN is present */
if (tx_flags & IXGBE_TX_FLAGS_VLAN) if (tx_flags & IXGBE_TX_FLAGS_VLAN)
cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE); cmd_type |= cpu_to_le32(IXGBE_ADVTXD_DCMD_VLE);
...@@ -3572,11 +3571,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size) ...@@ -3572,11 +3571,13 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Herbert's original patch had: /* Herbert's original patch had:
* smp_mb__after_netif_stop_queue(); * smp_mb__after_netif_stop_queue();
* but since that doesn't exist yet, just open code it. */ * but since that doesn't exist yet, just open code it.
*/
smp_mb(); smp_mb();
/* We need to check again in case another CPU has just /* We need to check again in case another CPU has just
* made room available. */ * made room available.
*/
if (likely(ixgbevf_desc_unused(tx_ring) < size)) if (likely(ixgbevf_desc_unused(tx_ring) < size))
return -EBUSY; return -EBUSY;
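For context, the complete stop/recheck/wake sequence that this hunk belongs to looks roughly like the sketch below; it reuses the driver's helper names, but the function name and the omitted statistics bookkeeping are illustrative.

	static int sketch_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
	{
		netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

		/* make the stopped state visible before re-reading the free
		 * count (the open-coded barrier discussed in the comment above)
		 */
		smp_mb();

		/* still no room: stay stopped and let the caller back off */
		if (likely(ixgbevf_desc_unused(tx_ring) < size))
			return -EBUSY;

		/* another CPU freed descriptors meanwhile: restart the queue */
		netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);

		return 0;
	}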
...@@ -3615,8 +3616,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -3615,8 +3616,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tx_ring = adapter->tx_ring[skb->queue_mapping]; tx_ring = adapter->tx_ring[skb->queue_mapping];
/* /* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
* need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
* + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD, * + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
* + 2 desc gap to keep tail from touching head, * + 2 desc gap to keep tail from touching head,
* + 1 desc for context descriptor, * + 1 desc for context descriptor,
...@@ -3794,8 +3794,7 @@ static int ixgbevf_resume(struct pci_dev *pdev) ...@@ -3794,8 +3794,7 @@ static int ixgbevf_resume(struct pci_dev *pdev)
u32 err; u32 err;
pci_restore_state(pdev); pci_restore_state(pdev);
/* /* pci_restore_state clears dev->state_saved so call
* pci_restore_state clears dev->state_saved so call
* pci_save_state to restore it. * pci_save_state to restore it.
*/ */
pci_save_state(pdev); pci_save_state(pdev);
...@@ -3930,8 +3929,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3930,8 +3929,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else { } else {
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) { if (err) {
dev_err(&pdev->dev, "No usable DMA " dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
"configuration, aborting\n");
goto err_dma; goto err_dma;
} }
pci_using_dac = 0; pci_using_dac = 0;
...@@ -3962,8 +3960,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3962,8 +3960,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
hw->back = adapter; hw->back = adapter;
adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
/* /* call save state here in standalone driver because it relies on
* call save state here in standalone driver because it relies on
* adapter struct to exist, and needs to call netdev_priv * adapter struct to exist, and needs to call netdev_priv
*/ */
pci_save_state(pdev); pci_save_state(pdev);
...@@ -3978,7 +3975,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -3978,7 +3975,7 @@ static int ixgbevf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgbevf_assign_netdev_ops(netdev); ixgbevf_assign_netdev_ops(netdev);
/* Setup hw api */ /* Setup HW API */
memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
hw->mac.type = ii->mac; hw->mac.type = ii->mac;
...@@ -4131,7 +4128,7 @@ static void ixgbevf_remove(struct pci_dev *pdev) ...@@ -4131,7 +4128,7 @@ static void ixgbevf_remove(struct pci_dev *pdev)
* *
* This function is called after a PCI bus error affecting * This function is called after a PCI bus error affecting
* this device has been detected. * this device has been detected.
*/ **/
static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
pci_channel_state_t state) pci_channel_state_t state)
{ {
...@@ -4166,7 +4163,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev, ...@@ -4166,7 +4163,7 @@ static pci_ers_result_t ixgbevf_io_error_detected(struct pci_dev *pdev,
* *
* Restart the card from scratch, as if from a cold-boot. Implementation * Restart the card from scratch, as if from a cold-boot. Implementation
* resembles the first-half of the ixgbevf_resume routine. * resembles the first-half of the ixgbevf_resume routine.
*/ **/
static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
...@@ -4194,7 +4191,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev) ...@@ -4194,7 +4191,7 @@ static pci_ers_result_t ixgbevf_io_slot_reset(struct pci_dev *pdev)
* This callback is called when the error recovery driver tells us that * This callback is called when the error recovery driver tells us that
* it's OK to resume normal operation. Implementation resembles the * it's OK to resume normal operation. Implementation resembles the
* second-half of the ixgbevf_resume routine. * second-half of the ixgbevf_resume routine.
*/ **/
static void ixgbevf_io_resume(struct pci_dev *pdev) static void ixgbevf_io_resume(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
...@@ -4236,6 +4233,7 @@ static struct pci_driver ixgbevf_driver = { ...@@ -4236,6 +4233,7 @@ static struct pci_driver ixgbevf_driver = {
static int __init ixgbevf_init_module(void) static int __init ixgbevf_init_module(void)
{ {
int ret; int ret;
pr_info("%s - version %s\n", ixgbevf_driver_string, pr_info("%s - version %s\n", ixgbevf_driver_string,
ixgbevf_driver_version); ixgbevf_driver_version);
...@@ -4266,6 +4264,7 @@ static void __exit ixgbevf_exit_module(void) ...@@ -4266,6 +4264,7 @@ static void __exit ixgbevf_exit_module(void)
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw) char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw)
{ {
struct ixgbevf_adapter *adapter = hw->back; struct ixgbevf_adapter *adapter = hw->back;
return adapter->netdev->name; return adapter->netdev->name;
} }
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2012 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw) ...@@ -52,10 +51,10 @@ static s32 ixgbevf_poll_for_msg(struct ixgbe_hw *hw)
} }
/** /**
* ixgbevf_poll_for_ack - Wait for message acknowledgement * ixgbevf_poll_for_ack - Wait for message acknowledgment
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* *
* returns 0 if it successfully received a message acknowledgement * returns 0 if it successfully received a message acknowledgment
**/ **/
static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw) static s32 ixgbevf_poll_for_ack(struct ixgbe_hw *hw)
{ {
...@@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw) ...@@ -234,7 +233,7 @@ static s32 ixgbevf_obtain_mbx_lock_vf(struct ixgbe_hw *hw)
/* Take ownership of the buffer */ /* Take ownership of the buffer */
IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU);
/* reserve mailbox for vf use */ /* reserve mailbox for VF use */
if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) if (ixgbevf_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU)
ret_val = 0; ret_val = 0;
...@@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) ...@@ -254,8 +253,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
s32 ret_val; s32 ret_val;
u16 i; u16 i;
/* lock the mailbox to prevent PF/VF race condition */
/* lock the mailbox to prevent pf/vf race condition */
ret_val = ixgbevf_obtain_mbx_lock_vf(hw); ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
if (ret_val) if (ret_val)
goto out_no_write; goto out_no_write;
...@@ -279,7 +277,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) ...@@ -279,7 +277,7 @@ static s32 ixgbevf_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
} }
/** /**
* ixgbevf_read_mbx_vf - Reads a message from the inbox intended for vf * ixgbevf_read_mbx_vf - Reads a message from the inbox intended for VF
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @msg: The message buffer * @msg: The message buffer
* @size: Length of buffer * @size: Length of buffer
...@@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) ...@@ -291,7 +289,7 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
s32 ret_val = 0; s32 ret_val = 0;
u16 i; u16 i;
/* lock the mailbox to prevent pf/vf race condition */ /* lock the mailbox to prevent PF/VF race condition */
ret_val = ixgbevf_obtain_mbx_lock_vf(hw); ret_val = ixgbevf_obtain_mbx_lock_vf(hw);
if (ret_val) if (ret_val)
goto out_no_read; goto out_no_read;
...@@ -311,17 +309,18 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size) ...@@ -311,17 +309,18 @@ static s32 ixgbevf_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size)
} }
/** /**
* ixgbevf_init_mbx_params_vf - set initial values for vf mailbox * ixgbevf_init_mbx_params_vf - set initial values for VF mailbox
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* *
* Initializes the hw->mbx struct to correct values for vf mailbox * Initializes the hw->mbx struct to correct values for VF mailbox
*/ */
static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw) static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
{ {
struct ixgbe_mbx_info *mbx = &hw->mbx; struct ixgbe_mbx_info *mbx = &hw->mbx;
/* start mailbox as timed out and let the reset_hw call set the timeout /* start mailbox as timed out and let the reset_hw call set the timeout
* value to begin communications */ * value to begin communications
*/
mbx->timeout = 0; mbx->timeout = 0;
mbx->udelay = IXGBE_VF_MBX_INIT_DELAY; mbx->udelay = IXGBE_VF_MBX_INIT_DELAY;
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2012 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -61,25 +60,23 @@ ...@@ -61,25 +60,23 @@
#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ #define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */
#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ #define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */
/* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the
* PF. The reverse is true if it is IXGBE_PF_*. * PF. The reverse is true if it is IXGBE_PF_*.
* Message ACKs are the value or'd with 0xF0000000 * Message ACKs are the value or'd with 0xF0000000
*/ */
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with /* Messages below or'd with this are the ACK */
* this are the ACK */ #define IXGBE_VT_MSGTYPE_ACK 0x80000000
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with /* Messages below or'd with this are the NACK */
* this are the NACK */ #define IXGBE_VT_MSGTYPE_NACK 0x40000000
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still /* Indicates that VF is still clear to send requests */
* clear to send requests */ #define IXGBE_VT_MSGTYPE_CTS 0x20000000
#define IXGBE_VT_MSGINFO_SHIFT 16 #define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */ /* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) #define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
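To see how these flag bits are used in practice: a VF request is answered by the PF with the same message type OR'd with the ACK or NACK bit, so replies are checked along the lines of the sketch below (mirroring the reset handshake shown later in this diff; the msgbuf sizing and error handling are simplified for illustration).

	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN] = { IXGBE_VF_RESET };

	mbx->ops.write_posted(hw, msgbuf, 1);
	mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);

	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK)) {
		/* PF accepted the request */
	} else if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) {
		/* PF refused it */
	}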
/* definitions to support mailbox API version negotiation */ /* definitions to support mailbox API version negotiation */
/* /* each element denotes a version of the API; existing numbers may not
* each element denotes a version of the API; existing numbers may not
* change; any additions must go at the end * change; any additions must go at the end
*/ */
enum ixgbe_pfvf_api_rev { enum ixgbe_pfvf_api_rev {
...@@ -107,7 +104,7 @@ enum ixgbe_pfvf_api_rev { ...@@ -107,7 +104,7 @@ enum ixgbe_pfvf_api_rev {
/* GET_QUEUES return data indices within the mailbox */ /* GET_QUEUES return data indices within the mailbox */
#define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */
#define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */
#define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port VLAN */
#define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */
/* length of permanent address message returned from PF */ /* length of permanent address message returned from PF */
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2014 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2012 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) ...@@ -102,9 +101,10 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
mdelay(10); mdelay(10);
/* set our "perm_addr" based on info provided by PF */ /* set our "perm_addr" based on info provided by PF
/* also set up the mc_filter_type which is piggybacked * also set up the mc_filter_type which is piggybacked
* on the mac address in word 3 */ * on the mac address in word 3
*/
ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN); ret_val = mbx->ops.read_posted(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
if (ret_val) if (ret_val)
return ret_val; return ret_val;
...@@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw) ...@@ -117,7 +117,7 @@ static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK)) msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_NACK))
return IXGBE_ERR_INVALID_MAC_ADDR; return IXGBE_ERR_INVALID_MAC_ADDR;
memcpy(hw->mac.perm_addr, addr, ETH_ALEN); ether_addr_copy(hw->mac.perm_addr, addr);
hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD]; hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
return 0; return 0;
...@@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) ...@@ -138,8 +138,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
u32 reg_val; u32 reg_val;
u16 i; u16 i;
/* /* Set the adapter_stopped flag so other driver functions stop touching
* Set the adapter_stopped flag so other driver functions stop touching
* the hardware * the hardware
*/ */
hw->adapter_stopped = true; hw->adapter_stopped = true;
...@@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw) ...@@ -182,7 +181,7 @@ static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
* *
* Extracts the 12 bits, from a multicast address, to determine which * Extracts the 12 bits, from a multicast address, to determine which
* bit-vector to set in the multicast table. The hardware uses 12 bits, from * bit-vector to set in the multicast table. The hardware uses 12 bits, from
* incoming rx multicast addresses, to determine the bit-vector to check in * incoming Rx multicast addresses, to determine the bit-vector to check in
* the MTA. Which of the 4 combinations of 12 bits the hardware uses is set * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
* by the MO field of the MCSTCTRL. The MO field is set during initialization * by the MO field of the MCSTCTRL. The MO field is set during initialization
* to mc_filter_type. * to mc_filter_type.
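A small stand-alone sketch of that 12-bit extraction (plain C; the shift amounts follow the usual ixgbe-family MO encoding, but this is illustrative, not the driver function itself):

	#include <stdint.h>
	#include <stdio.h>

	/* Pick 12 of the upper 16 address bits according to filter_type
	 * (the MO field described above) and use them as the MTA vector.
	 */
	static uint16_t mta_vector(int filter_type, const uint8_t *mc_addr)
	{
		uint16_t v = 0;

		switch (filter_type) {
		case 0: v = (mc_addr[4] >> 4) | ((uint16_t)mc_addr[5] << 4); break;
		case 1: v = (mc_addr[4] >> 3) | ((uint16_t)mc_addr[5] << 5); break;
		case 2: v = (mc_addr[4] >> 2) | ((uint16_t)mc_addr[5] << 6); break;
		case 3: v =  mc_addr[4]       | ((uint16_t)mc_addr[5] << 8); break;
		}
		return v & 0xFFF;
	}

	int main(void)
	{
		const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };

		printf("vector = 0x%03x\n", mta_vector(0, mc));
		return 0;
	}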
...@@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) ...@@ -220,7 +219,7 @@ static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
**/ **/
static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr) static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
{ {
memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN); ether_addr_copy(mac_addr, hw->mac.perm_addr);
return 0; return 0;
} }
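Several hunks in this file swap memcpy() for ether_addr_copy(). The helper from <linux/etherdevice.h> always copies ETH_ALEN (6) bytes and documents that both buffers must be 2-byte aligned; otherwise it is a drop-in replacement:

	#include <linux/etherdevice.h>

	/* before */
	memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);

	/* after: same copy, clearer intent, requires u16-aligned buffers */
	ether_addr_copy(mac_addr, hw->mac.perm_addr);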
...@@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) ...@@ -233,8 +232,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
s32 ret_val; s32 ret_val;
memset(msgbuf, 0, sizeof(msgbuf)); memset(msgbuf, 0, sizeof(msgbuf));
/* /* If index is one then this is the start of a new list and needs
* If index is one then this is the start of a new list and needs
* indication to the PF so it can do its own list management. * indication to the PF so it can do its own list management.
* If it is zero then that tells the PF to just clear all of * If it is zero then that tells the PF to just clear all of
* this VF's macvlans and there is no new list. * this VF's macvlans and there is no new list.
...@@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) ...@@ -242,7 +240,7 @@ static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT; msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
msgbuf[0] |= IXGBE_VF_SET_MACVLAN; msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
if (addr) if (addr)
memcpy(msg_addr, addr, ETH_ALEN); ether_addr_copy(msg_addr, addr);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3); ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val) if (!ret_val)
...@@ -275,7 +273,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, ...@@ -275,7 +273,7 @@ static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
memset(msgbuf, 0, sizeof(msgbuf)); memset(msgbuf, 0, sizeof(msgbuf));
msgbuf[0] = IXGBE_VF_SET_MAC_ADDR; msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
memcpy(msg_addr, addr, ETH_ALEN); ether_addr_copy(msg_addr, addr);
ret_val = mbx->ops.write_posted(hw, msgbuf, 3); ret_val = mbx->ops.write_posted(hw, msgbuf, 3);
if (!ret_val) if (!ret_val)
...@@ -348,7 +346,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw, ...@@ -348,7 +346,7 @@ static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
} }
/** /**
* ixgbevf_set_vfta_vf - Set/Unset vlan filter table address * ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
* @hw: pointer to the HW structure * @hw: pointer to the HW structure
* @vlan: 12 bit VLAN ID * @vlan: 12 bit VLAN ID
* @vind: unused by VF drivers * @vind: unused by VF drivers
...@@ -462,7 +460,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, ...@@ -462,7 +460,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
} }
/* if the read failed it could just be a mailbox collision, best wait /* if the read failed it could just be a mailbox collision, best wait
* until we are called again and don't report an error */ * until we are called again and don't report an error
*/
if (mbx->ops.read(hw, &in_msg, 1)) if (mbx->ops.read(hw, &in_msg, 1))
goto out; goto out;
...@@ -480,7 +479,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw, ...@@ -480,7 +479,8 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
} }
/* if we passed all the tests above then the link is up and we no /* if we passed all the tests above then the link is up and we no
* longer need to check for link */ * longer need to check for link
*/
mac->get_link_status = false; mac->get_link_status = false;
out: out:
...@@ -561,8 +561,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, ...@@ -561,8 +561,7 @@ int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
if (!err) { if (!err) {
msg[0] &= ~IXGBE_VT_MSGTYPE_CTS; msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
/* /* if we didn't get an ACK there must have been
* if we didn't get an ACK there must have been * some sort of mailbox error so we should treat it
* some sort of mailbox error so we should treat it * some sort of mailbox error so we should treat it
* as such * as such
*/ */
......
/******************************************************************************* /*******************************************************************************
Intel 82599 Virtual Function driver Intel 82599 Virtual Function driver
Copyright(c) 1999 - 2014 Intel Corporation. Copyright(c) 1999 - 2015 Intel Corporation.
This program is free software; you can redistribute it and/or modify it This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License, under the terms and conditions of the GNU General Public License,
...@@ -13,8 +13,7 @@ ...@@ -13,8 +13,7 @@
more details. more details.
You should have received a copy of the GNU General Public License along with You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., this program; if not, see <http://www.gnu.org/licenses/>.
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in The full GNU General Public License is included in this distribution in
the file called "COPYING". the file called "COPYING".
...@@ -185,6 +184,7 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) ...@@ -185,6 +184,7 @@ static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
return; return;
writel(value, reg_addr + reg); writel(value, reg_addr + reg);
} }
#define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v) #define IXGBE_WRITE_REG(h, r, v) ixgbe_write_reg(h, r, v)
u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg); u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg);
...@@ -195,6 +195,7 @@ static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg, ...@@ -195,6 +195,7 @@ static inline void ixgbe_write_reg_array(struct ixgbe_hw *hw, u32 reg,
{ {
ixgbe_write_reg(hw, reg + (offset << 2), value); ixgbe_write_reg(hw, reg + (offset << 2), value);
} }
#define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v) #define IXGBE_WRITE_REG_ARRAY(h, r, o, v) ixgbe_write_reg_array(h, r, o, v)
static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
...@@ -202,6 +203,7 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg, ...@@ -202,6 +203,7 @@ static inline u32 ixgbe_read_reg_array(struct ixgbe_hw *hw, u32 reg,
{ {
return ixgbevf_read_reg(hw, reg + (offset << 2)); return ixgbevf_read_reg(hw, reg + (offset << 2));
} }
#define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o) #define IXGBE_READ_REG_ARRAY(h, r, o) ixgbe_read_reg_array(h, r, o)
void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size);
...@@ -209,4 +211,3 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); ...@@ -209,4 +211,3 @@ int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api);
int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
unsigned int *default_tc); unsigned int *default_tc);
#endif /* __IXGBE_VF_H__ */ #endif /* __IXGBE_VF_H__ */