Commit d8d96caa authored by Ganesh Venkatesan, committed by Jeff Garzik

[PATCH] ixgb: White space corrections
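The hunks below are mechanical formatting changes: each function's return type moves onto its own line, parameter lists are broken to one parameter per line with aligned continuation, the space between control keywords (if/for/while) and their opening parenthesis is dropped, and return values are written as return(x). The fragment below is purely illustrative of that style; example_sum is not a function in the driver.

```c
/* Illustrative only -- not code from ixgb.  Shows the formatting
 * conventions applied throughout this patch: return type on its own
 * line, one parameter per line, and no space after if/for/return.
 */
#include <stdint.h>

static uint16_t
example_sum(const uint16_t *words,
            uint16_t count)
{
	uint16_t sum = 0;
	uint16_t i;

	for(i = 0; i < count; i++)
		sum += words[i];

	if(sum == 0)
		return(0);

	return(sum);
}

int
main(void)
{
	uint16_t w[3] = { 1, 2, 3 };

	/* exits 0 when the sum is computed correctly */
	return example_sum(w, 3) == 6 ? 0 : 1;
}
```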

Signed-off-by: Ganesh Venkatesan <ganesh.venkatesan@intel.com>
parent 0acd99c0
......@@ -196,4 +196,4 @@ struct ixgb_adapter {
boolean_t have_msi;
#endif
};
#endif /* _IXGB_H_ */
#endif /* _IXGB_H_ */
......@@ -32,7 +32,8 @@
static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw);
static void ixgb_shift_out_bits(struct ixgb_hw *hw,
uint16_t data, uint16_t count);
uint16_t data,
uint16_t count);
static void ixgb_standby_eeprom(struct ixgb_hw *hw);
static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw);
......@@ -45,7 +46,9 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
* hw - Struct containing variables accessed by shared code
* eecd_reg - EECD's current value
*****************************************************************************/
static void ixgb_raise_clock(struct ixgb_hw *hw, uint32_t * eecd_reg)
static void
ixgb_raise_clock(struct ixgb_hw *hw,
uint32_t *eecd_reg)
{
/* Raise the clock input to the EEPROM (by setting the SK bit), and then
* wait 50 microseconds.
......@@ -62,7 +65,9 @@ static void ixgb_raise_clock(struct ixgb_hw *hw, uint32_t * eecd_reg)
* hw - Struct containing variables accessed by shared code
* eecd_reg - EECD's current value
*****************************************************************************/
static void ixgb_lower_clock(struct ixgb_hw *hw, uint32_t * eecd_reg)
static void
ixgb_lower_clock(struct ixgb_hw *hw,
uint32_t *eecd_reg)
{
/* Lower the clock input to the EEPROM (by clearing the SK bit), and then
* wait 50 microseconds.
......@@ -81,7 +86,9 @@ static void ixgb_lower_clock(struct ixgb_hw *hw, uint32_t * eecd_reg)
* count - number of bits to shift out
*****************************************************************************/
static void
ixgb_shift_out_bits(struct ixgb_hw *hw, uint16_t data, uint16_t count)
ixgb_shift_out_bits(struct ixgb_hw *hw,
uint16_t data,
uint16_t count)
{
uint32_t eecd_reg;
uint32_t mask;
......@@ -101,7 +108,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, uint16_t data, uint16_t count)
*/
eecd_reg &= ~IXGB_EECD_DI;
if (data & mask)
if(data & mask)
eecd_reg |= IXGB_EECD_DI;
IXGB_WRITE_REG(hw, EECD, eecd_reg);
......@@ -113,7 +120,7 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, uint16_t data, uint16_t count)
mask = mask >> 1;
} while (mask);
} while(mask);
/* We leave the "DI" bit set to "0" when we leave this routine. */
eecd_reg &= ~IXGB_EECD_DI;
......@@ -126,7 +133,8 @@ ixgb_shift_out_bits(struct ixgb_hw *hw, uint16_t data, uint16_t count)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw)
static uint16_t
ixgb_shift_in_bits(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
uint32_t i;
......@@ -144,14 +152,14 @@ static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw)
eecd_reg &= ~(IXGB_EECD_DO | IXGB_EECD_DI);
data = 0;
for (i = 0; i < 16; i++) {
for(i = 0; i < 16; i++) {
data = data << 1;
ixgb_raise_clock(hw, &eecd_reg);
eecd_reg = IXGB_READ_REG(hw, EECD);
eecd_reg &= ~(IXGB_EECD_DI);
if (eecd_reg & IXGB_EECD_DO)
if(eecd_reg & IXGB_EECD_DO)
data |= 1;
ixgb_lower_clock(hw, &eecd_reg);
......@@ -168,7 +176,8 @@ static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw)
* Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
* function should be called before issuing a command to the EEPROM.
*****************************************************************************/
static void ixgb_setup_eeprom(struct ixgb_hw *hw)
static void
ixgb_setup_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
......@@ -189,7 +198,8 @@ static void ixgb_setup_eeprom(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void ixgb_standby_eeprom(struct ixgb_hw *hw)
static void
ixgb_standby_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
......@@ -222,7 +232,8 @@ static void ixgb_standby_eeprom(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void ixgb_clock_eeprom(struct ixgb_hw *hw)
static void
ixgb_clock_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
......@@ -245,7 +256,8 @@ static void ixgb_clock_eeprom(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void ixgb_cleanup_eeprom(struct ixgb_hw *hw)
static void
ixgb_cleanup_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
......@@ -270,7 +282,8 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw)
* TRUE: EEPROM data pin is high before timeout.
* FALSE: Time expired.
*****************************************************************************/
static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw)
static boolean_t
ixgb_wait_eeprom_command(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
uint32_t i;
......@@ -284,10 +297,10 @@ static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw)
* signal that the command has been completed by raising the DO signal.
* If DO does not go high in 10 milliseconds, then error out.
*/
for (i = 0; i < 200; i++) {
for(i = 0; i < 200; i++) {
eecd_reg = IXGB_READ_REG(hw, EECD);
if (eecd_reg & IXGB_EECD_DO)
if(eecd_reg & IXGB_EECD_DO)
return (TRUE);
udelay(50);
......@@ -309,15 +322,16 @@ static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw)
* TRUE: Checksum is valid
* FALSE: Checksum is not valid.
*****************************************************************************/
boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw * hw)
boolean_t
ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
{
uint16_t checksum = 0;
uint16_t i;
for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
checksum += ixgb_read_eeprom(hw, i);
if (checksum == (uint16_t) EEPROM_SUM)
if(checksum == (uint16_t) EEPROM_SUM)
return (TRUE);
else
return (FALSE);
......@@ -331,12 +345,13 @@ boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw * hw)
* Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
* Writes the difference to word offset 63 of the EEPROM.
*****************************************************************************/
void ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
void
ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
{
uint16_t checksum = 0;
uint16_t i;
for (i = 0; i < EEPROM_CHECKSUM_REG; i++)
for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
checksum += ixgb_read_eeprom(hw, i);
checksum = (uint16_t) EEPROM_SUM - checksum;
......@@ -356,7 +371,10 @@ void ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
* EEPROM will most likely contain an invalid checksum.
*
*****************************************************************************/
void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
void
ixgb_write_eeprom(struct ixgb_hw *hw,
uint16_t offset,
uint16_t data)
{
/* Prepare the EEPROM for writing */
ixgb_setup_eeprom(hw);
......@@ -404,7 +422,9 @@ void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
* Returns:
* The 16-bit value read from the eeprom
*****************************************************************************/
uint16_t ixgb_read_eeprom(struct ixgb_hw * hw, uint16_t offset)
uint16_t
ixgb_read_eeprom(struct ixgb_hw *hw,
uint16_t offset)
{
uint16_t data;
......@@ -437,7 +457,8 @@ uint16_t ixgb_read_eeprom(struct ixgb_hw * hw, uint16_t offset)
* TRUE: if eeprom read is successful
* FALSE: otherwise.
*****************************************************************************/
boolean_t ixgb_get_eeprom_data(struct ixgb_hw * hw)
boolean_t
ixgb_get_eeprom_data(struct ixgb_hw *hw)
{
uint16_t i;
uint16_t checksum = 0;
......@@ -448,7 +469,7 @@ boolean_t ixgb_get_eeprom_data(struct ixgb_hw * hw)
ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
DEBUGOUT("ixgb_ee: Reading eeprom data\n");
for (i = 0; i < IXGB_EEPROM_SIZE; i++) {
for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
uint16_t ee_data;
ee_data = ixgb_read_eeprom(hw, i);
checksum += ee_data;
......@@ -461,12 +482,12 @@ boolean_t ixgb_get_eeprom_data(struct ixgb_hw * hw)
}
if ((ee_map->init_ctrl_reg_1 & le16_to_cpu(EEPROM_ICW1_SIGNATURE_MASK))
!= le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
!= le16_to_cpu(EEPROM_ICW1_SIGNATURE_VALID)) {
DEBUGOUT("ixgb_ee: Signature invalid.\n");
return (FALSE);
return(FALSE);
}
return (TRUE);
return(TRUE);
}
/******************************************************************************
......@@ -479,7 +500,8 @@ boolean_t ixgb_get_eeprom_data(struct ixgb_hw * hw)
* TRUE: eeprom signature was good and the eeprom read was successful
* FALSE: otherwise.
******************************************************************************/
static boolean_t ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
static boolean_t
ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
......@@ -500,15 +522,16 @@ static boolean_t ixgb_check_and_get_eeprom_data(struct ixgb_hw *hw)
* Returns:
* Word at indexed offset in eeprom, if valid, 0 otherwise.
******************************************************************************/
uint16_t ixgb_get_eeprom_word(struct ixgb_hw * hw, uint16_t index)
uint16_t
ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
{
if ((index < IXGB_EEPROM_SIZE) &&
(ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
return (hw->eeprom[index]);
(ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
return(hw->eeprom[index]);
}
return (0);
return(0);
}
/******************************************************************************
......@@ -519,7 +542,9 @@ uint16_t ixgb_get_eeprom_word(struct ixgb_hw * hw, uint16_t index)
*
* Returns: None.
******************************************************************************/
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t * mac_addr)
void
ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
uint8_t *mac_addr)
{
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
......@@ -542,14 +567,15 @@ void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t * mac_addr)
* Returns:
* compatibility flags if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw)
uint16_t
ixgb_get_ee_compatibility(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->compatibility);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->compatibility);
return (0);
return(0);
}
/******************************************************************************
......@@ -560,13 +586,14 @@ uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw)
* Returns:
* PBA number if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint32_t ixgb_get_ee_pba_number(struct ixgb_hw * hw)
uint32_t
ixgb_get_ee_pba_number(struct ixgb_hw *hw)
{
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
| (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG]) << 16));
| (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
return (0);
return(0);
}
/******************************************************************************
......@@ -577,14 +604,15 @@ uint32_t ixgb_get_ee_pba_number(struct ixgb_hw * hw)
* Returns:
* Initialization Control Word 1 if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->init_ctrl_reg_1);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->init_ctrl_reg_1);
return (0);
return(0);
}
/******************************************************************************
......@@ -595,14 +623,15 @@ uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw * hw)
* Returns:
* Initialization Control Word 2 if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->init_ctrl_reg_2);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->init_ctrl_reg_2);
return (0);
return(0);
}
/******************************************************************************
......@@ -613,14 +642,15 @@ uint16_t ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw * hw)
* Returns:
* Subsystem Id if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->subsystem_id);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->subsystem_id);
return (0);
return(0);
}
/******************************************************************************
......@@ -631,14 +661,15 @@ uint16_t ixgb_get_ee_subsystem_id(struct ixgb_hw * hw)
* Returns:
* Sub Vendor Id if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->subvendor_id);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->subvendor_id);
return (0);
return(0);
}
/******************************************************************************
......@@ -649,14 +680,15 @@ uint16_t ixgb_get_ee_subvendor_id(struct ixgb_hw * hw)
* Returns:
* Device Id if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_device_id(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_device_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->device_id);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->device_id);
return (0);
return(0);
}
/******************************************************************************
......@@ -667,14 +699,15 @@ uint16_t ixgb_get_ee_device_id(struct ixgb_hw * hw)
* Returns:
* Device Id if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->vendor_id);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->vendor_id);
return (0);
return(0);
}
/******************************************************************************
......@@ -685,14 +718,15 @@ uint16_t ixgb_get_ee_vendor_id(struct ixgb_hw * hw)
* Returns:
* SDP Register if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw * hw)
uint16_t
ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->swdpins_reg);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->swdpins_reg);
return (0);
return(0);
}
/******************************************************************************
......@@ -703,14 +737,15 @@ uint16_t ixgb_get_ee_swdpins_reg(struct ixgb_hw * hw)
* Returns:
* D3 Power Management Bits if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint8_t ixgb_get_ee_d3_power(struct ixgb_hw * hw)
uint8_t
ixgb_get_ee_d3_power(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->d3_power);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->d3_power);
return (0);
return(0);
}
/******************************************************************************
......@@ -721,12 +756,13 @@ uint8_t ixgb_get_ee_d3_power(struct ixgb_hw * hw)
* Returns:
* D0 Power Management Bits if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint8_t ixgb_get_ee_d0_power(struct ixgb_hw * hw)
uint8_t
ixgb_get_ee_d0_power(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if (ixgb_check_and_get_eeprom_data(hw) == TRUE)
return (ee_map->d0_power);
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->d0_power);
return (0);
return(0);
}
......@@ -103,7 +103,7 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ecmd->port = PORT_FIBRE;
ecmd->transceiver = XCVR_EXTERNAL;
if (netif_carrier_ok(adapter->netdev)) {
if(netif_carrier_ok(adapter->netdev)) {
ecmd->speed = SPEED_10000;
ecmd->duplex = DUPLEX_FULL;
} else {
......@@ -119,8 +119,8 @@ static int
ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
struct ixgb_adapter *adapter = netdev->priv;
if (ecmd->autoneg == AUTONEG_ENABLE ||
ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
if(ecmd->autoneg == AUTONEG_ENABLE ||
ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
return -EINVAL;
if(netif_running(adapter->netdev)) {
......@@ -279,8 +279,8 @@ ixgb_get_regs(struct net_device *netdev,
*reg++ = IXGB_READ_REG(hw, RXCSUM); /* 20 */
for (i = 0; i < IXGB_RAR_ENTRIES; i++) {
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAL, (i << 1)); /*21,...,51 */
*reg++ = IXGB_READ_REG_ARRAY(hw, RAH, (i << 1)); /*22,...,52 */
}
/* Transmit */
......@@ -469,11 +469,11 @@ ixgb_set_eeprom(struct net_device *netdev,
eeprom_buff[0] = ixgb_read_eeprom(hw, first_word);
ptr++;
}
if ((eeprom->offset + eeprom->len) & 1) {
if((eeprom->offset + eeprom->len) & 1) {
/* need read/modify/write of last changed EEPROM word */
/* only the first byte of the word is being modified */
eeprom_buff[last_word - first_word]
= ixgb_read_eeprom(hw, last_word);
eeprom_buff[last_word - first_word]
= ixgb_read_eeprom(hw, last_word);
}
memcpy(ptr, bytes, eeprom->len);
......@@ -481,7 +481,7 @@ ixgb_set_eeprom(struct net_device *netdev,
ixgb_write_eeprom(hw, first_word + i, eeprom_buff[i]);
/* Update the checksum over the first part of the EEPROM if needed */
if (first_word <= EEPROM_CHECKSUM_REG)
if(first_word <= EEPROM_CHECKSUM_REG)
ixgb_update_eeprom_checksum(hw);
kfree(eeprom_buff);
......@@ -586,11 +586,12 @@ ixgb_set_ringparam(struct net_device *netdev,
/* bit defines for adapter->led_status */
#define IXGB_LED_ON 0
static void ixgb_led_blink_callback(unsigned long data)
static void
ixgb_led_blink_callback(unsigned long data)
{
struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
if (test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
if(test_and_change_bit(IXGB_LED_ON, &adapter->led_status))
ixgb_led_off(&adapter->hw);
else
ixgb_led_on(&adapter->hw);
......
......@@ -53,9 +53,14 @@ uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
{
uint32_t ctrl_reg;
ctrl_reg = IXGB_CTRL0_RST | IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
IXGB_CTRL0_SDP2_DIR | IXGB_CTRL0_SDP1_DIR | IXGB_CTRL0_SDP0_DIR | IXGB_CTRL0_SDP3 | /* Initial value 1101 */
IXGB_CTRL0_SDP2 | IXGB_CTRL0_SDP0;
ctrl_reg = IXGB_CTRL0_RST |
IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
IXGB_CTRL0_SDP2_DIR |
IXGB_CTRL0_SDP1_DIR |
IXGB_CTRL0_SDP0_DIR |
IXGB_CTRL0_SDP3 | /* Initial value 1101 */
IXGB_CTRL0_SDP2 |
IXGB_CTRL0_SDP0;
#ifdef HP_ZX1
/* Workaround for 82597EX reset errata */
......@@ -84,7 +89,8 @@ uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
boolean_t ixgb_adapter_stop(struct ixgb_hw * hw)
boolean_t
ixgb_adapter_stop(struct ixgb_hw *hw)
{
uint32_t ctrl_reg;
uint32_t icr_reg;
......@@ -94,7 +100,7 @@ boolean_t ixgb_adapter_stop(struct ixgb_hw * hw)
/* If we are stopped or resetting exit gracefully and wait to be
* started again before accessing the hardware.
*/
if (hw->adapter_stopped) {
if(hw->adapter_stopped) {
DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
return FALSE;
}
......@@ -135,6 +141,7 @@ boolean_t ixgb_adapter_stop(struct ixgb_hw * hw)
return (ctrl_reg & IXGB_CTRL0_RST);
}
/******************************************************************************
* Identifies the vendor of the optics module on the adapter. The SR adapters
* support two different types of XPAK optics, so it is necessary to determine
......@@ -144,7 +151,8 @@ boolean_t ixgb_adapter_stop(struct ixgb_hw * hw)
*
* Returns: the vendor of the XPAK optics module.
*****************************************************************************/
static ixgb_xpak_vendor ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
static ixgb_xpak_vendor
ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
{
uint32_t i;
uint16_t vendor_name[5];
......@@ -183,7 +191,8 @@ static ixgb_xpak_vendor ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
*
* Returns: the phy type of the adapter.
*****************************************************************************/
static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw)
static ixgb_phy_type
ixgb_identify_phy(struct ixgb_hw *hw)
{
ixgb_phy_type phy_type;
ixgb_xpak_vendor xpak_vendor;
......@@ -242,7 +251,8 @@ static ixgb_phy_type ixgb_identify_phy(struct ixgb_hw *hw)
* TRUE if successful,
* FALSE if unrecoverable problems were encountered.
*****************************************************************************/
boolean_t ixgb_init_hw(struct ixgb_hw * hw)
boolean_t
ixgb_init_hw(struct ixgb_hw *hw)
{
uint32_t i;
uint32_t ctrl_reg;
......@@ -271,7 +281,7 @@ boolean_t ixgb_init_hw(struct ixgb_hw * hw)
msec_delay(IXGB_DELAY_AFTER_EE_RESET);
if (ixgb_get_eeprom_data(hw) == FALSE) {
return (FALSE);
return(FALSE);
}
/* Use the device id to determine the type of phy/transceiver. */
......@@ -289,7 +299,7 @@ boolean_t ixgb_init_hw(struct ixgb_hw * hw)
*/
if (!mac_addr_valid(hw->curr_mac_addr)) {
DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
return (FALSE);
return(FALSE);
}
/* tell the routines in this file they can access hardware again */
......@@ -300,7 +310,7 @@ boolean_t ixgb_init_hw(struct ixgb_hw * hw)
/* Zero out the Multicast HASH table */
DEBUGOUT("Zeroing the MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++)
for(i = 0; i < IXGB_MC_TBL_SIZE; i++)
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
/* Zero out the VLAN Filter Table Array */
......@@ -327,7 +337,8 @@ boolean_t ixgb_init_hw(struct ixgb_hw * hw)
* of the receive addresss registers. Clears the multicast table. Assumes
* the receiver is in reset when the routine is called.
*****************************************************************************/
void ixgb_init_rx_addrs(struct ixgb_hw *hw)
void
ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
uint32_t i;
......@@ -365,7 +376,7 @@ void ixgb_init_rx_addrs(struct ixgb_hw *hw)
/* Zero out the other 15 receive addresses. */
DEBUGOUT("Clearing RAR[1-15]\n");
for (i = 1; i < IXGB_RAR_ENTRIES; i++) {
for(i = 1; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
}
......@@ -388,12 +399,13 @@ void ixgb_init_rx_addrs(struct ixgb_hw *hw)
*****************************************************************************/
void
ixgb_mc_addr_list_update(struct ixgb_hw *hw,
uint8_t * mc_addr_list,
uint32_t mc_addr_count, uint32_t pad)
uint8_t *mc_addr_list,
uint32_t mc_addr_count,
uint32_t pad)
{
uint32_t hash_value;
uint32_t i;
uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
DEBUGFUNC("ixgb_mc_addr_list_update");
......@@ -402,19 +414,19 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
/* Clear RAR[1-15] */
DEBUGOUT(" Clearing RAR[1-15]\n");
for (i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
for(i = rar_used_count; i < IXGB_RAR_ENTRIES; i++) {
IXGB_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
IXGB_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
}
/* Clear the MTA */
DEBUGOUT(" Clearing MTA\n");
for (i = 0; i < IXGB_MC_TBL_SIZE; i++) {
for(i = 0; i < IXGB_MC_TBL_SIZE; i++) {
IXGB_WRITE_REG_ARRAY(hw, MTA, i, 0);
}
/* Add the new addresses */
for (i = 0; i < mc_addr_count; i++) {
for(i = 0; i < mc_addr_count; i++) {
DEBUGOUT(" Adding the multicast addresses:\n");
DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
mc_addr_list[i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)],
......@@ -432,7 +444,7 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
/* Place this multicast address in the RAR if there is room, *
* else put it in the MTA
*/
if (rar_used_count < IXGB_RAR_ENTRIES) {
if(rar_used_count < IXGB_RAR_ENTRIES) {
ixgb_rar_set(hw,
mc_addr_list +
(i * (IXGB_ETH_LENGTH_OF_ADDRESS + pad)),
......@@ -465,7 +477,9 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
* Returns:
* The hash value
*****************************************************************************/
static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr)
static uint32_t
ixgb_hash_mc_addr(struct ixgb_hw *hw,
uint8_t *mc_addr)
{
uint32_t hash_value = 0;
......@@ -511,7 +525,9 @@ static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr)
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*****************************************************************************/
static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value)
static void
ixgb_mta_set(struct ixgb_hw *hw,
uint32_t hash_value)
{
uint32_t hash_bit, hash_reg;
uint32_t mta_reg;
......@@ -543,7 +559,10 @@ static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value)
* addr - Address to put into receive address register
* index - Receive address register to write
*****************************************************************************/
void ixgb_rar_set(struct ixgb_hw *hw, uint8_t * addr, uint32_t index)
void
ixgb_rar_set(struct ixgb_hw *hw,
uint8_t *addr,
uint32_t index)
{
uint32_t rar_low, rar_high;
......@@ -553,11 +572,13 @@ void ixgb_rar_set(struct ixgb_hw *hw, uint8_t * addr, uint32_t index)
* from network order (big endian) to little endian
*/
rar_low = ((uint32_t) addr[0] |
((uint32_t) addr[1] << 8) |
((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
((uint32_t)addr[1] << 8) |
((uint32_t)addr[2] << 16) |
((uint32_t)addr[3] << 24));
rar_high = ((uint32_t) addr[4] |
((uint32_t) addr[5] << 8) | IXGB_RAH_AV);
((uint32_t)addr[5] << 8) |
IXGB_RAH_AV);
IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
IXGB_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
......@@ -571,7 +592,10 @@ void ixgb_rar_set(struct ixgb_hw *hw, uint8_t * addr, uint32_t index)
* offset - Offset in VLAN filer table to write
* value - Value to write into VLAN filter table
*****************************************************************************/
void ixgb_write_vfta(struct ixgb_hw *hw, uint32_t offset, uint32_t value)
void
ixgb_write_vfta(struct ixgb_hw *hw,
uint32_t offset,
uint32_t value)
{
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
return;
......@@ -582,11 +606,12 @@ void ixgb_write_vfta(struct ixgb_hw *hw, uint32_t offset, uint32_t value)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
void ixgb_clear_vfta(struct ixgb_hw *hw)
void
ixgb_clear_vfta(struct ixgb_hw *hw)
{
uint32_t offset;
for (offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
return;
}
......@@ -597,10 +622,11 @@ void ixgb_clear_vfta(struct ixgb_hw *hw)
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
boolean_t ixgb_setup_fc(struct ixgb_hw * hw)
boolean_t
ixgb_setup_fc(struct ixgb_hw *hw)
{
uint32_t ctrl_reg;
uint32_t pap_reg = 0; /* by default, assume no pause time */
uint32_t pap_reg = 0; /* by default, assume no pause time */
boolean_t status = TRUE;
DEBUGFUNC("ixgb_setup_fc");
......@@ -665,16 +691,16 @@ boolean_t ixgb_setup_fc(struct ixgb_hw * hw)
* ability to transmit pause frames in not enabled, then these
* registers will be set to 0.
*/
if (!(hw->fc.type & ixgb_fc_tx_pause)) {
if(!(hw->fc.type & ixgb_fc_tx_pause)) {
IXGB_WRITE_REG(hw, FCRTL, 0);
IXGB_WRITE_REG(hw, FCRTH, 0);
} else {
/* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of XON frames.
*/
if (hw->fc.send_xon) {
/* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of XON
* frames. */
if(hw->fc.send_xon) {
IXGB_WRITE_REG(hw, FCRTL,
(hw->fc.low_water | IXGB_FCRTL_XONE));
(hw->fc.low_water | IXGB_FCRTL_XONE));
} else {
IXGB_WRITE_REG(hw, FCRTL, hw->fc.low_water);
}
......@@ -699,9 +725,10 @@ boolean_t ixgb_setup_fc(struct ixgb_hw * hw)
* read command.
*****************************************************************************/
uint16_t
ixgb_read_phy_reg(struct ixgb_hw * hw,
uint32_t reg_address,
uint32_t phy_address, uint32_t device_type)
ixgb_read_phy_reg(struct ixgb_hw *hw,
uint32_t reg_address,
uint32_t phy_address,
uint32_t device_type)
{
uint32_t i;
uint32_t data;
......@@ -726,7 +753,8 @@ ixgb_read_phy_reg(struct ixgb_hw * hw,
** from the CPU Write to the Ready bit assertion.
**************************************************************/
for (i = 0; i < 10; i++) {
for(i = 0; i < 10; i++)
{
udelay(10);
command = IXGB_READ_REG(hw, MSCA);
......@@ -752,7 +780,8 @@ ixgb_read_phy_reg(struct ixgb_hw * hw,
** from the CPU Write to the Ready bit assertion.
**************************************************************/
for (i = 0; i < 10; i++) {
for(i = 0; i < 10; i++)
{
udelay(10);
command = IXGB_READ_REG(hw, MSCA);
......@@ -768,7 +797,7 @@ ixgb_read_phy_reg(struct ixgb_hw * hw,
*/
data = IXGB_READ_REG(hw, MSRWD);
data >>= IXGB_MSRWD_READ_DATA_SHIFT;
return ((uint16_t) data);
return((uint16_t) data);
}
/******************************************************************************
......@@ -790,8 +819,10 @@ ixgb_read_phy_reg(struct ixgb_hw * hw,
*****************************************************************************/
void
ixgb_write_phy_reg(struct ixgb_hw *hw,
uint32_t reg_address,
uint32_t phy_address, uint32_t device_type, uint16_t data)
uint32_t reg_address,
uint32_t phy_address,
uint32_t device_type,
uint16_t data)
{
uint32_t i;
uint32_t command = 0;
......@@ -801,24 +832,25 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
/* Put the data in the MDIO Read/Write Data register */
IXGB_WRITE_REG(hw, MSRWD, (uint32_t) data);
IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
/* Setup and write the address cycle command */
command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
(phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
(IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
(phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
(IXGB_MSCA_ADDR_CYCLE | IXGB_MSCA_MDI_COMMAND));
IXGB_WRITE_REG(hw, MSCA, command);
/**************************************************************
** Check every 10 usec to see if the address cycle completed
** The COMMAND bit will clear when the operation is complete.
** This may take as long as 64 usecs (we'll wait 100 usecs max)
** from the CPU Write to the Ready bit assertion.
**************************************************************/
/**************************************************************
** Check every 10 usec to see if the address cycle completed
** The COMMAND bit will clear when the operation is complete.
** This may take as long as 64 usecs (we'll wait 100 usecs max)
** from the CPU Write to the Ready bit assertion.
**************************************************************/
for (i = 0; i < 10; i++) {
for(i = 0; i < 10; i++)
{
udelay(10);
command = IXGB_READ_REG(hw, MSCA);
......@@ -830,21 +862,22 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
ASSERT((command & IXGB_MSCA_MDI_COMMAND) == 0);
/* Address cycle complete, setup and write the write command */
command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
(phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
(IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
(device_type << IXGB_MSCA_DEV_TYPE_SHIFT) |
(phy_address << IXGB_MSCA_PHY_ADDR_SHIFT) |
(IXGB_MSCA_WRITE | IXGB_MSCA_MDI_COMMAND));
IXGB_WRITE_REG(hw, MSCA, command);
/**************************************************************
** Check every 10 usec to see if the read command completed
** The COMMAND bit will clear when the operation is complete.
** The write may take as long as 64 usecs (we'll wait 100 usecs max)
** from the CPU Write to the Ready bit assertion.
**************************************************************/
/**************************************************************
** Check every 10 usec to see if the read command completed
** The COMMAND bit will clear when the operation is complete.
** The write may take as long as 64 usecs (we'll wait 100 usecs max)
** from the CPU Write to the Ready bit assertion.
**************************************************************/
for (i = 0; i < 10; i++) {
for(i = 0; i < 10; i++)
{
udelay(10);
command = IXGB_READ_REG(hw, MSCA);
......@@ -865,7 +898,8 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
*
* Called by any function that needs to check the link status of the adapter.
*****************************************************************************/
void ixgb_check_for_link(struct ixgb_hw *hw)
void
ixgb_check_for_link(struct ixgb_hw *hw)
{
uint32_t status_reg;
uint32_t xpcss_reg;
......@@ -927,14 +961,15 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
void ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
volatile uint32_t temp_reg;
DEBUGFUNC("ixgb_clear_hw_cntrs");
/* if we are stopped or resetting exit gracefully */
if (hw->adapter_stopped) {
if(hw->adapter_stopped) {
DEBUGOUT("Exiting because the adapter is stopped!!!\n");
return;
}
......@@ -1007,7 +1042,8 @@ void ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
void ixgb_led_on(struct ixgb_hw *hw)
void
ixgb_led_on(struct ixgb_hw *hw)
{
uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
......@@ -1022,7 +1058,8 @@ void ixgb_led_on(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
void ixgb_led_off(struct ixgb_hw *hw)
void
ixgb_led_off(struct ixgb_hw *hw)
{
uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
......@@ -1037,18 +1074,19 @@ void ixgb_led_off(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static void ixgb_get_bus_info(struct ixgb_hw *hw)
static void
ixgb_get_bus_info(struct ixgb_hw *hw)
{
uint32_t status_reg;
status_reg = IXGB_READ_REG(hw, STATUS);
hw->bus.type = (status_reg & IXGB_STATUS_PCIX_MODE) ?
ixgb_bus_type_pcix : ixgb_bus_type_pci;
ixgb_bus_type_pcix : ixgb_bus_type_pci;
if (hw->bus.type == ixgb_bus_type_pci) {
hw->bus.speed = (status_reg & IXGB_STATUS_PCI_SPD) ?
ixgb_bus_speed_66 : ixgb_bus_speed_33;
ixgb_bus_speed_66 : ixgb_bus_speed_33;
} else {
switch (status_reg & IXGB_STATUS_PCIX_SPD_MASK) {
case IXGB_STATUS_PCIX_SPD_66:
......@@ -1067,7 +1105,7 @@ static void ixgb_get_bus_info(struct ixgb_hw *hw)
}
hw->bus.width = (status_reg & IXGB_STATUS_BUS64) ?
ixgb_bus_width_64 : ixgb_bus_width_32;
ixgb_bus_width_64 : ixgb_bus_width_32;
return;
}
......@@ -1078,7 +1116,8 @@ static void ixgb_get_bus_info(struct ixgb_hw *hw)
* mac_addr - pointer to MAC address.
*
*****************************************************************************/
boolean_t mac_addr_valid(uint8_t * mac_addr)
boolean_t
mac_addr_valid(uint8_t *mac_addr)
{
boolean_t is_valid = TRUE;
DEBUGFUNC("mac_addr_valid");
......@@ -1095,9 +1134,11 @@ boolean_t mac_addr_valid(uint8_t * mac_addr)
}
/* Reject the zero address */
else if (mac_addr[0] == 0 &&
mac_addr[1] == 0 &&
mac_addr[2] == 0 &&
mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) {
mac_addr[1] == 0 &&
mac_addr[2] == 0 &&
mac_addr[3] == 0 &&
mac_addr[4] == 0 &&
mac_addr[5] == 0) {
DEBUGOUT("MAC address is all zeros\n");
is_valid = FALSE;
}
......@@ -1110,7 +1151,8 @@ boolean_t mac_addr_valid(uint8_t * mac_addr)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
boolean_t ixgb_link_reset(struct ixgb_hw * hw)
boolean_t
ixgb_link_reset(struct ixgb_hw *hw)
{
boolean_t link_status = FALSE;
uint8_t wait_retries = MAX_RESET_ITERATIONS;
......@@ -1140,20 +1182,22 @@ boolean_t ixgb_link_reset(struct ixgb_hw * hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
void ixgb_optics_reset(struct ixgb_hw *hw)
void
ixgb_optics_reset(struct ixgb_hw *hw)
{
if (hw->phy_type == ixgb_phy_type_txn17401) {
uint16_t mdio_reg;
ixgb_write_phy_reg(hw,
MDIO_PMA_PMD_CR1,
IXGB_PHY_ADDRESS,
MDIO_PMA_PMD_DID, MDIO_PMA_PMD_CR1_RESET);
mdio_reg = ixgb_read_phy_reg(hw,
MDIO_PMA_PMD_CR1,
IXGB_PHY_ADDRESS,
MDIO_PMA_PMD_DID);
MDIO_PMA_PMD_CR1,
IXGB_PHY_ADDRESS,
MDIO_PMA_PMD_DID,
MDIO_PMA_PMD_CR1_RESET);
mdio_reg = ixgb_read_phy_reg( hw,
MDIO_PMA_PMD_CR1,
IXGB_PHY_ADDRESS,
MDIO_PMA_PMD_DID);
}
return;
......
......@@ -621,12 +621,12 @@ struct ixgb_context_desc {
#define IXGB_RAR_ENTRIES 3 /* Number of entries in Rx Address array */
#define IXGB_MEMORY_REGISTER_BASE_ADDRESS 0
#define ENET_HEADER_SIZE 14
#define ENET_FCS_LENGTH 4
#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
#define ENET_HEADER_SIZE 14
#define ENET_FCS_LENGTH 4
#define IXGB_MAX_NUM_MULTICAST_ADDRESSES 128
#define IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS 60
#define IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS 1514
#define IXGB_MAX_JUMBO_FRAME_SIZE 0x3F00
/* Phy Addresses */
#define IXGB_OPTICAL_PHY_ADDR 0x0 /* Optical Module phy address */
......@@ -789,32 +789,39 @@ extern void ixgb_check_for_link(struct ixgb_hw *hw);
extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw);
extern boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
extern void ixgb_clear_hw_cntrs(struct ixgb_hw *hw);
extern boolean_t mac_addr_valid(uint8_t * mac_addr);
extern boolean_t mac_addr_valid(uint8_t *mac_addr);
extern uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
uint32_t reg_addr,
uint32_t phy_addr, uint32_t device_type);
uint32_t reg_addr,
uint32_t phy_addr,
uint32_t device_type);
extern void ixgb_write_phy_reg(struct ixgb_hw *hw,
uint32_t reg_addr,
uint32_t phy_addr,
uint32_t device_type, uint16_t data);
uint32_t reg_addr,
uint32_t phy_addr,
uint32_t device_type,
uint16_t data);
extern void ixgb_rar_set(struct ixgb_hw *hw,
uint8_t *addr,
uint32_t index);
extern void ixgb_rar_set(struct ixgb_hw *hw, uint8_t * addr, uint32_t index);
/* Filters (multicast, vlan, receive) */
extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
uint8_t * mc_addr_list,
uint32_t mc_addr_count, uint32_t pad);
uint8_t *mc_addr_list,
uint32_t mc_addr_count,
uint32_t pad);
/* Vfta functions */
extern void ixgb_write_vfta(struct ixgb_hw *hw,
uint32_t offset, uint32_t value);
uint32_t offset,
uint32_t value);
extern void ixgb_clear_vfta(struct ixgb_hw *hw);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t * mac_addr);
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
uint16_t ixgb_get_ee_compatibility(struct ixgb_hw *hw);
uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
uint16_t ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw);
......@@ -832,6 +839,9 @@ uint16_t ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
/* Everything else */
void ixgb_led_on(struct ixgb_hw *hw);
void ixgb_led_off(struct ixgb_hw *hw);
void ixgb_write_pci_cfg(struct ixgb_hw *hw, uint32_t reg, uint16_t * value);
void ixgb_write_pci_cfg(struct ixgb_hw *hw,
uint32_t reg,
uint16_t * value);
#endif /* _IXGB_HW_H_ */
#endif /* _IXGB_HW_H_ */
......@@ -33,22 +33,16 @@
** The Device and Vendor IDs for 10 Gigabit MACs
**********************************************************************/
#define INTEL_VENDOR_ID 0x8086
#define INTEL_SUBVENDOR_ID 0x8086
#define INTEL_VENDOR_ID 0x8086
#define INTEL_SUBVENDOR_ID 0x8086
#define IXGB_DEVICE_ID_82597EX 0x1048
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
#define IXGB_SUBDEVICE_ID_A15F 0xA15F
#define IXGB_SUBDEVICE_ID_A05F 0xA05F
#define IXGB_SUBDEVICE_ID_A12F 0xA12F
#define IXGB_SUBDEVICE_ID_A02F 0xA02F
#define IXGB_DEVICE_ID_82597EX 0x1048
#define IXGB_DEVICE_ID_82597EX_SR 0x1A48
#define IXGB_DEVICE_ID_82597EX_LR 0x1B48
#define IXGB_SUBDEVICE_ID_A11F 0xA11F
#define IXGB_SUBDEVICE_ID_A01F 0xA01F
#endif /* #ifndef _IXGB_IDS_H_ */
#endif /* #ifndef _IXGB_IDS_H_ */
/* End of File */
......@@ -70,8 +70,6 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter);
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter);
int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
......@@ -101,9 +99,6 @@ static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
static inline void ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc,
struct sk_buff *skb);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
......@@ -167,7 +162,8 @@ MODULE_LICENSE("GPL");
* loaded. All it does is register with the PCI subsystem.
**/
static int __init ixgb_init_module(void)
static int __init
ixgb_init_module(void)
{
int ret;
printk(KERN_INFO "%s - version %s\n",
......@@ -176,7 +172,7 @@ static int __init ixgb_init_module(void)
printk(KERN_INFO "%s\n", ixgb_copyright);
ret = pci_module_init(&ixgb_driver);
if (ret >= 0) {
if(ret >= 0) {
register_reboot_notifier(&ixgb_notifier_reboot);
}
return ret;
......@@ -191,7 +187,8 @@ module_init(ixgb_init_module);
* from memory.
**/
static void __exit ixgb_exit_module(void)
static void __exit
ixgb_exit_module(void)
{
unregister_reboot_notifier(&ixgb_notifier_reboot);
pci_unregister_driver(&ixgb_driver);
......@@ -204,7 +201,8 @@ module_exit(ixgb_exit_module);
* @adapter: board private structure
**/
static inline void ixgb_irq_disable(struct ixgb_adapter *adapter)
static inline void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
......@@ -217,17 +215,19 @@ static inline void ixgb_irq_disable(struct ixgb_adapter *adapter)
* @adapter: board private structure
**/
static inline void ixgb_irq_enable(struct ixgb_adapter *adapter)
static inline void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if (atomic_dec_and_test(&adapter->irq_sem)) {
if(atomic_dec_and_test(&adapter->irq_sem)) {
IXGB_WRITE_REG(&adapter->hw, IMS,
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_RXO | IXGB_INT_LSC);
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_RXO | IXGB_INT_LSC);
IXGB_WRITE_FLUSH(&adapter->hw);
}
}
int ixgb_up(struct ixgb_adapter *adapter)
int
ixgb_up(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
int err;
......@@ -270,19 +270,19 @@ int ixgb_up(struct ixgb_adapter *adapter)
/* disable interrupts and get the hardware into a known state */
IXGB_WRITE_REG(&adapter->hw, IMC, 0xffffffff);
if ((hw->max_frame_size != max_frame) ||
(hw->max_frame_size !=
(IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
if((hw->max_frame_size != max_frame) ||
(hw->max_frame_size !=
(IXGB_READ_REG(hw, MFS) >> IXGB_MFS_SHIFT))) {
hw->max_frame_size = max_frame;
IXGB_WRITE_REG(hw, MFS, hw->max_frame_size << IXGB_MFS_SHIFT);
if (hw->max_frame_size >
IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
if(hw->max_frame_size >
IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
if (!(ctrl0 & IXGB_CTRL0_JFE)) {
if(!(ctrl0 & IXGB_CTRL0_JFE)) {
ctrl0 |= IXGB_CTRL0_JFE;
IXGB_WRITE_REG(hw, CTRL0, ctrl0);
}
......@@ -295,7 +295,8 @@ int ixgb_up(struct ixgb_adapter *adapter)
return 0;
}
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
{
struct net_device *netdev = adapter->netdev;
......@@ -318,11 +319,12 @@ void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
ixgb_clean_rx_ring(adapter);
}
void ixgb_reset(struct ixgb_adapter *adapter)
void
ixgb_reset(struct ixgb_adapter *adapter)
{
ixgb_adapter_stop(&adapter->hw);
if (!ixgb_init_hw(&adapter->hw))
if(!ixgb_init_hw(&adapter->hw))
IXGB_DBG("ixgb_init_hw failed.\n");
}
......@@ -339,7 +341,8 @@ void ixgb_reset(struct ixgb_adapter *adapter)
**/
static int __devinit
ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgb_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct net_device *netdev = NULL;
struct ixgb_adapter *adapter;
......@@ -350,26 +353,26 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int i;
int err;
if ((err = pci_enable_device(pdev)))
if((err = pci_enable_device(pdev)))
return err;
if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
if(!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
pci_using_dac = 1;
} else {
if ((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
if((err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) {
IXGB_ERR("No usable DMA configuration, aborting\n");
return err;
}
pci_using_dac = 0;
}
if ((err = pci_request_regions(pdev, ixgb_driver_name)))
if((err = pci_request_regions(pdev, ixgb_driver_name)))
return err;
pci_set_master(pdev);
netdev = alloc_etherdev(sizeof(struct ixgb_adapter));
if (!netdev) {
if(!netdev) {
err = -ENOMEM;
goto err_alloc_etherdev;
}
......@@ -387,15 +390,15 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mmio_len = pci_resource_len(pdev, BAR_0);
adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
if (!adapter->hw.hw_addr) {
if(!adapter->hw.hw_addr) {
err = -EIO;
goto err_ioremap;
}
for (i = BAR_1; i <= BAR_5; i++) {
if (pci_resource_len(pdev, i) == 0)
for(i = BAR_1; i <= BAR_5; i++) {
if(pci_resource_len(pdev, i) == 0)
continue;
if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
if(pci_resource_flags(pdev, i) & IORESOURCE_IO) {
adapter->hw.io_base = pci_resource_start(pdev, i);
break;
}
......@@ -432,22 +435,24 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* setup the private structure */
if ((err = ixgb_sw_init(adapter)))
if((err = ixgb_sw_init(adapter)))
goto err_sw_init;
netdev->features = NETIF_F_SG |
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
NETIF_F_HW_CSUM |
NETIF_F_HW_VLAN_TX |
NETIF_F_HW_VLAN_RX |
NETIF_F_HW_VLAN_FILTER;
#ifdef NETIF_F_TSO
netdev->features |= NETIF_F_TSO;
#endif
if (pci_using_dac)
if(pci_using_dac)
netdev->features |= NETIF_F_HIGHDMA;
/* make sure the EEPROM is good */
if (!ixgb_validate_eeprom_checksum(&adapter->hw)) {
if(!ixgb_validate_eeprom_checksum(&adapter->hw)) {
printk(KERN_ERR "The EEPROM Checksum Is Not Valid\n");
err = -EIO;
goto err_eeprom;
......@@ -455,7 +460,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ixgb_get_ee_mac_addr(&adapter->hw, netdev->dev_addr);
if (!is_valid_ether_addr(netdev->dev_addr)) {
if(!is_valid_ether_addr(netdev->dev_addr)) {
err = -EIO;
goto err_eeprom;
}
......@@ -469,7 +474,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->tx_timeout_task,
(void (*)(void *))ixgb_tx_timeout_task, netdev);
if ((err = register_netdev(netdev)))
if((err = register_netdev(netdev)))
goto err_register;
/* we're going to reset, so assume we have no link for now */
......@@ -478,7 +483,7 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_stop_queue(netdev);
printk(KERN_INFO "%s: Intel(R) PRO/10GbE Network Connection\n",
netdev->name);
netdev->name);
ixgb_check_options(adapter);
/* reset the hardware with the new settings */
......@@ -487,13 +492,13 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
cards_found++;
return 0;
err_register:
err_sw_init:
err_eeprom:
err_register:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
err_ioremap:
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
err_alloc_etherdev:
pci_release_regions(pdev);
return err;
}
......@@ -508,7 +513,8 @@ ixgb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* memory.
**/
static void __devexit ixgb_remove(struct pci_dev *pdev)
static void __devexit
ixgb_remove(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
......@@ -530,7 +536,8 @@ static void __devexit ixgb_remove(struct pci_dev *pdev)
* OS network device settings (MTU size).
**/
static int __devinit ixgb_sw_init(struct ixgb_adapter *adapter)
static int __devinit
ixgb_sw_init(struct ixgb_adapter *adapter)
{
struct ixgb_hw *hw = &adapter->hw;
struct net_device *netdev = adapter->netdev;
......@@ -578,31 +585,32 @@ static int __devinit ixgb_sw_init(struct ixgb_adapter *adapter)
* and the stack is notified that the interface is ready.
**/
static int ixgb_open(struct net_device *netdev)
static int
ixgb_open(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
int err;
/* allocate transmit descriptors */
if ((err = ixgb_setup_tx_resources(adapter)))
if((err = ixgb_setup_tx_resources(adapter)))
goto err_setup_tx;
/* allocate receive descriptors */
if ((err = ixgb_setup_rx_resources(adapter)))
if((err = ixgb_setup_rx_resources(adapter)))
goto err_setup_rx;
if ((err = ixgb_up(adapter)))
if((err = ixgb_up(adapter)))
goto err_up;
return 0;
err_up:
err_up:
ixgb_free_rx_resources(adapter);
err_setup_rx:
err_setup_rx:
ixgb_free_tx_resources(adapter);
err_setup_tx:
err_setup_tx:
ixgb_reset(adapter);
return err;
......@@ -620,7 +628,8 @@ static int ixgb_open(struct net_device *netdev)
* hardware, and all transmit and receive resources are freed.
**/
static int ixgb_close(struct net_device *netdev)
static int
ixgb_close(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
......@@ -647,8 +656,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
int size;
size = sizeof(struct ixgb_buffer) * txdr->count;
if (!txdr->buffer_info) {
txdr->buffer_info = vmalloc(size);
if(!txdr->buffer_info) {
return -ENOMEM;
}
memset(txdr->buffer_info, 0, size);
......@@ -659,7 +668,7 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
IXGB_ROUNDUP(txdr->size, 4096);
txdr->desc = pci_alloc_consistent(pdev, txdr->size, &txdr->dma);
if (!txdr->desc) {
if(!txdr->desc) {
vfree(txdr->buffer_info);
return -ENOMEM;
}
......@@ -678,7 +687,8 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
* Configure the Tx unit of the MAC after a reset.
**/
static void ixgb_configure_tx(struct ixgb_adapter *adapter)
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
uint64_t tdba = adapter->tx_ring.dma;
uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
......@@ -718,8 +728,8 @@ static void ixgb_configure_tx(struct ixgb_adapter *adapter)
/* Setup Transmit Descriptor Settings for this adapter */
adapter->tx_cmd_type =
IXGB_TX_DESC_TYPE
| (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
IXGB_TX_DESC_TYPE
| (adapter->tx_int_delay_enable ? IXGB_TX_DESC_CMD_IDE : 0);
}
/**
......@@ -737,8 +747,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
int size;
size = sizeof(struct ixgb_buffer) * rxdr->count;
if (!rxdr->buffer_info) {
rxdr->buffer_info = vmalloc(size);
if(!rxdr->buffer_info) {
return -ENOMEM;
}
memset(rxdr->buffer_info, 0, size);
......@@ -750,7 +760,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
rxdr->desc = pci_alloc_consistent(pdev, rxdr->size, &rxdr->dma);
if (!rxdr->desc) {
if(!rxdr->desc) {
vfree(rxdr->buffer_info);
return -ENOMEM;
}
......@@ -767,7 +777,8 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
* @adapter: Board private structure
**/
static void ixgb_setup_rctl(struct ixgb_adapter *adapter)
static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
uint32_t rctl;
......@@ -776,9 +787,9 @@ static void ixgb_setup_rctl(struct ixgb_adapter *adapter)
rctl &= ~(3 << IXGB_RCTL_MO_SHIFT);
rctl |=
IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
IXGB_RCTL_BAM | IXGB_RCTL_RDMTS_1_2 |
IXGB_RCTL_RXEN | IXGB_RCTL_CFF |
(adapter->hw.mc_filter_type << IXGB_RCTL_MO_SHIFT);
rctl |= IXGB_RCTL_SECRC;
......@@ -808,7 +819,8 @@ static void ixgb_setup_rctl(struct ixgb_adapter *adapter)
* Configure the Rx unit of the MAC after a reset.
**/
static void ixgb_configure_rx(struct ixgb_adapter *adapter)
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
uint64_t rdba = adapter->rx_ring.dma;
uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
......@@ -837,14 +849,14 @@ static void ixgb_configure_rx(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(hw, RDH, 0);
IXGB_WRITE_REG(hw, RDT, 0);
/* burst 16 or burst when RXT0 */
rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
/* burst 16 or burst when RXT0*/
rxdctl = RXDCTL_WTHRESH_DEFAULT << IXGB_RXDCTL_WTHRESH_SHIFT
| RXDCTL_HTHRESH_DEFAULT << IXGB_RXDCTL_HTHRESH_SHIFT
| RXDCTL_PTHRESH_DEFAULT << IXGB_RXDCTL_PTHRESH_SHIFT;
IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
/* Enable Receive Checksum Offload for TCP and UDP */
if (adapter->rx_csum == TRUE) {
if(adapter->rx_csum == TRUE) {
rxcsum = IXGB_READ_REG(hw, RXCSUM);
rxcsum |= IXGB_RXCSUM_TUOFL;
IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
......@@ -901,7 +913,8 @@ ixgb_unmap_and_free_tx_resource(struct ixgb_adapter *adapter,
* @adapter: board private structure
**/
static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
static void
ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct ixgb_buffer *buffer_info;
......@@ -910,7 +923,7 @@ static void ixgb_clean_tx_ring(struct ixgb_adapter *adapter)
/* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) {
for(i = 0; i < tx_ring->count; i++) {
buffer_info = &tx_ring->buffer_info[i];
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
}
......@@ -957,7 +970,8 @@ ixgb_free_rx_resources(struct ixgb_adapter *adapter)
* @adapter: board private structure
**/
static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
static void
ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
{
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
struct ixgb_buffer *buffer_info;
......@@ -967,9 +981,9 @@ static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
/* Free all the Rx ring sk_buffs */
for (i = 0; i < rx_ring->count; i++) {
for(i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i];
if (buffer_info->skb) {
if(buffer_info->skb) {
pci_unmap_single(pdev,
buffer_info->dma,
......@@ -1004,12 +1018,13 @@ static void ixgb_clean_rx_ring(struct ixgb_adapter *adapter)
* Returns 0 on success, negative on failure
**/
static int ixgb_set_mac(struct net_device *netdev, void *p)
static int
ixgb_set_mac(struct net_device *netdev, void *p)
{
struct ixgb_adapter *adapter = netdev->priv;
struct sockaddr *addr = p;
if (!is_valid_ether_addr(addr->sa_data))
if(!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
......@@ -1029,7 +1044,8 @@ static int ixgb_set_mac(struct net_device *netdev, void *p)
* promiscuous mode, and all-multi behavior.
**/
static void ixgb_set_multi(struct net_device *netdev)
static void
ixgb_set_multi(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
struct ixgb_hw *hw = &adapter->hw;
......@@ -1041,16 +1057,16 @@ static void ixgb_set_multi(struct net_device *netdev)
rctl = IXGB_READ_REG(hw, RCTL);
if (netdev->flags & IFF_PROMISC) {
if(netdev->flags & IFF_PROMISC) {
rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
} else if (netdev->flags & IFF_ALLMULTI) {
} else if(netdev->flags & IFF_ALLMULTI) {
rctl |= IXGB_RCTL_MPE;
rctl &= ~IXGB_RCTL_UPE;
} else {
rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
}
if (netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
if(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
......@@ -1058,10 +1074,10 @@ static void ixgb_set_multi(struct net_device *netdev)
IXGB_WRITE_REG(hw, RCTL, rctl);
for (i = 0, mc_ptr = netdev->mc_list; mc_ptr;
i++, mc_ptr = mc_ptr->next)
for(i = 0, mc_ptr = netdev->mc_list; mc_ptr;
i++, mc_ptr = mc_ptr->next)
memcpy(&mta[i * IXGB_ETH_LENGTH_OF_ADDRESS],
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
mc_ptr->dmi_addr, IXGB_ETH_LENGTH_OF_ADDRESS);
ixgb_mc_addr_list_update(hw, mta, netdev->mc_count, 0);
}
......@@ -1072,7 +1088,8 @@ static void ixgb_set_multi(struct net_device *netdev)
* @data: pointer to netdev cast into an unsigned long
**/
static void ixgb_watchdog(unsigned long data)
static void
ixgb_watchdog(unsigned long data)
{
struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;
struct net_device *netdev = adapter->netdev;
......@@ -1086,21 +1103,22 @@ static void ixgb_watchdog(unsigned long data)
netif_stop_queue(netdev);
}
if (adapter->hw.link_up) {
if (!netif_carrier_ok(netdev)) {
if(adapter->hw.link_up) {
if(!netif_carrier_ok(netdev)) {
printk(KERN_INFO "ixgb: %s NIC Link is Up %d Mbps %s\n",
netdev->name, 10000, "Full Duplex");
netdev->name, 10000, "Full Duplex");
adapter->link_speed = 10000;
adapter->link_duplex = FULL_DUPLEX;
netif_carrier_on(netdev);
netif_wake_queue(netdev);
}
} else {
if (netif_carrier_ok(netdev)) {
if(netif_carrier_ok(netdev)) {
adapter->link_speed = 0;
adapter->link_duplex = 0;
printk(KERN_INFO
"ixgb: %s NIC Link is Down\n", netdev->name);
"ixgb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
netif_stop_queue(netdev);
......@@ -1109,8 +1127,8 @@ static void ixgb_watchdog(unsigned long data)
ixgb_update_stats(adapter);
if (!netif_carrier_ok(netdev)) {
if (IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
if(!netif_carrier_ok(netdev)) {
if(IXGB_DESC_UNUSED(txdr) + 1 < txdr->count) {
/* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going
* to get done, so reset controller to flush Tx.
......@@ -1121,9 +1139,9 @@ static void ixgb_watchdog(unsigned long data)
/* Early detection of hung controller */
i = txdr->next_to_clean;
if (txdr->buffer_info[i].dma &&
time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
!(IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF))
if(txdr->buffer_info[i].dma &&
time_after(jiffies, txdr->buffer_info[i].time_stamp + HZ) &&
!(IXGB_READ_REG(&adapter->hw, STATUS) & IXGB_STATUS_TXOFF))
netif_stop_queue(netdev);
/* generate an interrupt to force clean up of any stragglers */
......@@ -1146,7 +1164,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
uint16_t ipcse, tucse, mss;
if (likely(skb_shinfo(skb)->tso_size)) {
if(likely(skb_shinfo(skb)->tso_size)) {
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
mss = skb_shinfo(skb)->tso_size;
skb->nh.iph->tot_len = 0;
......@@ -1173,22 +1191,16 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
context_desc->mss = cpu_to_le16(mss);
context_desc->hdr_len = hdr_len;
context_desc->status = 0;
context_desc->cmd_type_len = cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
|
IXGB_CONTEXT_DESC_CMD_TSE
|
IXGB_CONTEXT_DESC_CMD_IP
|
IXGB_CONTEXT_DESC_CMD_TCP
|
IXGB_CONTEXT_DESC_CMD_RS
|
IXGB_CONTEXT_DESC_CMD_IDE
| (skb->len -
(hdr_len)));
if (++i == adapter->tx_ring.count)
i = 0;
context_desc->cmd_type_len = cpu_to_le32(
IXGB_CONTEXT_DESC_TYPE
| IXGB_CONTEXT_DESC_CMD_TSE
| IXGB_CONTEXT_DESC_CMD_IP
| IXGB_CONTEXT_DESC_CMD_TCP
| IXGB_CONTEXT_DESC_CMD_RS
| IXGB_CONTEXT_DESC_CMD_IDE
| (skb->len - (hdr_len)));
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
return TRUE;
......@@ -1205,7 +1217,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
unsigned int i;
uint8_t css, cso;
if (likely(skb->ip_summed == CHECKSUM_HW)) {
if(likely(skb->ip_summed == CHECKSUM_HW)) {
css = skb->h.raw - skb->data;
cso = (skb->h.raw + skb->csum) - skb->data;
......@@ -1216,16 +1228,16 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
context_desc->tucso = cso;
context_desc->tucse = 0;
/* zero out any previously existing data in one instruction */
*(uint32_t *) & (context_desc->ipcss) = 0;
*(uint32_t *)&(context_desc->ipcss) = 0;
context_desc->status = 0;
context_desc->hdr_len = 0;
context_desc->mss = 0;
context_desc->cmd_type_len =
cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
| IXGB_TX_DESC_CMD_RS | IXGB_TX_DESC_CMD_IDE);
cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
| IXGB_TX_DESC_CMD_RS
| IXGB_TX_DESC_CMD_IDE);
if (++i == adapter->tx_ring.count)
i = 0;
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
return TRUE;
......@@ -1252,45 +1264,46 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
i = tx_ring->next_to_use;
while (len) {
while(len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
buffer_info->length = size;
buffer_info->dma =
pci_map_single(adapter->pdev,
skb->data + offset, size, PCI_DMA_TODEVICE);
pci_map_single(adapter->pdev,
skb->data + offset,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
len -= size;
offset += size;
count++;
if (++i == tx_ring->count)
i = 0;
if(++i == tx_ring->count) i = 0;
}
for (f = 0; f < nr_frags; f++) {
for(f = 0; f < nr_frags; f++) {
struct skb_frag_struct *frag;
frag = &skb_shinfo(skb)->frags[f];
len = frag->size;
offset = 0;
while (len) {
while(len) {
buffer_info = &tx_ring->buffer_info[i];
size = min(len, IXGB_MAX_JUMBO_FRAME_SIZE);
buffer_info->length = size;
buffer_info->dma =
pci_map_page(adapter->pdev,
frag->page,
frag->page_offset + offset,
size, PCI_DMA_TODEVICE);
pci_map_page(adapter->pdev,
frag->page,
frag->page_offset + offset,
size,
PCI_DMA_TODEVICE);
buffer_info->time_stamp = jiffies;
len -= size;
offset += size;
count++;
if (++i == tx_ring->count)
i = 0;
if(++i == tx_ring->count) i = 0;
}
}
i = (i == 0) ? tx_ring->count - 1 : i - 1;
......@@ -1301,8 +1314,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
}
static inline void
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
int tx_flags)
ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct ixgb_tx_desc *tx_desc = NULL;
......@@ -1312,36 +1324,35 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
uint8_t popts = 0;
unsigned int i;
if (tx_flags & IXGB_TX_FLAGS_TSO) {
if(tx_flags & IXGB_TX_FLAGS_TSO) {
cmd_type_len |= IXGB_TX_DESC_CMD_TSE;
popts |= (IXGB_TX_DESC_POPTS_IXSM | IXGB_TX_DESC_POPTS_TXSM);
}
if (tx_flags & IXGB_TX_FLAGS_CSUM)
if(tx_flags & IXGB_TX_FLAGS_CSUM)
popts |= IXGB_TX_DESC_POPTS_TXSM;
if (tx_flags & IXGB_TX_FLAGS_VLAN) {
if(tx_flags & IXGB_TX_FLAGS_VLAN) {
cmd_type_len |= IXGB_TX_DESC_CMD_VLE;
}
i = tx_ring->next_to_use;
while (count--) {
while(count--) {
buffer_info = &tx_ring->buffer_info[i];
tx_desc = IXGB_TX_DESC(*tx_ring, i);
tx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
tx_desc->cmd_type_len =
cpu_to_le32(cmd_type_len | buffer_info->length);
cpu_to_le32(cmd_type_len | buffer_info->length);
tx_desc->status = status;
tx_desc->popts = popts;
tx_desc->vlan = cpu_to_le16(vlan_id);
if (++i == tx_ring->count)
i = 0;
if(++i == tx_ring->count) i = 0;
}
tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
| IXGB_TX_DESC_CMD_RS);
tx_desc->cmd_type_len |= cpu_to_le32(IXGB_TX_DESC_CMD_EOP
| IXGB_TX_DESC_CMD_RS );
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
......@@ -1359,7 +1370,8 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,
#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
static int
ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
unsigned int first;
......@@ -1367,33 +1379,33 @@ static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
unsigned long flags;
int vlan_id = 0;
if (skb->len <= 0) {
if(skb->len <= 0) {
dev_kfree_skb_any(skb);
return 0;
}
spin_lock_irqsave(&adapter->tx_lock, flags);
if (unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&adapter->tx_lock, flags);
return 1;
}
spin_unlock_irqrestore(&adapter->tx_lock, flags);
if (adapter->vlgrp && vlan_tx_tag_present(skb)) {
if(adapter->vlgrp && vlan_tx_tag_present(skb)) {
tx_flags |= IXGB_TX_FLAGS_VLAN;
vlan_id = vlan_tx_tag_get(skb);
}
first = adapter->tx_ring.next_to_use;
if (ixgb_tso(adapter, skb))
if(ixgb_tso(adapter, skb))
tx_flags |= IXGB_TX_FLAGS_TSO;
else if (ixgb_tx_csum(adapter, skb))
else if(ixgb_tx_csum(adapter, skb))
tx_flags |= IXGB_TX_FLAGS_CSUM;
ixgb_tx_queue(adapter, ixgb_tx_map(adapter, skb, first), vlan_id,
tx_flags);
tx_flags);
netdev->trans_start = jiffies;
......@@ -1405,7 +1417,8 @@ static int ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* @netdev: network interface device structure
**/
static void ixgb_tx_timeout(struct net_device *netdev)
static void
ixgb_tx_timeout(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
......@@ -1413,7 +1426,8 @@ static void ixgb_tx_timeout(struct net_device *netdev)
schedule_work(&adapter->tx_timeout_task);
}
static void ixgb_tx_timeout_task(struct net_device *netdev)
static void
ixgb_tx_timeout_task(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
......@@ -1429,7 +1443,8 @@ static void ixgb_tx_timeout_task(struct net_device *netdev)
* The statistics are actually updated from the timer callback.
**/
static struct net_device_stats *ixgb_get_stats(struct net_device *netdev)
static struct net_device_stats *
ixgb_get_stats(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev->priv;
......@@ -1444,27 +1459,28 @@ static struct net_device_stats *ixgb_get_stats(struct net_device *netdev)
* Returns 0 on success, negative on failure
**/
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu)
static int
ixgb_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgb_adapter *adapter = netdev->priv;
int max_frame = new_mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
int old_max_frame = netdev->mtu + ENET_HEADER_SIZE + ENET_FCS_LENGTH;
if ((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
if((max_frame < IXGB_MIN_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame > IXGB_MAX_JUMBO_FRAME_SIZE + ENET_FCS_LENGTH)) {
IXGB_ERR("Invalid MTU setting\n");
return -EINVAL;
}
if ((max_frame <=
IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame <= IXGB_RXBUFFER_2048)) {
if((max_frame <= IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH)
|| (max_frame <= IXGB_RXBUFFER_2048)) {
adapter->rx_buffer_len = IXGB_RXBUFFER_2048;
} else if (max_frame <= IXGB_RXBUFFER_4096) {
} else if(max_frame <= IXGB_RXBUFFER_4096) {
adapter->rx_buffer_len = IXGB_RXBUFFER_4096;
} else if (max_frame <= IXGB_RXBUFFER_8192) {
} else if(max_frame <= IXGB_RXBUFFER_8192) {
adapter->rx_buffer_len = IXGB_RXBUFFER_8192;
} else {
......@@ -1597,7 +1613,8 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
* @pt_regs: CPU registers structure
**/
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs)
static irqreturn_t
ixgb_intr(int irq, void *data, struct pt_regs *regs)
{
struct net_device *netdev = data;
struct ixgb_adapter *adapter = netdev->priv;
......@@ -1607,18 +1624,19 @@ static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs)
unsigned int i;
#endif
if (unlikely(!icr))
return IRQ_NONE; /* Not our interrupt */
if(unlikely(!icr))
return IRQ_NONE; /* Not our interrupt */
if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
mod_timer(&adapter->watchdog_timer, jiffies);
}
#ifdef CONFIG_IXGB_NAPI
if (netif_rx_schedule_prep(netdev)) {
if(netif_rx_schedule_prep(netdev)) {
/* Disable interrupts and register for poll. The flush
of the posted write is intentionally left out.
*/
of the posted write is intentionally left out.
*/
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
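The hunk above is the interrupt half of the driver's NAPI handshake; the poll routine's completion path (hinted at by the quit_polling label in the next hunk header) undoes the masking. A minimal sketch of both halves, assuming the 2.6-era netdev-based NAPI calls (__netif_rx_schedule is not shown in this patch and is an assumption):
/* ISR: claim the poll, mask every interrupt cause, defer the work */
if (netif_rx_schedule_prep(netdev)) {
        atomic_inc(&adapter->irq_sem);
        IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
        __netif_rx_schedule(netdev);            /* assumed scheduling call */
}
/* poll routine, once under budget: leave polling and unmask via irq_sem */
netif_rx_complete(netdev);
ixgb_irq_enable(adapter);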
......@@ -1677,7 +1695,8 @@ quit_polling: netif_rx_complete(netdev);
* @adapter: board private structure
**/
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
static boolean_t
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct net_device *netdev = adapter->netdev;
......@@ -1690,9 +1709,9 @@ static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = IXGB_TX_DESC(*tx_ring, eop);
while (eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
for (cleaned = FALSE; !cleaned;) {
for(cleaned = FALSE; !cleaned; ) {
tx_desc = IXGB_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
......@@ -1706,8 +1725,7 @@ static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
*(uint32_t *)&(tx_desc->status) = 0;
cleaned = (i == eop);
if (++i == tx_ring->count)
i = 0;
if(++i == tx_ring->count) i = 0;
}
eop = tx_ring->buffer_info[i].next_to_watch;
......@@ -1717,8 +1735,8 @@ static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
tx_ring->next_to_clean = i;
spin_lock(&adapter->tx_lock);
if (cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev)
&& (IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
if(cleaned && netif_queue_stopped(netdev) && netif_carrier_ok(netdev) &&
(IXGB_DESC_UNUSED(tx_ring) > IXGB_TX_QUEUE_WAKE)) {
netif_wake_queue(netdev);
}
......@@ -1736,20 +1754,21 @@ static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
static inline void
ixgb_rx_checksum(struct ixgb_adapter *adapter,
struct ixgb_rx_desc *rx_desc, struct sk_buff *skb)
struct ixgb_rx_desc *rx_desc,
struct sk_buff *skb)
{
/* Ignore Checksum bit is set OR
* TCP Checksum has not been calculated
*/
if ((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
if((rx_desc->status & IXGB_RX_DESC_STATUS_IXSM) ||
(!(rx_desc->status & IXGB_RX_DESC_STATUS_TCPCS))) {
skb->ip_summed = CHECKSUM_NONE;
return;
}
/* At this point we know the hardware did the TCP checksum */
/* now look at the TCP checksum error bit */
if (rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
if(rx_desc->errors & IXGB_RX_DESC_ERRORS_TCPE) {
/* let the stack verify checksum errors */
skb->ip_summed = CHECKSUM_NONE;
adapter->hw_csum_rx_error++;
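The two branches above cover the ignore/error cases; when neither triggers, the hardware-verified frame can be marked so the stack skips its own TCP checksum pass. A sketch of that success branch (only the skb field is a standard kernel symbol; the good-frame counter name is hypothetical):
/* checksum already verified by the adapter */
skb->ip_summed = CHECKSUM_UNNECESSARY;
adapter->hw_csum_rx_good++;     /* illustrative counter name */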
......@@ -1786,7 +1805,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
rx_desc = IXGB_RX_DESC(*rx_ring, i);
buffer_info = &rx_ring->buffer_info[i];
while (rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
#ifdef CONFIG_IXGB_NAPI
if(*work_done >= work_to_do)
......@@ -1797,13 +1816,11 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
skb = buffer_info->skb;
prefetch(skb->data);
if (++i == rx_ring->count)
i = 0;
if(++i == rx_ring->count) i = 0;
next_rxd = IXGB_RX_DESC(*rx_ring, i);
prefetch(next_rxd);
if ((j = i + 1) == rx_ring->count)
j = 0;
if((j = i + 1) == rx_ring->count) j = 0;
next2_buffer = &rx_ring->buffer_info[j];
prefetch(next2_buffer);
......@@ -1816,16 +1833,17 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
pci_unmap_single(pdev,
buffer_info->dma,
buffer_info->length, PCI_DMA_FROMDEVICE);
buffer_info->length,
PCI_DMA_FROMDEVICE);
length = le16_to_cpu(rx_desc->length);
if (unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
/* All receives must fit into a single buffer */
IXGB_DBG("Receive packet consumed multiple buffers "
"length<%x>\n", length);
"length<%x>\n", length);
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
......@@ -1858,24 +1876,22 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
if (adapter->vlgrp
&& (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
} else {
netif_receive_skb(skb);
}
#else /* CONFIG_IXGB_NAPI */
if (adapter->vlgrp
&& (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
#else /* CONFIG_IXGB_NAPI */
if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
} else {
netif_rx(skb);
}
#endif /* CONFIG_IXGB_NAPI */
#endif /* CONFIG_IXGB_NAPI */
netdev->last_rx = jiffies;
rx_desc->status = 0;
......@@ -1897,7 +1913,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
* @adapter: address of board private structure
**/
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
static void
ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
{
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
struct net_device *netdev = adapter->netdev;
......@@ -1916,12 +1933,12 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
/* leave one descriptor unused */
while (--cleancount > 0) {
while(--cleancount > 0) {
rx_desc = IXGB_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
if (unlikely(!skb)) {
if(unlikely(!skb)) {
/* Better luck next round */
break;
}
......@@ -1937,13 +1954,14 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len;
buffer_info->dma =
pci_map_single(pdev,
pci_map_single(pdev,
skb->data,
adapter->rx_buffer_len, PCI_DMA_FROMDEVICE);
adapter->rx_buffer_len,
PCI_DMA_FROMDEVICE);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
if ((i & ~(num_group_tail_writes - 1)) == i) {
if((i & ~(num_group_tail_writes- 1)) == i) {
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
......@@ -1953,8 +1971,7 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(&adapter->hw, RDT, i);
}
if (++i == rx_ring->count)
i = 0;
if(++i == rx_ring->count) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
......@@ -1976,7 +1993,7 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
ixgb_irq_disable(adapter);
adapter->vlgrp = grp;
if (grp) {
if(grp) {
/* enable VLAN tag insert/strip */
ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
ctrl |= IXGB_CTRL0_VME;
......@@ -2005,7 +2022,8 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
ixgb_irq_enable(adapter);
}
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
{
struct ixgb_adapter *adapter = netdev->priv;
uint32_t vfta, index;
......@@ -2018,19 +2036,20 @@ static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
ixgb_write_vfta(&adapter->hw, index, vfta);
}
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
{
struct ixgb_adapter *adapter = netdev->priv;
uint32_t vfta, index;
ixgb_irq_disable(adapter);
if (adapter->vlgrp)
if(adapter->vlgrp)
adapter->vlgrp->vlan_devices[vid] = NULL;
ixgb_irq_enable(adapter);
/* remove VID from filter table */
/* remove VID from filter table*/
index = (vid >> 5) & 0x7F;
vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
......@@ -2038,14 +2057,15 @@ static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
ixgb_write_vfta(&adapter->hw, index, vfta);
}
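The VLAN filter table touched by these two helpers is 128 32-bit words, so a VLAN id selects word (vid >> 5) & 0x7F and bit vid & 0x1F; vid 100, for instance, maps to word 3, bit 4. A sketch of the bit manipulation this implies (only the index math appears in the hunks; the set/clear step is inferred):
index = (vid >> 5) & 0x7F;                      /* vid 100 -> word 3 */
vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
vfta &= ~(1 << (vid & 0x1F));                   /* kill_vid clears bit 4; add_vid would set it */
ixgb_write_vfta(&adapter->hw, index, vfta);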
static void ixgb_restore_vlan(struct ixgb_adapter *adapter)
static void
ixgb_restore_vlan(struct ixgb_adapter *adapter)
{
ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
if (adapter->vlgrp) {
if(adapter->vlgrp) {
uint16_t vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if (!adapter->vlgrp->vlan_devices[vid])
for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if(!adapter->vlgrp->vlan_devices[vid])
continue;
ixgb_vlan_rx_add_vid(adapter->netdev, vid);
}
......@@ -2063,7 +2083,7 @@ ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
struct pci_dev *pdev = NULL;
switch (event) {
switch(event) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
......@@ -2080,14 +2100,15 @@ ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
* @param pdev pci driver structure used for passing to
* @param state power state to enter
**/
static int ixgb_suspend(struct pci_dev *pdev, uint32_t state)
static int
ixgb_suspend(struct pci_dev *pdev, uint32_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
netif_device_detach(netdev);
if (netif_running(netdev))
if(netif_running(netdev))
ixgb_down(adapter, TRUE);
pci_save_state(pdev);
......
......@@ -78,19 +78,19 @@ typedef enum {
#define DEBUGOUT7 DEBUGOUT3
#define IXGB_WRITE_REG(a, reg, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg)))
writel((value), ((a)->hw_addr + IXGB_##reg)))
#define IXGB_READ_REG(a, reg) ( \
readl((a)->hw_addr + IXGB_##reg))
readl((a)->hw_addr + IXGB_##reg))
#define IXGB_WRITE_REG_ARRAY(a, reg, offset, value) ( \
writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
writel((value), ((a)->hw_addr + IXGB_##reg + ((offset) << 2))))
#define IXGB_READ_REG_ARRAY(a, reg, offset) ( \
readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
readl((a)->hw_addr + IXGB_##reg + ((offset) << 2)))
#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
#define IXGB_MEMCPY memcpy
#endif /* _IXGB_OSDEP_H_ */
#endif /* _IXGB_OSDEP_H_ */
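The register-access macros reindented above paste the register name onto an IXGB_ offset constant, so each call site compiles down to a plain MMIO access. Illustrative expansions, using registers already referenced elsewhere in this patch:
/* IXGB_WRITE_REG(&adapter->hw, RCTL, rctl) expands to */
writel((rctl), ((&adapter->hw)->hw_addr + IXGB_RCTL));
/* IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index) expands to */
readl((&adapter->hw)->hw_addr + IXGB_VFTA + ((index) << 2));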
......@@ -198,9 +198,10 @@ struct ixgb_option {
} arg;
};
static int __devinit ixgb_validate_option(int *value, struct ixgb_option *opt)
static int __devinit
ixgb_validate_option(int *value, struct ixgb_option *opt)
{
if (*value == OPTION_UNSET) {
if(*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
......@@ -217,32 +218,31 @@ static int __devinit ixgb_validate_option(int *value, struct ixgb_option *opt)
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
if(*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
printk(KERN_INFO "%s set to %i\n", opt->name, *value);
return 0;
}
break;
case list_option:{
int i;
struct ixgb_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
printk(KERN_INFO "%s\n",
ent->str);
return 0;
}
case list_option: {
int i;
struct ixgb_opt_list *ent;
for(i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if(*value == ent->i) {
if(ent->str[0] != '\0')
printk(KERN_INFO "%s\n", ent->str);
return 0;
}
}
}
break;
default:
BUG();
}
printk(KERN_INFO "Invalid %s specified (%i) %s\n",
opt->name, *value, opt->err);
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
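Each per-option block below feeds a candidate value through ixgb_validate_option() and keeps the default when the module parameter was not supplied. A hedged sketch of that calling pattern for the Receive Interrupt Delay case (the RxIntDelay[] per-board array name is an assumption; opt mirrors the block that appears later in this file):
struct ixgb_option opt = {
        .type = range_option,
        .name = "Receive Interrupt Delay",
        .err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
        .def  = DEFAULT_RDTR,
        .arg  = { .r = { .min = MIN_RDTR, .max = MAX_RDTR }}
};
int rdtr = DEFAULT_RDTR;
if (num_RxIntDelay > bd) {
        rdtr = RxIntDelay[bd];                  /* assumed per-board array */
        ixgb_validate_option(&rdtr, &opt);
}
adapter->rx_int_delay = rdtr;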
......@@ -259,24 +259,24 @@ static int __devinit ixgb_validate_option(int *value, struct ixgb_option *opt)
* in a variable in the adapter structure.
**/
void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
void __devinit
ixgb_check_options(struct ixgb_adapter *adapter)
{
int bd = adapter->bd_number;
if (bd >= IXGB_MAX_NIC) {
if(bd >= IXGB_MAX_NIC) {
printk(KERN_NOTICE
"Warning: no configuration for board #%i\n", bd);
"Warning: no configuration for board #%i\n", bd);
printk(KERN_NOTICE "Using defaults for all values\n");
bd = IXGB_MAX_NIC;
}
{ /* Transmit Descriptor Count */
{ /* Transmit Descriptor Count */
struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_TXD),
.def = DEFAULT_TXD,
.arg = {.r = {.min = MIN_TXD,
.max = MAX_TXD}}
.err = "using default of " __MODULE_STRING(DEFAULT_TXD),
.def = DEFAULT_TXD,
.arg = { .r = { .min = MIN_TXD,
.max = MAX_TXD}}
};
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
......@@ -288,14 +288,14 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
}
IXGB_ROUNDUP(tx_ring->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Descriptor Count */
{ /* Receive Descriptor Count */
struct ixgb_option opt = {
.type = range_option,
.name = "Receive Descriptors",
.err = "using default of " __MODULE_STRING(DEFAULT_RXD),
.def = DEFAULT_RXD,
.arg = {.r = {.min = MIN_RXD,
.max = MAX_RXD}}
.err = "using default of " __MODULE_STRING(DEFAULT_RXD),
.def = DEFAULT_RXD,
.arg = { .r = { .min = MIN_RXD,
.max = MAX_RXD}}
};
struct ixgb_desc_ring *rx_ring = &adapter->rx_ring;
......@@ -307,12 +307,12 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
}
IXGB_ROUNDUP(rx_ring->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
}
{ /* Receive Checksum Offload Enable */
{ /* Receive Checksum Offload Enable */
struct ixgb_option opt = {
.type = enable_option,
.name = "Receive Checksum Offload",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if(num_XsumRX > bd) {
......@@ -323,23 +323,22 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
adapter->rx_csum = opt.def;
}
}
{ /* Flow Control */
{ /* Flow Control */
struct ixgb_opt_list fc_list[] =
{ {ixgb_fc_none, "Flow Control Disabled"},
{ixgb_fc_rx_pause, "Flow Control Receive Only"},
{ixgb_fc_tx_pause, "Flow Control Transmit Only"},
{ixgb_fc_full, "Flow Control Enabled"},
{ixgb_fc_default, "Flow Control Hardware Default"}
};
{{ ixgb_fc_none, "Flow Control Disabled" },
{ ixgb_fc_rx_pause,"Flow Control Receive Only" },
{ ixgb_fc_tx_pause,"Flow Control Transmit Only" },
{ ixgb_fc_full, "Flow Control Enabled" },
{ ixgb_fc_default, "Flow Control Hardware Default" }};
struct ixgb_option opt = {
.type = list_option,
.name = "Flow Control",
.err = "reading default settings from EEPROM",
.def = ixgb_fc_full,
.arg = {.l = {.nr = LIST_LEN(fc_list),
.p = fc_list}}
.err = "reading default settings from EEPROM",
.def = ixgb_fc_full,
.arg = { .l = { .nr = LIST_LEN(fc_list),
.p = fc_list }}
};
if(num_FlowControl > bd) {
......@@ -350,15 +349,14 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
adapter->hw.fc.type = opt.def;
}
}
{ /* Receive Flow Control High Threshold */
{ /* Receive Flow Control High Threshold */
struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control High Threshold",
.err =
"using default of " __MODULE_STRING(DEFAULT_FCRTH),
.def = DEFAULT_FCRTH,
.arg = {.r = {.min = MIN_FCRTH,
.max = MAX_FCRTH}}
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTH),
.def = DEFAULT_FCRTH,
.arg = { .r = { .min = MIN_FCRTH,
.max = MAX_FCRTH}}
};
if(num_RxFCHighThresh > bd) {
......@@ -371,15 +369,14 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
printk (KERN_INFO
"Ignoring RxFCHighThresh when no RxFC\n");
}
{ /* Receive Flow Control Low Threshold */
{ /* Receive Flow Control Low Threshold */
struct ixgb_option opt = {
.type = range_option,
.name = "Rx Flow Control Low Threshold",
.err =
"using default of " __MODULE_STRING(DEFAULT_FCRTL),
.def = DEFAULT_FCRTL,
.arg = {.r = {.min = MIN_FCRTL,
.max = MAX_FCRTL}}
.err = "using default of " __MODULE_STRING(DEFAULT_FCRTL),
.def = DEFAULT_FCRTL,
.arg = { .r = { .min = MIN_FCRTL,
.max = MAX_FCRTL}}
};
if(num_RxFCLowThresh > bd) {
......@@ -392,16 +389,14 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
printk (KERN_INFO
"Ignoring RxFCLowThresh when no RxFC\n");
}
{ /* Flow Control Pause Time Request */
{ /* Flow Control Pause Time Request*/
struct ixgb_option opt = {
.type = range_option,
.name = "Flow Control Pause Time Request",
.err =
"using default of "
__MODULE_STRING(DEFAULT_FCPAUSE),
.def = DEFAULT_FCPAUSE,
.arg = {.r = {.min = MIN_FCPAUSE,
.max = MAX_FCPAUSE}}
.err = "using default of "__MODULE_STRING(DEFAULT_FCPAUSE),
.def = DEFAULT_FCPAUSE,
.arg = { .r = { .min = MIN_FCPAUSE,
.max = MAX_FCPAUSE}}
};
if(num_FCReqTimeout > bd) {
......@@ -420,22 +415,21 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
/* high must be greater than low */
if (adapter->hw.fc.high_water < (adapter->hw.fc.low_water + 8)) {
/* set defaults */
printk(KERN_INFO
"RxFCHighThresh must be >= (RxFCLowThresh + 8), "
"Using Defaults\n");
printk (KERN_INFO
"RxFCHighThresh must be >= (RxFCLowThresh + 8), "
"Using Defaults\n");
adapter->hw.fc.high_water = DEFAULT_FCRTH;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
adapter->hw.fc.low_water = DEFAULT_FCRTL;
}
}
{ /* Receive Interrupt Delay */
{ /* Receive Interrupt Delay */
struct ixgb_option opt = {
.type = range_option,
.name = "Receive Interrupt Delay",
.err =
"using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = {.r = {.min = MIN_RDTR,
.max = MAX_RDTR}}
.err = "using default of " __MODULE_STRING(DEFAULT_RDTR),
.def = DEFAULT_RDTR,
.arg = { .r = { .min = MIN_RDTR,
.max = MAX_RDTR}}
};
if(num_RxIntDelay > bd) {
......@@ -445,15 +439,14 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
adapter->rx_int_delay = opt.def;
}
}
{ /* Transmit Interrupt Delay */
{ /* Transmit Interrupt Delay */
struct ixgb_option opt = {
.type = range_option,
.name = "Transmit Interrupt Delay",
.err =
"using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = {.r = {.min = MIN_TIDV,
.max = MAX_TIDV}}
.err = "using default of " __MODULE_STRING(DEFAULT_TIDV),
.def = DEFAULT_TIDV,
.arg = { .r = { .min = MIN_TIDV,
.max = MAX_TIDV}}
};
if(num_TxIntDelay > bd) {
......@@ -464,12 +457,12 @@ void __devinit ixgb_check_options(struct ixgb_adapter *adapter)
}
}
{ /* Transmit Interrupt Delay Enable */
{ /* Transmit Interrupt Delay Enable */
struct ixgb_option opt = {
.type = enable_option,
.name = "Tx Interrupt Delay Enable",
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
.err = "defaulting to Enabled",
.def = OPTION_ENABLED
};
if(num_IntDelayEnable > bd) {
......