Commit 1c43f75a authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-10-08

This series contains updates to i40e and i40evf only (again).

Jesse fixes an issue where the driver was issuing a WARN_ON during ring
size changes: the code was cloning the rx_ring struct but not zeroing
out the pointers before allocating new memory, so the fix simply zeroes
out those pointers.  He also reduces function call overhead by moving
the interrupt enable function to the header file, which in turn allows
us to inline it.  He also does a thorough job of code cleanup: fixing
whitespace after declarations, removing unnecessary braces and breaks,
removing another __func__ use, and general code tidiness.
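
A minimal sketch of the clone-and-zero pattern, mirroring the rx ring
hunk further down in this diff (the tx path gets the same treatment):

	/* clone ring and set up the updated descriptor count */
	rx_rings[i] = *vsi->rx_rings[i];
	rx_rings[i].count = new_rx_count;
	/* NULL the pointers copied from the live ring so the setup call
	 * allocates fresh desc/rx_bi memory instead of tripping the
	 * WARN_ON on the stale addresses
	 */
	rx_rings[i].desc = NULL;
	rx_rings[i].rx_bi = NULL;
	err = i40e_setup_rx_descriptors(&rx_rings[i]);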

Mitch adds more verbose error messages for when the number of supported
VFs reported at driver init differs from the number reported in config
space.  He also updates the VF driver to detect a reset with the
VF_ARQLEN register: the enable bit is cleared when the VF is reset and
stays cleared until the VF driver processes the reset and re-enables
the admin queue, which is more reliable than the previous check of
VFGEN_RSTAT.
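
The new check keys off the admin queue enable bit rather than the
VFGEN_RSTAT state, roughly as in the watchdog hunk below:

	/* a cleared ARQ enable bit means the VF is (still) in reset */
	reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
	if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
		adapter->state = __I40EVF_RESETTING;
		adapter->flags |= I40EVF_FLAG_RESET_PENDING;
		dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
	}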

Neerav adds parsing of CEE DCBX TLVs from the LLDP MIB, since there is
a need to get the CEE desired configuration transmitted by firmware and
the DCB configuration received from the peer for debug and other
application purposes.

Carolyn fixes a problem where the PF's Flow Director filter table could
hold an entry that the hardware was unable to add; when this occurred,
the invalid entry would get replayed and a valid one would be lost.
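
The fix records the failing filter id in pf->fd_inv and drops that
entry from the PF's list before it can be replayed, as in the
i40e_fdir_check_and_reenable() hunk below:

	/* if hw had a problem adding a filter, delete it */
	if (pf->fd_inv > 0) {
		hlist_for_each_entry_safe(filter, node,
					  &pf->fdir_filter_list, fdir_node) {
			if (filter->fd_id == pf->fd_inv) {
				hlist_del(&filter->fdir_node);
				kfree(filter);
				pf->fdir_pf_active_filters--;
			}
		}
	}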

Matt fixes an issue where multiple link up messages could be logged,
caused by admin queue link status timing when link properties are
changed.
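
The duplicate messages are suppressed by tracking the last logged state
in the VSI, as the i40e_print_link_message() hunk below shows:

	/* only log a message when the link state actually changes */
	if (vsi->current_isup == isup)
		return;
	vsi->current_isup = isup;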

Shannon adds the ability to control periodic link polling through
ethtool, so it can be switched off and on when debugging link issues.
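
With the new "LinkPolling" private flag this can be toggled from user
space, for example (the interface name here is just illustrative):

	ethtool --show-priv-flags eth0
	ethtool --set-priv-flags eth0 LinkPolling off
	ethtool --set-priv-flags eth0 LinkPolling on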

Serey explicitly assigns the enum value for each VSI type so that the
PF and VF always refer to the same VSI type even if their enum lists
are different.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents df718423 ce6fcb3f
......@@ -101,6 +101,7 @@
/* Ethtool Private Flags */
#define I40E_PRIV_FLAGS_NPAR_FLAG BIT(0)
#define I40E_PRIV_FLAGS_LINKPOLL_FLAG BIT(1)
#define I40E_NVM_VERSION_LO_SHIFT 0
#define I40E_NVM_VERSION_LO_MASK (0xff << I40E_NVM_VERSION_LO_SHIFT)
......@@ -327,6 +328,7 @@ struct i40e_pf {
#define I40E_FLAG_WB_ON_ITR_CAPABLE BIT_ULL(35)
#define I40E_FLAG_VEB_STATS_ENABLED BIT_ULL(37)
#define I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE BIT_ULL(38)
#define I40E_FLAG_LINK_POLLING_ENABLED BIT_ULL(39)
#define I40E_FLAG_VEB_MODE_ENABLED BIT_ULL(40)
/* tracks features that get auto disabled by errors */
......@@ -410,6 +412,7 @@ struct i40e_pf {
u32 npar_min_bw;
u32 ioremap_len;
u32 fd_inv;
};
struct i40e_mac_filter {
......@@ -536,6 +539,7 @@ struct i40e_vsi {
u16 idx; /* index in pf->vsi[] */
u16 veb_idx; /* index of VEB parent */
struct kobject *kobj; /* sysfs object */
bool current_isup; /* Sync 'link up' logging */
/* VSI specific handlers */
irqreturn_t (*irq_handler)(int irq, void *data);
......@@ -702,7 +706,24 @@ static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {}
static inline void i40e_dbg_init(void) {}
static inline void i40e_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS*/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
* @vector: enable a particular Hw Interrupt vector, without base_vector
**/
static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
/* skip the flush */
}
void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
......@@ -773,4 +794,5 @@ int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi);
i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup);
#endif /* _I40E_H_ */
......@@ -683,8 +683,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"%s: ntc %d head %d.\n", __func__, ntc,
rd32(hw, hw->aq.asq.head));
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
......
......@@ -1035,7 +1035,7 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
if (flags & I40E_AQC_LAN_ADDR_VALID)
memcpy(mac_addr, &addrs.pf_lan_mac, sizeof(addrs.pf_lan_mac));
ether_addr_copy(mac_addr, addrs.pf_lan_mac);
return status;
}
......@@ -1058,7 +1058,7 @@ i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_PORT_ADDR_VALID)
memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
ether_addr_copy(mac_addr, addrs.port_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
......@@ -1116,7 +1116,7 @@ i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
return status;
if (flags & I40E_AQC_SAN_ADDR_VALID)
memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
ether_addr_copy(mac_addr, addrs.pf_san_mac);
else
status = I40E_ERR_INVALID_MAC_ADDR;
......@@ -2363,6 +2363,7 @@ i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
*vebs_free = le16_to_cpu(cmd_resp->vebs_free);
if (floating) {
u16 flags = le16_to_cpu(cmd_resp->veb_flags);
if (flags & I40E_AQC_ADD_VEB_FLOATING)
*floating = true;
else
......@@ -3777,7 +3778,7 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
}
if (mac_addr)
memcpy(cmd->mac, mac_addr, ETH_ALEN);
ether_addr_copy(cmd->mac, mac_addr);
cmd->etype = cpu_to_le16(ethtype);
cmd->flags = cpu_to_le16(flags);
......
......@@ -291,6 +291,182 @@ static void i40e_parse_ieee_tlv(struct i40e_lldp_org_tlv *tlv,
}
}
/**
* i40e_parse_cee_pgcfg_tlv
* @tlv: CEE DCBX PG CFG TLV
* @dcbcfg: Local store to update ETS CFG data
*
* Parses CEE DCBX PG CFG TLV
**/
static void i40e_parse_cee_pgcfg_tlv(struct i40e_cee_feat_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
struct i40e_dcb_ets_config *etscfg;
u8 *buf = tlv->tlvinfo;
u16 offset = 0;
u8 priority;
int i;
etscfg = &dcbcfg->etscfg;
if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
etscfg->willing = 1;
etscfg->cbs = 0;
/* Priority Group Table (4 octets)
* Octets:| 1 | 2 | 3 | 4 |
* -----------------------------------------
* |pri0|pri1|pri2|pri3|pri4|pri5|pri6|pri7|
* -----------------------------------------
* Bits:|7 4|3 0|7 4|3 0|7 4|3 0|7 4|3 0|
* -----------------------------------------
*/
for (i = 0; i < 4; i++) {
priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_1_MASK) >>
I40E_CEE_PGID_PRIO_1_SHIFT);
etscfg->prioritytable[i * 2] = priority;
priority = (u8)((buf[offset] & I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
etscfg->prioritytable[i * 2 + 1] = priority;
offset++;
}
/* PG Percentage Table (8 octets)
* Octets:| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 |
* ---------------------------------
* |pg0|pg1|pg2|pg3|pg4|pg5|pg6|pg7|
* ---------------------------------
*/
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
etscfg->tcbwtable[i] = buf[offset++];
/* Number of TCs supported (1 octet) */
etscfg->maxtcs = buf[offset];
}
/**
* i40e_parse_cee_pfccfg_tlv
* @tlv: CEE DCBX PFC CFG TLV
* @dcbcfg: Local store to update PFC CFG data
*
* Parses CEE DCBX PFC CFG TLV
**/
static void i40e_parse_cee_pfccfg_tlv(struct i40e_cee_feat_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u8 *buf = tlv->tlvinfo;
if (tlv->en_will_err & I40E_CEE_FEAT_TLV_WILLING_MASK)
dcbcfg->pfc.willing = 1;
/* ------------------------
* | PFC Enable | PFC TCs |
* ------------------------
* | 1 octet | 1 octet |
*/
dcbcfg->pfc.pfcenable = buf[0];
dcbcfg->pfc.pfccap = buf[1];
}
/**
* i40e_parse_cee_app_tlv
* @tlv: CEE DCBX APP TLV
* @dcbcfg: Local store to update APP PRIO data
*
* Parses CEE DCBX APP PRIO TLV
**/
static void i40e_parse_cee_app_tlv(struct i40e_cee_feat_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u16 length, typelength, offset = 0;
struct i40e_cee_app_prio *app;
u8 i, up;
typelength = ntohs(tlv->hdr.typelen);
length = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
dcbcfg->numapps = length / sizeof(*app);
if (!dcbcfg->numapps)
return;
for (i = 0; i < dcbcfg->numapps; i++) {
app = (struct i40e_cee_app_prio *)(tlv->tlvinfo + offset);
for (up = 0; up < I40E_MAX_USER_PRIORITY; up++) {
if (app->prio_map & (1 << up))
break;
}
dcbcfg->app[i].priority = up;
/* Get Selector from lower 2 bits */
dcbcfg->app[i].selector = (app->upper_oui_sel &
I40E_CEE_APP_SELECTOR_MASK);
dcbcfg->app[i].protocolid = ntohs(app->protocol);
/* Move to next app */
offset += sizeof(*app);
}
}
/**
* i40e_parse_cee_tlv
* @tlv: CEE DCBX TLV
* @dcbcfg: Local store to update DCBX config data
*
* Get the TLV subtype and send it to parsing function
* based on the subtype value
**/
static void i40e_parse_cee_tlv(struct i40e_lldp_org_tlv *tlv,
struct i40e_dcbx_config *dcbcfg)
{
u16 len, tlvlen, sublen, typelength;
struct i40e_cee_feat_tlv *sub_tlv;
u8 subtype, feat_tlv_count = 0;
u32 ouisubtype;
ouisubtype = ntohl(tlv->ouisubtype);
subtype = (u8)((ouisubtype & I40E_LLDP_TLV_SUBTYPE_MASK) >>
I40E_LLDP_TLV_SUBTYPE_SHIFT);
/* Return if not CEE DCBX */
if (subtype != I40E_CEE_DCBX_TYPE)
return;
typelength = ntohs(tlv->typelength);
tlvlen = (u16)((typelength & I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
len = sizeof(tlv->typelength) + sizeof(ouisubtype) +
sizeof(struct i40e_cee_ctrl_tlv);
/* Return if no CEE DCBX Feature TLVs */
if (tlvlen <= len)
return;
sub_tlv = (struct i40e_cee_feat_tlv *)((char *)tlv + len);
while (feat_tlv_count < I40E_CEE_MAX_FEAT_TYPE) {
typelength = ntohs(sub_tlv->hdr.typelen);
sublen = (u16)((typelength &
I40E_LLDP_TLV_LEN_MASK) >>
I40E_LLDP_TLV_LEN_SHIFT);
subtype = (u8)((typelength & I40E_LLDP_TLV_TYPE_MASK) >>
I40E_LLDP_TLV_TYPE_SHIFT);
switch (subtype) {
case I40E_CEE_SUBTYPE_PG_CFG:
i40e_parse_cee_pgcfg_tlv(sub_tlv, dcbcfg);
break;
case I40E_CEE_SUBTYPE_PFC_CFG:
i40e_parse_cee_pfccfg_tlv(sub_tlv, dcbcfg);
break;
case I40E_CEE_SUBTYPE_APP_PRI:
i40e_parse_cee_app_tlv(sub_tlv, dcbcfg);
break;
default:
return; /* Invalid Sub-type return */
}
feat_tlv_count++;
/* Move to next sub TLV */
sub_tlv = (struct i40e_cee_feat_tlv *)((char *)sub_tlv +
sizeof(sub_tlv->hdr.typelen) +
sublen);
}
}
/**
* i40e_parse_org_tlv
* @tlv: Organization specific TLV
......@@ -312,6 +488,9 @@ static void i40e_parse_org_tlv(struct i40e_lldp_org_tlv *tlv,
case I40E_IEEE_8021QAZ_OUI:
i40e_parse_ieee_tlv(tlv, dcbcfg);
break;
case I40E_CEE_DCBX_OUI:
i40e_parse_cee_tlv(tlv, dcbcfg);
break;
default:
break;
}
......@@ -502,15 +681,18 @@ static void i40e_cee_to_dcb_config(
/* CEE PG data to ETS config */
dcbcfg->etscfg.maxtcs = cee_cfg->oper_num_tc;
/* Note that the FW creates the oper_prio_tc nibbles reversed
* from those in the CEE Priority Group sub-TLV.
*/
for (i = 0; i < 4; i++) {
tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_1_MASK) >>
I40E_CEE_PGID_PRIO_1_SHIFT);
dcbcfg->etscfg.prioritytable[i*2] = tc;
tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_0_MASK) >>
I40E_CEE_PGID_PRIO_0_SHIFT);
dcbcfg->etscfg.prioritytable[i*2 + 1] = tc;
dcbcfg->etscfg.prioritytable[i * 2] = tc;
tc = (u8)((cee_cfg->oper_prio_tc[i] &
I40E_CEE_PGID_PRIO_1_MASK) >>
I40E_CEE_PGID_PRIO_1_SHIFT);
dcbcfg->etscfg.prioritytable[i * 2 + 1] = tc;
}
for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
......@@ -582,6 +764,36 @@ static void i40e_cee_to_dcb_config(
dcbcfg->numapps = i;
}
/**
* i40e_get_ieee_dcb_config
* @hw: pointer to the hw struct
*
* Get IEEE mode DCB configuration from the Firmware
**/
static i40e_status i40e_get_ieee_dcb_config(struct i40e_hw *hw)
{
i40e_status ret = 0;
/* IEEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
/* Get Local DCB Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
&hw->local_dcbx_config);
if (ret)
goto out;
/* Get Remote DCB Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
&hw->remote_dcbx_config);
/* Don't treat ENOENT as an error for Remote MIBs */
if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
ret = 0;
out:
return ret;
}
/**
* i40e_get_dcb_config
* @hw: pointer to the hw struct
......@@ -597,7 +809,7 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
/* If Firmware version < v4.33 IEEE only */
if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
(hw->aq.fw_maj_ver < 4))
goto ieee;
return i40e_get_ieee_dcb_config(hw);
/* If Firmware version == v4.33 use old CEE struct */
if ((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver == 33)) {
......@@ -626,16 +838,14 @@ i40e_status i40e_get_dcb_config(struct i40e_hw *hw)
/* CEE mode not enabled try querying IEEE data */
if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT)
goto ieee;
else
return i40e_get_ieee_dcb_config(hw);
if (ret)
goto out;
ieee:
/* IEEE mode */
hw->local_dcbx_config.dcbx_mode = I40E_DCBX_MODE_IEEE;
/* Get Local DCB Config */
/* Get CEE DCB Desired Config */
ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_LOCAL, 0,
&hw->local_dcbx_config);
&hw->desired_dcbx_config);
if (ret)
goto out;
......
......@@ -44,6 +44,15 @@
#define I40E_IEEE_SUBTYPE_PFC_CFG 11
#define I40E_IEEE_SUBTYPE_APP_PRI 12
#define I40E_CEE_DCBX_OUI 0x001b21
#define I40E_CEE_DCBX_TYPE 2
#define I40E_CEE_SUBTYPE_CTRL 1
#define I40E_CEE_SUBTYPE_PG_CFG 2
#define I40E_CEE_SUBTYPE_PFC_CFG 3
#define I40E_CEE_SUBTYPE_APP_PRI 4
#define I40E_CEE_MAX_FEAT_TYPE 3
/* Defines for LLDP TLV header */
#define I40E_LLDP_TLV_LEN_SHIFT 0
#define I40E_LLDP_TLV_LEN_MASK (0x01FF << I40E_LLDP_TLV_LEN_SHIFT)
......@@ -98,6 +107,36 @@ struct i40e_lldp_org_tlv {
__be32 ouisubtype;
u8 tlvinfo[1];
};
struct i40e_cee_tlv_hdr {
__be16 typelen;
u8 operver;
u8 maxver;
};
struct i40e_cee_ctrl_tlv {
struct i40e_cee_tlv_hdr hdr;
__be32 seqno;
__be32 ackno;
};
struct i40e_cee_feat_tlv {
struct i40e_cee_tlv_hdr hdr;
u8 en_will_err; /* Bits: |En|Will|Err|Reserved(5)| */
#define I40E_CEE_FEAT_TLV_ENABLE_MASK 0x80
#define I40E_CEE_FEAT_TLV_WILLING_MASK 0x40
#define I40E_CEE_FEAT_TLV_ERR_MASK 0x20
u8 subtype;
u8 tlvinfo[1];
};
struct i40e_cee_app_prio {
__be16 protocol;
u8 upper_oui_sel; /* Bits: |Upper OUI(6)|Selector(2)| */
#define I40E_CEE_APP_SELECTOR_MASK 0x03
__be16 lower_oui;
u8 prio_map;
};
#pragma pack()
i40e_status i40e_get_dcbx_status(struct i40e_hw *hw,
......
......@@ -236,6 +236,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_dcb_app_priority_table *app)
{
int v, err;
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
......
......@@ -404,82 +404,82 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
nstat = i40e_get_vsi_stats_struct(vsi);
dev_info(&pf->pdev->dev,
" net_stats: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
(long unsigned int)nstat->rx_packets,
(long unsigned int)nstat->rx_bytes,
(long unsigned int)nstat->rx_errors,
(long unsigned int)nstat->rx_dropped);
(unsigned long int)nstat->rx_packets,
(unsigned long int)nstat->rx_bytes,
(unsigned long int)nstat->rx_errors,
(unsigned long int)nstat->rx_dropped);
dev_info(&pf->pdev->dev,
" net_stats: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
(long unsigned int)nstat->tx_packets,
(long unsigned int)nstat->tx_bytes,
(long unsigned int)nstat->tx_errors,
(long unsigned int)nstat->tx_dropped);
(unsigned long int)nstat->tx_packets,
(unsigned long int)nstat->tx_bytes,
(unsigned long int)nstat->tx_errors,
(unsigned long int)nstat->tx_dropped);
dev_info(&pf->pdev->dev,
" net_stats: multicast = %lu, collisions = %lu\n",
(long unsigned int)nstat->multicast,
(long unsigned int)nstat->collisions);
(unsigned long int)nstat->multicast,
(unsigned long int)nstat->collisions);
dev_info(&pf->pdev->dev,
" net_stats: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
(long unsigned int)nstat->rx_length_errors,
(long unsigned int)nstat->rx_over_errors,
(long unsigned int)nstat->rx_crc_errors);
(unsigned long int)nstat->rx_length_errors,
(unsigned long int)nstat->rx_over_errors,
(unsigned long int)nstat->rx_crc_errors);
dev_info(&pf->pdev->dev,
" net_stats: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
(long unsigned int)nstat->rx_frame_errors,
(long unsigned int)nstat->rx_fifo_errors,
(long unsigned int)nstat->rx_missed_errors);
(unsigned long int)nstat->rx_frame_errors,
(unsigned long int)nstat->rx_fifo_errors,
(unsigned long int)nstat->rx_missed_errors);
dev_info(&pf->pdev->dev,
" net_stats: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
(long unsigned int)nstat->tx_aborted_errors,
(long unsigned int)nstat->tx_carrier_errors,
(long unsigned int)nstat->tx_fifo_errors);
(unsigned long int)nstat->tx_aborted_errors,
(unsigned long int)nstat->tx_carrier_errors,
(unsigned long int)nstat->tx_fifo_errors);
dev_info(&pf->pdev->dev,
" net_stats: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
(long unsigned int)nstat->tx_heartbeat_errors,
(long unsigned int)nstat->tx_window_errors);
(unsigned long int)nstat->tx_heartbeat_errors,
(unsigned long int)nstat->tx_window_errors);
dev_info(&pf->pdev->dev,
" net_stats: rx_compressed = %lu, tx_compressed = %lu\n",
(long unsigned int)nstat->rx_compressed,
(long unsigned int)nstat->tx_compressed);
(unsigned long int)nstat->rx_compressed,
(unsigned long int)nstat->tx_compressed);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_packets = %lu, rx_bytes = %lu, rx_errors = %lu, rx_dropped = %lu\n",
(long unsigned int)vsi->net_stats_offsets.rx_packets,
(long unsigned int)vsi->net_stats_offsets.rx_bytes,
(long unsigned int)vsi->net_stats_offsets.rx_errors,
(long unsigned int)vsi->net_stats_offsets.rx_dropped);
(unsigned long int)vsi->net_stats_offsets.rx_packets,
(unsigned long int)vsi->net_stats_offsets.rx_bytes,
(unsigned long int)vsi->net_stats_offsets.rx_errors,
(unsigned long int)vsi->net_stats_offsets.rx_dropped);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_packets = %lu, tx_bytes = %lu, tx_errors = %lu, tx_dropped = %lu\n",
(long unsigned int)vsi->net_stats_offsets.tx_packets,
(long unsigned int)vsi->net_stats_offsets.tx_bytes,
(long unsigned int)vsi->net_stats_offsets.tx_errors,
(long unsigned int)vsi->net_stats_offsets.tx_dropped);
(unsigned long int)vsi->net_stats_offsets.tx_packets,
(unsigned long int)vsi->net_stats_offsets.tx_bytes,
(unsigned long int)vsi->net_stats_offsets.tx_errors,
(unsigned long int)vsi->net_stats_offsets.tx_dropped);
dev_info(&pf->pdev->dev,
" net_stats_offsets: multicast = %lu, collisions = %lu\n",
(long unsigned int)vsi->net_stats_offsets.multicast,
(long unsigned int)vsi->net_stats_offsets.collisions);
(unsigned long int)vsi->net_stats_offsets.multicast,
(unsigned long int)vsi->net_stats_offsets.collisions);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_length_errors = %lu, rx_over_errors = %lu, rx_crc_errors = %lu\n",
(long unsigned int)vsi->net_stats_offsets.rx_length_errors,
(long unsigned int)vsi->net_stats_offsets.rx_over_errors,
(long unsigned int)vsi->net_stats_offsets.rx_crc_errors);
(unsigned long int)vsi->net_stats_offsets.rx_length_errors,
(unsigned long int)vsi->net_stats_offsets.rx_over_errors,
(unsigned long int)vsi->net_stats_offsets.rx_crc_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_frame_errors = %lu, rx_fifo_errors = %lu, rx_missed_errors = %lu\n",
(long unsigned int)vsi->net_stats_offsets.rx_frame_errors,
(long unsigned int)vsi->net_stats_offsets.rx_fifo_errors,
(long unsigned int)vsi->net_stats_offsets.rx_missed_errors);
(unsigned long int)vsi->net_stats_offsets.rx_frame_errors,
(unsigned long int)vsi->net_stats_offsets.rx_fifo_errors,
(unsigned long int)vsi->net_stats_offsets.rx_missed_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_aborted_errors = %lu, tx_carrier_errors = %lu, tx_fifo_errors = %lu\n",
(long unsigned int)vsi->net_stats_offsets.tx_aborted_errors,
(long unsigned int)vsi->net_stats_offsets.tx_carrier_errors,
(long unsigned int)vsi->net_stats_offsets.tx_fifo_errors);
(unsigned long int)vsi->net_stats_offsets.tx_aborted_errors,
(unsigned long int)vsi->net_stats_offsets.tx_carrier_errors,
(unsigned long int)vsi->net_stats_offsets.tx_fifo_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: tx_heartbeat_errors = %lu, tx_window_errors = %lu\n",
(long unsigned int)vsi->net_stats_offsets.tx_heartbeat_errors,
(long unsigned int)vsi->net_stats_offsets.tx_window_errors);
(unsigned long int)vsi->net_stats_offsets.tx_heartbeat_errors,
(unsigned long int)vsi->net_stats_offsets.tx_window_errors);
dev_info(&pf->pdev->dev,
" net_stats_offsets: rx_compressed = %lu, tx_compressed = %lu\n",
(long unsigned int)vsi->net_stats_offsets.rx_compressed,
(long unsigned int)vsi->net_stats_offsets.tx_compressed);
(unsigned long int)vsi->net_stats_offsets.rx_compressed,
(unsigned long int)vsi->net_stats_offsets.tx_compressed);
dev_info(&pf->pdev->dev,
" tx_restart = %d, tx_busy = %d, rx_buf_failed = %d, rx_page_failed = %d\n",
vsi->tx_restart, vsi->tx_busy,
......@@ -487,6 +487,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]);
if (!rx_ring)
continue;
......@@ -527,7 +528,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" rx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, rx_ring->size,
(long unsigned int)rx_ring->dma);
(unsigned long int)rx_ring->dma);
dev_info(&pf->pdev->dev,
" rx_rings[%i]: vsi = %p, q_vector = %p\n",
i, rx_ring->vsi,
......@@ -535,6 +536,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
}
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
if (!tx_ring)
continue;
......@@ -573,7 +575,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
dev_info(&pf->pdev->dev,
" tx_rings[%i]: size = %i, dma = 0x%08lx\n",
i, tx_ring->size,
(long unsigned int)tx_ring->dma);
(unsigned long int)tx_ring->dma);
dev_info(&pf->pdev->dev,
" tx_rings[%i]: vsi = %p, q_vector = %p\n",
i, tx_ring->vsi,
......@@ -743,6 +745,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
ring = &(hw->aq.asq);
for (i = 0; i < ring->count; i++) {
struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" at[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
......@@ -755,6 +758,7 @@ static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf)
ring = &(hw->aq.arq);
for (i = 0; i < ring->count; i++) {
struct i40e_aq_desc *d = I40E_ADMINQ_DESC(*ring, i);
dev_info(&pf->pdev->dev,
" ar[%02d] flags=0x%04x op=0x%04x dlen=0x%04x ret=0x%04x cookie_h=0x%08x cookie_l=0x%08x\n",
i, d->flags, d->opcode, d->datalen, d->retval,
......@@ -1038,7 +1042,13 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf);
} else if (strncmp(cmd_buf, "del vsi", 7) == 0) {
sscanf(&cmd_buf[7], "%i", &vsi_seid);
cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid);
if (cnt != 1) {
dev_info(&pf->pdev->dev,
"del vsi: bad command string, cnt=%d\n",
cnt);
goto command_write_done;
}
vsi = i40e_dbg_find_vsi(pf, vsi_seid);
if (!vsi) {
dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n",
......@@ -1488,6 +1498,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "read", 4) == 0) {
u32 address;
u32 value;
cnt = sscanf(&cmd_buf[4], "%i", &address);
if (cnt != 1) {
dev_info(&pf->pdev->dev, "read <reg>\n");
......@@ -1507,6 +1518,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "write", 5) == 0) {
u32 address, value;
cnt = sscanf(&cmd_buf[5], "%i %i", &address, &value);
if (cnt != 2) {
dev_info(&pf->pdev->dev, "write <reg> <value>\n");
......@@ -1528,6 +1540,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
......@@ -1726,8 +1739,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
packet_len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
for (i = 0; i < packet_len; i++) {
sscanf(&asc_packet[j], "%2hhx ",
&raw_packet[i]);
cnt = sscanf(&asc_packet[j], "%2hhx ", &raw_packet[i]);
if (!cnt)
break;
j += 3;
}
dev_info(&pf->pdev->dev, "FD raw packet dump\n");
......@@ -1755,6 +1769,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
ret = i40e_aq_stop_lldp(&pf->hw, false, NULL);
if (ret) {
dev_info(&pf->pdev->dev,
......@@ -1779,6 +1794,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
#endif /* CONFIG_I40E_DCB */
} else if (strncmp(&cmd_buf[5], "start", 5) == 0) {
int ret;
ret = i40e_aq_add_rem_control_packet_filter(&pf->hw,
pf->hw.mac.addr,
I40E_ETH_P_LLDP, 0,
......@@ -1807,6 +1823,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
u16 llen, rlen;
int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
......@@ -1833,6 +1850,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
u16 llen, rlen;
int ret;
u8 *buff;
buff = kzalloc(I40E_LLDPDU_SIZE, GFP_KERNEL);
if (!buff)
goto command_write_done;
......@@ -1858,6 +1876,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
buff = NULL;
} else if (strncmp(&cmd_buf[5], "event on", 8) == 0) {
int ret;
ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
true, NULL);
if (ret) {
......@@ -1868,6 +1887,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
}
} else if (strncmp(&cmd_buf[5], "event off", 9) == 0) {
int ret;
ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw,
false, NULL);
if (ret) {
......@@ -2105,6 +2125,7 @@ static ssize_t i40e_dbg_netdev_ops_write(struct file *filp,
}
} else if (strncmp(i40e_dbg_netdev_ops_buf, "change_mtu", 10) == 0) {
int mtu;
cnt = sscanf(&i40e_dbg_netdev_ops_buf[11], "%i %i",
&vsi_seid, &mtu);
if (cnt != 2) {
......@@ -2220,7 +2241,6 @@ void i40e_dbg_pf_init(struct i40e_pf *pf)
create_failed:
dev_info(dev, "debugfs dir/file for %s failed\n", name);
debugfs_remove_recursive(pf->i40e_dbg_pf);
return;
}
/**
......
......@@ -230,10 +230,10 @@ static const char i40e_gstrings_test[][ETH_GSTRING_LEN] = {
static const char i40e_priv_flags_strings[][ETH_GSTRING_LEN] = {
"NPAR",
"LinkPolling",
};
#define I40E_PRIV_FLAGS_STR_LEN \
(sizeof(i40e_priv_flags_strings) / ETH_GSTRING_LEN)
#define I40E_PRIV_FLAGS_STR_LEN ARRAY_SIZE(i40e_priv_flags_strings)
/**
* i40e_partition_setting_complaint - generic complaint for MFP restriction
......@@ -690,7 +690,7 @@ static int i40e_set_settings(struct net_device *netdev,
/* Tell the OS link is going down, the link will go
* back up when fw says it is ready asynchronously
*/
netdev_info(netdev, "PHY settings change requested, NIC Link is going down.\n");
i40e_print_link_message(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
}
......@@ -834,7 +834,7 @@ static int i40e_set_pauseparam(struct net_device *netdev,
/* Tell the OS link is going down, the link will go back up when fw
* says it is ready asynchronously
*/
netdev_info(netdev, "Flow control settings change requested, NIC Link is going down.\n");
i40e_print_link_message(vsi, false);
netif_carrier_off(netdev);
netif_tx_stop_all_queues(netdev);
......@@ -1176,6 +1176,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_count;
/* the desc and bi pointers will be reallocated in the
* setup call
*/
tx_rings[i].desc = NULL;
tx_rings[i].rx_bi = NULL;
err = i40e_setup_tx_descriptors(&tx_rings[i]);
if (err) {
while (i) {
......@@ -1206,6 +1211,11 @@ static int i40e_set_ringparam(struct net_device *netdev,
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_count;
/* the desc and bi pointers will be reallocated in the
* setup call
*/
rx_rings[i].desc = NULL;
rx_rings[i].rx_bi = NULL;
err = i40e_setup_rx_descriptors(&rx_rings[i]);
if (err) {
while (i) {
......@@ -1350,6 +1360,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
if ((pf->lan_veb != I40E_NO_VEB) &&
(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
struct i40e_veb *veb = pf->veb[pf->lan_veb];
for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
p = (char *)veb;
p += i40e_gstrings_veb_stats[j].stat_offset;
......@@ -1597,7 +1608,7 @@ static inline bool i40e_active_vfs(struct i40e_pf *pf)
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
if (test_bit(I40E_VF_STAT_ACTIVE, &vfs[i].vf_states))
return true;
return false;
}
......@@ -2626,10 +2637,31 @@ static u32 i40e_get_priv_flags(struct net_device *dev)
ret_flags |= pf->hw.func_caps.npar_enable ?
I40E_PRIV_FLAGS_NPAR_FLAG : 0;
ret_flags |= pf->flags & I40E_FLAG_LINK_POLLING_ENABLED ?
I40E_PRIV_FLAGS_LINKPOLL_FLAG : 0;
return ret_flags;
}
/**
* i40e_set_priv_flags - set private flags
* @dev: network interface device structure
* @flags: bit flags to be set
**/
static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
{
struct i40e_netdev_priv *np = netdev_priv(dev);
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
if (flags & I40E_PRIV_FLAGS_LINKPOLL_FLAG)
pf->flags |= I40E_FLAG_LINK_POLLING_ENABLED;
else
pf->flags &= ~I40E_FLAG_LINK_POLLING_ENABLED;
return 0;
}
static const struct ethtool_ops i40e_ethtool_ops = {
.get_settings = i40e_get_settings,
.set_settings = i40e_set_settings,
......@@ -2666,6 +2698,7 @@ static const struct ethtool_ops i40e_ethtool_ops = {
.set_channels = i40e_set_channels,
.get_ts_info = i40e_get_ts_info,
.get_priv_flags = i40e_get_priv_flags,
.set_priv_flags = i40e_set_priv_flags,
};
void i40e_set_ethtool_ops(struct net_device *netdev)
......
......@@ -324,7 +324,6 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
wr32(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n");
return;
}
/**
......
......@@ -431,9 +431,8 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
pd_idx1 = max(pd_idx,
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++) {
for (i = pd_idx1; i < pd_lmt1; i++)
i40e_remove_pd_bp(hw, info->hmc_info, i);
}
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
case I40E_SD_TYPE_DIRECT:
......
......@@ -216,10 +216,10 @@ static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
ret = i;
pile->search_hint = i + j;
break;
} else {
/* not enough, so skip over it and continue looking */
i += j;
}
/* not enough, so skip over it and continue looking */
i += j;
}
return ret;
......@@ -503,11 +503,11 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
if (vsi->rx_rings && vsi->rx_rings[0]) {
for (i = 0; i < vsi->num_queue_pairs; i++) {
memset(&vsi->rx_rings[i]->stats, 0 ,
memset(&vsi->rx_rings[i]->stats, 0,
sizeof(vsi->rx_rings[i]->stats));
memset(&vsi->rx_rings[i]->rx_stats, 0 ,
memset(&vsi->rx_rings[i]->rx_stats, 0,
sizeof(vsi->rx_rings[i]->rx_stats));
memset(&vsi->tx_rings[i]->stats, 0 ,
memset(&vsi->tx_rings[i]->stats, 0,
sizeof(vsi->tx_rings[i]->stats));
memset(&vsi->tx_rings[i]->tx_stats, 0,
sizeof(vsi->tx_rings[i]->tx_stats));
......@@ -843,6 +843,7 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
u64 prio_xoff = nsd->priority_xoff_rx[i];
i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
pf->stat_offsets_loaded,
&osd->priority_xoff_rx[i],
......@@ -1439,6 +1440,7 @@ void i40e_del_filter(struct i40e_vsi *vsi,
} else {
/* make sure we don't remove a filter in use by VF or netdev */
int min_f = 0;
min_f += (f->is_vf ? 1 : 0);
min_f += (f->is_netdev ? 1 : 0);
......@@ -1497,6 +1499,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
if (vsi->type == I40E_VSI_MAIN) {
i40e_status ret;
ret = i40e_aq_mac_address_write(&vsi->back->hw,
I40E_AQC_WRITE_TYPE_LAA_WOL,
addr->sa_data, NULL);
......@@ -1932,6 +1935,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
/* check for changes in promiscuous modes */
if (changed_flags & IFF_ALLMULTI) {
bool cur_multipromisc;
cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
vsi->seid,
......@@ -1946,6 +1950,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
}
if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
bool cur_promisc;
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
&vsi->state));
......@@ -3059,24 +3064,6 @@ void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
i40e_flush(hw);
}
/**
* i40e_irq_dynamic_enable - Enable default interrupt generation settings
* @vsi: pointer to a vsi
* @vector: enable a particular Hw Interrupt vector, without base_vector
**/
void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
{
struct i40e_pf *pf = vsi->back;
struct i40e_hw *hw = &pf->hw;
u32 val;
val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
(I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
wr32(hw, I40E_PFINT_DYN_CTLN(vector + vsi->base_vector - 1), val);
/* skip the flush */
}
/**
* i40e_irq_dynamic_disable - Disable default interrupt generation settings
* @vsi: pointer to a vsi
......@@ -3277,6 +3264,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
/* temporarily disable queue cause for NAPI processing */
u32 qval = rd32(hw, I40E_QINT_RQCTL(0));
qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
wr32(hw, I40E_QINT_RQCTL(0), qval);
......@@ -3446,9 +3434,9 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
i += tx_ring->count;
tx_ring->next_to_clean = i;
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);
}
return budget > 0;
}
......@@ -4052,11 +4040,10 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
}
set_bit(__I40E_NEEDS_RESTART, &vsi->state);
if (vsi->netdev && netif_running(vsi->netdev)) {
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
else
i40e_vsi_close(vsi);
}
}
/**
......@@ -4855,11 +4842,14 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
* i40e_print_link_message - print link up or down
* @vsi: the VSI for which link needs a message
*/
static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
{
char speed[SPEED_SIZE] = "Unknown";
char fc[FC_SIZE] = "RX/TX";
if (vsi->current_isup == isup)
return;
vsi->current_isup = isup;
if (!isup) {
netdev_info(vsi->netdev, "NIC Link is Down\n");
return;
......@@ -5328,15 +5318,13 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
"VSI reinit requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
i40e_vsi_reinit_locked(pf->vsi[v]);
clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
}
}
/* no further action needed, so return now */
return;
} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
int v;
......@@ -5344,6 +5332,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
dev_info(&pf->pdev->dev, "VSI down requested\n");
for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
set_bit(__I40E_DOWN, &vsi->state);
......@@ -5351,13 +5340,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
}
}
/* no further action needed, so return now */
return;
} else {
dev_info(&pf->pdev->dev,
"bad reset request 0x%08x\n", reset_flags);
return;
}
}
......@@ -5632,7 +5617,9 @@ u32 i40e_get_global_fd_count(struct i40e_pf *pf)
**/
void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
{
struct i40e_fdir_filter *filter;
u32 fcnt_prog, fcnt_avail;
struct hlist_node *node;
if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
return;
......@@ -5661,6 +5648,18 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
/* if hw had a problem adding a filter, delete it */
if (pf->fd_inv > 0) {
hlist_for_each_entry_safe(filter, node,
&pf->fdir_filter_list, fdir_node) {
if (filter->fd_id == pf->fd_inv) {
hlist_del(&filter->fdir_node);
kfree(filter);
pf->fdir_pf_active_filters--;
}
}
}
}
#define I40E_MIN_FD_FLUSH_INTERVAL 10
......@@ -5680,49 +5679,51 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
return;
if (time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ))) {
/* If the flush is happening too quick and we have mostly
* SB rules we should not re-enable ATR for some time.
*/
min_flush_time = pf->fd_flush_timestamp
+ (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
if (!time_after(jiffies, pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
return;
if (!(time_after(jiffies, min_flush_time)) &&
(fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
disable_atr = true;
}
/* If the flush is happening too quick and we have mostly SB rules we
* should not re-enable ATR for some time.
*/
min_flush_time = pf->fd_flush_timestamp +
(I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;
pf->fd_flush_timestamp = jiffies;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
i40e_flush(&pf->hw);
pf->fd_flush_cnt++;
pf->fd_add_err = 0;
do {
/* Check FD flush status every 5-6msec */
usleep_range(5000, 6000);
reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
break;
} while (flush_wait_retry--);
if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
if (!(time_after(jiffies, min_flush_time)) &&
(fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
disable_atr = true;
}
pf->fd_flush_timestamp = jiffies;
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
/* flush all filters */
wr32(&pf->hw, I40E_PFQF_CTL_1,
I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
i40e_flush(&pf->hw);
pf->fd_flush_cnt++;
pf->fd_add_err = 0;
do {
/* Check FD flush status every 5-6msec */
usleep_range(5000, 6000);
reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
break;
} while (flush_wait_retry--);
if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
} else {
/* replay sideband filters */
i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
if (I40E_DEBUG_FD & pf->hw.debug_mask)
dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
}
/**
......@@ -5893,7 +5894,8 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
return;
pf->service_timer_previous = jiffies;
i40e_link_event(pf);
if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
i40e_link_event(pf);
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
......@@ -6286,6 +6288,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
if (pf->vsi[v]->veb_idx == veb->idx) {
struct i40e_vsi *vsi = pf->vsi[v];
vsi->uplink_seid = veb->seid;
ret = i40e_add_vsi(vsi);
if (ret) {
......@@ -7909,6 +7912,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
/* Set default capability flags */
pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
I40E_FLAG_MSI_ENABLED |
I40E_FLAG_LINK_POLLING_ENABLED |
I40E_FLAG_MSIX_ENABLED;
if (iommu_present(&pci_bus_type))
......@@ -9699,6 +9703,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
} else {
/* force a reset of TC and queue layout configurations */
u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
......@@ -9909,6 +9914,9 @@ static void i40e_print_features(struct i40e_pf *pf)
}
if (pf->flags & I40E_FLAG_DCB_CAPABLE)
buf += sprintf(buf, "DCB ");
#if IS_ENABLED(CONFIG_VXLAN)
buf += sprintf(buf, "VxLAN ");
#endif
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
#ifdef I40E_FCOE
......
......@@ -618,9 +618,8 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
/* Attempt to register the clock before enabling the hardware. */
pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
if (IS_ERR(pf->ptp_clock)) {
if (IS_ERR(pf->ptp_clock))
return PTR_ERR(pf->ptp_clock);
}
/* clear the hwtstamp settings here during clock create, instead of
* during regular init, so that we can maintain settings across a
......
......@@ -465,10 +465,11 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
if (error == BIT(I40E_RX_PROG_STATUS_DESC_FD_TBL_FULL_SHIFT)) {
pf->fd_inv = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fd_id);
if ((rx_desc->wb.qword0.hi_dword.fd_id != 0) ||
(I40E_DEBUG_FD & pf->hw.debug_mask))
dev_warn(&pdev->dev, "ntuple filter loc = %d, could not be added\n",
rx_desc->wb.qword0.hi_dword.fd_id);
pf->fd_inv);
/* Check if the programming error is for ATR.
* If so, auto disable ATR and set a state for
......@@ -1521,6 +1522,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
cleaned_count++;
if (rx_hbo || rx_sph) {
int len;
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
else
......@@ -1706,9 +1708,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
continue;
}
......@@ -2080,6 +2079,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
return -EINVAL;
......@@ -2739,6 +2739,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u8 hdr_len = 0;
int tsyn;
int tso;
if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
......
......@@ -165,6 +165,7 @@ struct i40e_tx_buffer {
};
unsigned int bytecount;
unsigned short gso_segs;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
......
......@@ -160,14 +160,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
I40E_VSI_MAIN = 0,
I40E_VSI_VMDQ1,
I40E_VSI_VMDQ2,
I40E_VSI_CTRL,
I40E_VSI_FCOE,
I40E_VSI_MIRROR,
I40E_VSI_SRIOV,
I40E_VSI_FDIR,
I40E_VSI_MAIN = 0,
I40E_VSI_VMDQ1 = 1,
I40E_VSI_VMDQ2 = 2,
I40E_VSI_CTRL = 3,
I40E_VSI_FCOE = 4,
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
......@@ -510,8 +510,9 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
struct i40e_dcbx_config local_dcbx_config;
struct i40e_dcbx_config remote_dcbx_config;
struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
/* debug mask */
u32 debug_mask;
......
......@@ -536,6 +536,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
}
if (type == I40E_VSI_SRIOV) {
u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
vf->lan_vsi_idx = vsi->idx;
vf->lan_vsi_id = vsi->id;
/* If the port VLAN has been configured and then the
......@@ -605,6 +606,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
/* map PF queues to VF queues */
for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) {
u16 qid = i40e_vc_get_pf_queue_id(vf, vf->lan_vsi_id, j);
reg = (qid & I40E_VPLAN_QTABLE_QINDEX_MASK);
wr32(hw, I40E_VPLAN_QTABLE(total_queue_pairs, vf->vf_id), reg);
total_queue_pairs++;
......@@ -991,24 +993,26 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
if (pf->state & __I40E_TESTING) {
if (test_bit(__I40E_TESTING, &pf->state)) {
dev_warn(&pdev->dev,
"Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
err = -EPERM;
goto err_out;
}
dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
goto out;
if (num_vfs > pf->num_req_vfs) {
dev_warn(&pdev->dev, "Unable to enable %d VFs. Limited to %d VFs due to device resource constraints.\n",
num_vfs, pf->num_req_vfs);
err = -EPERM;
goto err_out;
}
dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
err = i40e_alloc_vfs(pf, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable SR-IOV: %d\n", err);
......@@ -1206,10 +1210,12 @@ static int i40e_vc_get_vf_resources_msg(struct i40e_vf *vf, u8 *msg)
if (vf->lan_vsi_idx) {
vfres->vsi_res[i].vsi_id = vf->lan_vsi_id;
vfres->vsi_res[i].vsi_type = I40E_VSI_SRIOV;
vfres->vsi_res[i].num_queue_pairs =
pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs;
memcpy(vfres->vsi_res[i].default_mac_addr,
vf->default_lan_addr.addr, ETH_ALEN);
vfres->vsi_res[i].num_queue_pairs = vsi->alloc_queue_pairs;
/* VFs only use TC 0 */
vfres->vsi_res[i].qset_handle
= le16_to_cpu(vsi->info.qs_handle[0]);
ether_addr_copy(vfres->vsi_res[i].default_mac_addr,
vf->default_lan_addr.addr);
i++;
}
set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
......@@ -1713,6 +1719,7 @@ static int i40e_vc_add_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
/* add new VLAN filter */
int ret = i40e_vsi_add_vlan(vsi, vfl->vlan_id[i]);
if (ret)
dev_err(&pf->pdev->dev,
"Unable to add VF vlan filter %d, error %d\n",
......@@ -1764,6 +1771,7 @@ static int i40e_vc_remove_vlan_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
for (i = 0; i < vfl->num_elements; i++) {
int ret = i40e_vsi_kill_vlan(vsi, vfl->vlan_id[i]);
if (ret)
dev_err(&pf->pdev->dev,
"Unable to delete VF vlan filter %d, error %d\n",
......@@ -1875,7 +1883,6 @@ static int i40e_vc_validate_vf_msg(struct i40e_vf *vf, u32 v_opcode,
case I40E_VIRTCHNL_OP_UNKNOWN:
default:
return -EPERM;
break;
}
/* few more checks */
if ((valid_len != msglen) || (err_msg_format)) {
......@@ -2314,7 +2321,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
ivi->vf = vf_id;
memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
ether_addr_copy(ivi->mac, vf->default_lan_addr.addr);
ivi->max_tx_rate = vf->tx_rate;
ivi->min_tx_rate = 0;
......
......@@ -622,8 +622,7 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"%s: ntc %d head %d.\n", __func__, ntc,
rd32(hw, hw->aq.asq.head));
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
I40E_ADMINQ_CALLBACK cb_func =
......
......@@ -992,10 +992,10 @@ void i40e_vf_parse_hw_config(struct i40e_hw *hw,
I40E_VIRTCHNL_VF_OFFLOAD_FCOE) ? 1 : 0;
for (i = 0; i < msg->num_vsis; i++) {
if (vsi_res->vsi_type == I40E_VSI_SRIOV) {
memcpy(hw->mac.perm_addr, vsi_res->default_mac_addr,
ETH_ALEN);
memcpy(hw->mac.addr, vsi_res->default_mac_addr,
ETH_ALEN);
ether_addr_copy(hw->mac.perm_addr,
vsi_res->default_mac_addr);
ether_addr_copy(hw->mac.addr,
vsi_res->default_mac_addr);
}
vsi_res++;
}
......
......@@ -987,6 +987,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
cleaned_count++;
if (rx_hbo || rx_sph) {
int len;
if (rx_hbo)
len = I40E_RX_HDR_SIZE;
else
......@@ -1160,9 +1161,6 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & BIT(I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
/* TODO: shouldn't we increment a counter indicating the
* drop?
*/
continue;
}
......@@ -1358,6 +1356,7 @@ static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
/* else if it is a SW VLAN, check the next protocol and store the tag */
} else if (protocol == htons(ETH_P_8021Q)) {
struct vlan_hdr *vhdr, _vhdr;
vhdr = skb_header_pointer(skb, ETH_HLEN, sizeof(_vhdr), &_vhdr);
if (!vhdr)
return -EINVAL;
......@@ -1900,6 +1899,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
......
......@@ -164,6 +164,7 @@ struct i40e_tx_buffer {
};
unsigned int bytecount;
unsigned short gso_segs;
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
......
......@@ -160,14 +160,14 @@ enum i40e_set_fc_aq_failures {
};
enum i40e_vsi_type {
I40E_VSI_MAIN = 0,
I40E_VSI_VMDQ1,
I40E_VSI_VMDQ2,
I40E_VSI_CTRL,
I40E_VSI_FCOE,
I40E_VSI_MIRROR,
I40E_VSI_SRIOV,
I40E_VSI_FDIR,
I40E_VSI_MAIN = 0,
I40E_VSI_VMDQ1 = 1,
I40E_VSI_VMDQ2 = 2,
I40E_VSI_CTRL = 3,
I40E_VSI_FCOE = 4,
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
I40E_VSI_TYPE_UNKNOWN
};
......@@ -504,8 +504,9 @@ struct i40e_hw {
u16 dcbx_status;
/* DCBX info */
struct i40e_dcbx_config local_dcbx_config;
struct i40e_dcbx_config remote_dcbx_config;
struct i40e_dcbx_config local_dcbx_config; /* Oper/Local Cfg */
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
/* debug mask */
u32 debug_mask;
......
......@@ -48,10 +48,6 @@
#define DEFAULT_DEBUG_LEVEL_SHIFT 3
#define PFX "i40evf: "
#define DPRINTK(nlevel, klevel, fmt, args...) \
((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
__func__ , ## args)))
/* dummy struct to make common code less painful */
struct i40e_vsi {
......@@ -70,6 +66,7 @@ struct i40e_vsi {
*/
u16 rx_itr_setting;
u16 tx_itr_setting;
u16 qs_handle;
};
/* How many Rx Buffers do we bundle into one write to the hardware ? */
......
......@@ -730,6 +730,8 @@ static int i40evf_vlan_rx_add_vid(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
if (!VLAN_ALLOWED(adapter))
return -EIO;
if (i40evf_add_vlan(adapter, vid) == NULL)
return -ENOMEM;
return 0;
......@@ -745,8 +747,11 @@ static int i40evf_vlan_rx_kill_vid(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
i40evf_del_vlan(adapter, vid);
return 0;
if (VLAN_ALLOWED(adapter)) {
i40evf_del_vlan(adapter, vid);
return 0;
}
return -EIO;
}
/**
......@@ -1419,16 +1424,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
struct i40evf_adapter,
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
uint32_t rstat_val;
u32 reg_val;
if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((rstat_val == I40E_VFR_VFACTIVE) ||
(rstat_val == I40E_VFR_COMPLETED)) {
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((reg_val == I40E_VFR_VFACTIVE) ||
(reg_val == I40E_VFR_COMPLETED)) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
adapter->state = __I40EVF_STARTUP;
......@@ -1453,11 +1458,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto watchdog_done;
/* check for reset */
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED)) {
reg_val = rd32(hw, I40E_VF_ARQLEN1) & I40E_VF_ARQLEN1_ARQENABLE_MASK;
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) && !reg_val) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
......@@ -1572,7 +1574,7 @@ static void i40evf_reset_task(struct work_struct *work)
struct net_device *netdev = adapter->netdev;
struct i40e_hw *hw = &adapter->hw;
struct i40evf_mac_filter *f;
uint32_t rstat_val;
u32 reg_val;
int i = 0, err;
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
......@@ -1593,12 +1595,11 @@ static void i40evf_reset_task(struct work_struct *work)
/* poll until we see the reset actually happen */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if ((rstat_val != I40E_VFR_VFACTIVE) &&
(rstat_val != I40E_VFR_COMPLETED))
reg_val = rd32(hw, I40E_VF_ARQLEN1) &
I40E_VF_ARQLEN1_ARQENABLE_MASK;
if (!reg_val)
break;
usleep_range(500, 1000);
usleep_range(5000, 10000);
}
if (i == I40EVF_RESET_WAIT_COUNT) {
dev_info(&adapter->pdev->dev, "Never saw reset\n");
......@@ -1607,9 +1608,9 @@ static void i40evf_reset_task(struct work_struct *work)
/* wait until the reset is complete and the PF is responding to us */
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (rstat_val == I40E_VFR_VFACTIVE)
reg_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
if (reg_val == I40E_VFR_VFACTIVE)
break;
msleep(I40EVF_RESET_WAIT_MS);
}
......@@ -1621,7 +1622,7 @@ static void i40evf_reset_task(struct work_struct *work)
/* reset never finished */
dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
rstat_val);
reg_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
if (netif_running(adapter->netdev)) {
......@@ -2114,6 +2115,7 @@ int i40evf_process_config(struct i40evf_adapter *adapter)
adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
adapter->vsi.netdev = adapter->netdev;
adapter->vsi.qs_handle = adapter->vsi_res->qset_handle;
return 0;
}
......