Commit ba197fde authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-01-19 (ice)

This series contains updates to ice driver only.

Tsotne and Anatolii implement new handling, and an AdminQ command, for
firmware LLDP, adding a pending notification to allow for proper
cleanup between TC changes.
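
For orientation, a minimal sketch of the resulting flow, condensed from
the DCB hunks below. The wrapper function name is invented for
illustration; the state macros, ice_get_dcb_cfg_from_mib_change(),
ice_get_dcb_cfg() and ice_lldp_execute_pending_mib() are the ones added
or used by this series, and the ice driver headers are assumed.

/* Sketch: handle an LLDP MIB Change event that may carry the new
 * "pending" state.  Illustrative only, not the literal driver code.
 */
static void sketch_handle_mib_change(struct ice_pf *pf,
				     struct ice_rq_event_info *event)
{
	struct ice_aqc_lldp_get_mib *mib;
	bool pending;

	mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
	pending = FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, mib->state) ==
		  ICE_AQ_LLDP_MIB_CHANGE_PENDING;

	if (pending) {
		/* FW tables are not updated yet: take config from the event */
		ice_get_dcb_cfg_from_mib_change(pf->hw.port_info, event);
	} else {
		/* FW already holds the new config: query it as before */
		if (ice_get_dcb_cfg(pf->hw.port_info))
			return;
	}

	/* ... reconfigure DCB / rebuild TCs for the new settings ... */

	/* a pending change must be acknowledged so FW applies the new MIB */
	if (pending)
		ice_lldp_execute_pending_mib(&pf->hw);
}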

Amritha extends support for drop action outside of switchdev.
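
As a rough sketch of what the ice_tc_lib.c hunks below add (the helper
name sketch_fill_drop_rule() is invented; the fields and flags are the
ones used in the patch, with the ice driver headers assumed):

/* Sketch: a TC flower "drop" action maps to ICE_DROP_PACKET and is
 * programmed as an Rx rule sourced from the PF.  No destination VSI is
 * looked up, since ice_is_forward_action() reports drop as
 * non-forwarding.
 */
static void sketch_fill_drop_rule(struct ice_adv_rule_info *rule_info,
				  struct ice_hw *hw)
{
	rule_info->sw_act.fltr_act = ICE_DROP_PACKET;
	rule_info->sw_act.flag |= ICE_FLTR_RX;
	rule_info->sw_act.src = hw->pf_id;
	rule_info->rx = true;
	rule_info->priority = ICE_SWITCH_FLTR_PRIO_VSI;
}

With this, a flower filter carrying a drop action can be offloaded on
the PF netdev without switching to switchdev mode.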

Siddaraju adjusts the restriction on PTP HW clock frequency adjustments.
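
The change itself is a single capability value (see the ice_ptp.c hunk
below); a sketch of the tightened field, assuming the usual
interpretation of max_adj as parts per billion and an illustrative
instance name:

#include <linux/module.h>
#include <linux/ptp_clock_kernel.h>

/* Sketch only: max_adj is the largest frequency adjustment (in ppb)
 * the PTP core will accept on behalf of the driver, so 100000000 caps
 * requests at roughly +/-10% instead of the previous 999999999 (~100%).
 */
static const struct ptp_clock_info sketch_ice_ptp_caps = {
	.owner	 = THIS_MODULE,
	.max_adj = 100000000,
};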

Ani removes an unneeded non-null check and improves reporting of some link
modes to utilize more appropriate values.

Jesse adds checks to ensure PF VSI type.
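
Condensed into a sketch (the helper name is invented; compare the
ice_main.c hunks below):

/* Sketch: the netdev-specific part of VSI configuration now runs only
 * for the PF VSI, which also removes the old ICE_VSI_LB special case
 * around VLAN setup.  Link-up handling and service-task scheduling get
 * the same ICE_VSI_PF guard.
 */
static int sketch_vsi_cfg_netdev(struct ice_vsi *vsi)
{
	if (!vsi->netdev || vsi->type != ICE_VSI_PF)
		return 0;

	ice_set_rx_mode(vsi->netdev);
	return ice_vsi_vlan_setup(vsi);
}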

Przemek combines duplicate checks of the same condition into one check.

Tony makes various code cleanups: removes comments for cppcheck
suppressions, reduces the scope of some variables, changes some return
statements to return an explicit 0, matches naming between function
declaration and definition, adds a local variable for readability, and
fixes indenting.

Sergey separates DDP (Dynamic Device Personalization) code into its own
file.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f5339209 d52a6180
......@@ -28,6 +28,7 @@ ice-y := ice_main.o \
ice_flow.o \
ice_idc.o \
ice_devlink.o \
ice_ddp.o \
ice_fw_update.o \
ice_lag.o \
ice_ethtool.o \
......
......@@ -1659,14 +1659,24 @@ struct ice_aqc_lldp_get_mib {
#define ICE_AQ_LLDP_TX_ACTIVE 0
#define ICE_AQ_LLDP_TX_SUSPENDED 1
#define ICE_AQ_LLDP_TX_FLUSHED 3
/* DCBX mode */
#define ICE_AQ_LLDP_DCBX_M GENMASK(7, 6)
#define ICE_AQ_LLDP_DCBX_NA 0
#define ICE_AQ_LLDP_DCBX_CEE 1
#define ICE_AQ_LLDP_DCBX_IEEE 2
u8 state;
#define ICE_AQ_LLDP_MIB_CHANGE_STATE_M BIT(0)
#define ICE_AQ_LLDP_MIB_CHANGE_EXECUTED 0
#define ICE_AQ_LLDP_MIB_CHANGE_PENDING 1
/* The following bytes are reserved for the Get LLDP MIB command (0x0A00)
* and in the LLDP MIB Change Event (0x0A01). They are valid for the
* Get LLDP MIB (0x0A00) response only.
*/
u8 reserved1;
__le16 local_len;
__le16 remote_len;
u8 reserved2[2];
u8 reserved[2];
__le32 addr_high;
__le32 addr_low;
};
......@@ -1677,6 +1687,9 @@ struct ice_aqc_lldp_set_mib_change {
u8 command;
#define ICE_AQ_LLDP_MIB_UPDATE_ENABLE 0x0
#define ICE_AQ_LLDP_MIB_UPDATE_DIS 0x1
#define ICE_AQ_LLDP_MIB_PENDING_M BIT(1)
#define ICE_AQ_LLDP_MIB_PENDING_DISABLE 0
#define ICE_AQ_LLDP_MIB_PENDING_ENABLE 1
u8 reserved[15];
};
......@@ -2329,6 +2342,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
ice_aqc_opc_lldp_filter_ctrl = 0x0A0A,
ice_aqc_opc_lldp_execute_pending_mib = 0x0A0B,
/* RSS commands */
ice_aqc_opc_set_rss_key = 0x0B02,
......
......@@ -5503,6 +5503,19 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_lldp_execute_pending_mib - execute LLDP pending MIB request
* @hw: pointer to HW struct
*/
int ice_lldp_execute_pending_mib(struct ice_hw *hw)
{
struct ice_aq_desc desc;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_execute_pending_mib);
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
}
/**
* ice_fw_supports_report_dflt_cfg
* @hw: pointer to the hardware structure
......
......@@ -122,7 +122,7 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update);
int
ice_cfg_phy_fc(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
enum ice_fc_mode fc);
enum ice_fc_mode req_mode);
bool
ice_phy_caps_equals_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
......@@ -221,6 +221,7 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
int
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
int ice_lldp_execute_pending_mib(struct ice_hw *hw);
int
ice_aq_read_i2c(struct ice_hw *hw, struct ice_aqc_link_topo_addr topo_addr,
u16 bus_addr, __le16 addr, u8 params, u8 *data,
......
......@@ -73,6 +73,9 @@ ice_aq_cfg_lldp_mib_change(struct ice_hw *hw, bool ena_update,
if (!ena_update)
cmd->command |= ICE_AQ_LLDP_MIB_UPDATE_DIS;
else
cmd->command |= FIELD_PREP(ICE_AQ_LLDP_MIB_PENDING_M,
ICE_AQ_LLDP_MIB_PENDING_ENABLE);
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
......@@ -963,6 +966,42 @@ int ice_get_dcb_cfg(struct ice_port_info *pi)
return ret;
}
/**
* ice_get_dcb_cfg_from_mib_change
* @pi: port information structure
* @event: pointer to the admin queue receive event
*
* Set DCB configuration from received MIB Change event
*/
void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_rq_event_info *event)
{
struct ice_dcbx_cfg *dcbx_cfg = &pi->qos_cfg.local_dcbx_cfg;
struct ice_aqc_lldp_get_mib *mib;
u8 change_type, dcbx_mode;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
change_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
if (change_type == ICE_AQ_LLDP_MIB_REMOTE)
dcbx_cfg = &pi->qos_cfg.remote_dcbx_cfg;
dcbx_mode = FIELD_GET(ICE_AQ_LLDP_DCBX_M, mib->type);
switch (dcbx_mode) {
case ICE_AQ_LLDP_DCBX_IEEE:
dcbx_cfg->dcbx_mode = ICE_DCBX_MODE_IEEE;
ice_lldp_to_dcb_cfg(event->msg_buf, dcbx_cfg);
break;
case ICE_AQ_LLDP_DCBX_CEE:
pi->qos_cfg.desired_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
ice_cee_to_dcb_cfg((struct ice_aqc_get_cee_dcb_cfg_resp *)
event->msg_buf, pi);
break;
}
}
/**
* ice_init_dcb
* @hw: pointer to the HW struct
......
......@@ -144,6 +144,8 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
struct ice_dcbx_cfg *dcbcfg);
int ice_get_dcb_cfg(struct ice_port_info *pi);
int ice_set_dcb_cfg(struct ice_port_info *pi);
void ice_get_dcb_cfg_from_mib_change(struct ice_port_info *pi,
struct ice_rq_event_info *event);
int ice_init_dcb(struct ice_hw *hw, bool enable_mib_change);
int
ice_query_port_ets(struct ice_port_info *pi,
......
......@@ -859,7 +859,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
if (err)
goto dcb_init_err;
return err;
return 0;
dcb_init_err:
dev_err(dev, "DCB init failed\n");
......@@ -943,6 +943,16 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_tx_ring *tx_ring,
}
}
/**
* ice_dcb_is_mib_change_pending - Check if MIB change is pending
* @state: MIB change state
*/
static bool ice_dcb_is_mib_change_pending(u8 state)
{
return ICE_AQ_LLDP_MIB_CHANGE_PENDING ==
FIELD_GET(ICE_AQ_LLDP_MIB_CHANGE_STATE_M, state);
}
/**
* ice_dcb_process_lldp_set_mib_change - Process MIB change
* @pf: ptr to ice_pf
......@@ -956,6 +966,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct device *dev = ice_pf_to_dev(pf);
struct ice_aqc_lldp_get_mib *mib;
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool pending_handled = true;
bool need_reconfig = false;
struct ice_port_info *pi;
u8 mib_type;
......@@ -972,41 +983,58 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pi = pf->hw.port_info;
mib = (struct ice_aqc_lldp_get_mib *)&event->desc.params.raw;
/* Ignore if event is not for Nearest Bridge */
mib_type = ((mib->type >> ICE_AQ_LLDP_BRID_TYPE_S) &
ICE_AQ_LLDP_BRID_TYPE_M);
mib_type = FIELD_GET(ICE_AQ_LLDP_BRID_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event MIB bridge type 0x%x\n", mib_type);
if (mib_type != ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID)
return;
/* A pending change event contains accurate config information, and
* the FW setting has not been updated yet, so detect if change is
* pending to determine where to pull config information from
* (FW vs event)
*/
if (ice_dcb_is_mib_change_pending(mib->state))
pending_handled = false;
/* Check MIB Type and return if event for Remote MIB update */
mib_type = mib->type & ICE_AQ_LLDP_MIB_TYPE_M;
mib_type = FIELD_GET(ICE_AQ_LLDP_MIB_TYPE_M, mib->type);
dev_dbg(dev, "LLDP event mib type %s\n", mib_type ? "remote" : "local");
if (mib_type == ICE_AQ_LLDP_MIB_REMOTE) {
/* Update the remote cached instance and return */
ret = ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
&pi->qos_cfg.remote_dcbx_cfg);
if (ret) {
dev_err(dev, "Failed to get remote DCB config\n");
return;
if (!pending_handled) {
ice_get_dcb_cfg_from_mib_change(pi, event);
} else {
ret =
ice_aq_get_dcb_cfg(pi->hw, ICE_AQ_LLDP_MIB_REMOTE,
ICE_AQ_LLDP_BRID_TYPE_NEAREST_BRID,
&pi->qos_cfg.remote_dcbx_cfg);
if (ret)
dev_dbg(dev, "Failed to get remote DCB config\n");
}
return;
}
/* That a DCB change has happened is now determined */
mutex_lock(&pf->tc_mutex);
/* store the old configuration */
tmp_dcbx_cfg = pf->hw.port_info->qos_cfg.local_dcbx_cfg;
tmp_dcbx_cfg = pi->qos_cfg.local_dcbx_cfg;
/* Reset the old DCBX configuration data */
memset(&pi->qos_cfg.local_dcbx_cfg, 0,
sizeof(pi->qos_cfg.local_dcbx_cfg));
/* Get updated DCBX data from firmware */
ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(dev, "Failed to get DCB config\n");
goto out;
if (!pending_handled) {
ice_get_dcb_cfg_from_mib_change(pi, event);
} else {
ret = ice_get_dcb_cfg(pi);
if (ret) {
dev_err(dev, "Failed to get DCB config\n");
goto out;
}
}
/* No change detected in DCBX configs */
......@@ -1033,11 +1061,17 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
/* Send Execute Pending MIB Change event if it is a Pending event */
if (!pending_handled) {
ice_lldp_execute_pending_mib(&pf->hw);
pending_handled = true;
}
rtnl_lock();
/* disable VSIs affected by DCB changes */
ice_dcb_ena_dis_vsi(pf, false, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
ret = ice_query_port_ets(pi, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
goto unlock_rtnl;
......@@ -1052,4 +1086,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
rtnl_unlock();
out:
mutex_unlock(&pf->tc_mutex);
/* Send Execute Pending MIB Change event if it is a Pending event */
if (!pending_handled)
ice_lldp_execute_pending_mib(&pf->hw);
}
......@@ -664,7 +664,7 @@ static int ice_lbtest_prepare_rings(struct ice_vsi *vsi)
if (status)
goto err_start_rx_ring;
return status;
return 0;
err_start_rx_ring:
ice_vsi_free_rx_rings(vsi);
......@@ -1950,8 +1950,7 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
ICE_PHY_TYPE_LOW_100G_CAUI4 |
ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC |
ICE_PHY_TYPE_LOW_100G_AUI4 |
ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4 |
ICE_PHY_TYPE_LOW_100GBASE_CP2;
ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4;
phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC |
ICE_PHY_TYPE_HIGH_100G_CAUI2 |
ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC |
......@@ -1964,15 +1963,27 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
100000baseCR4_Full);
}
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_SR4 |
ICE_PHY_TYPE_LOW_100GBASE_SR2;
if (phy_types_low & phy_type_mask_lo) {
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_CP2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseCR2_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseCR2_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseSR4_Full);
}
if (phy_types_low & ICE_PHY_TYPE_LOW_100GBASE_SR2) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseSR2_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseSR2_Full);
}
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_LR4 |
ICE_PHY_TYPE_LOW_100GBASE_DR;
if (phy_types_low & phy_type_mask_lo) {
......@@ -1984,14 +1995,20 @@ ice_phy_type_to_ethtool(struct net_device *netdev,
phy_type_mask_lo = ICE_PHY_TYPE_LOW_100GBASE_KR4 |
ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4;
phy_type_mask_hi = ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4;
if (phy_types_low & phy_type_mask_lo ||
phy_types_high & phy_type_mask_hi) {
if (phy_types_low & phy_type_mask_lo) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR4_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR4_Full);
}
if (phy_types_high & ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4) {
ethtool_link_ksettings_add_link_mode(ks, supported,
100000baseKR2_Full);
ice_ethtool_advertise_link_mode(ICE_AQ_LINK_SPEED_100GB,
100000baseKR2_Full);
}
}
#define TEST_SET_BITS_TIMEOUT 50
......@@ -2242,17 +2259,15 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
100baseT_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseX_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
1000baseKX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_1000MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2500baseT_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
2500baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
2500baseX_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_2500MB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
......@@ -2261,9 +2276,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseT_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseKR_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_10GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseKR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseSR_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
10000baseLR_Full))
......@@ -2287,9 +2301,8 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseCR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseKR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseKR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
50000baseSR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_50GB;
if (ethtool_link_ksettings_test_link_mode(ks, advertising,
......@@ -2299,7 +2312,13 @@ ice_ksettings_find_adv_link_speed(const struct ethtool_link_ksettings *ks)
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseLR4_ER4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseKR4_Full))
100000baseKR4_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseCR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseSR2_Full) ||
ethtool_link_ksettings_test_link_mode(ks, advertising,
100000baseKR2_Full))
adv_link_speed |= ICE_AQ_LINK_SPEED_100GB;
return adv_link_speed;
......
......@@ -6,75 +6,6 @@
#include "ice_type.h"
/* Package minimal version supported */
#define ICE_PKG_SUPP_VER_MAJ 1
#define ICE_PKG_SUPP_VER_MNR 3
/* Package format version */
#define ICE_PKG_FMT_VER_MAJ 1
#define ICE_PKG_FMT_VER_MNR 0
#define ICE_PKG_FMT_VER_UPD 0
#define ICE_PKG_FMT_VER_DFT 0
#define ICE_PKG_CNT 4
enum ice_ddp_state {
/* Indicates that this call to ice_init_pkg
* successfully loaded the requested DDP package
*/
ICE_DDP_PKG_SUCCESS = 0,
/* Generic error for already loaded errors, it is mapped later to
* the more specific one (one of the next 3)
*/
ICE_DDP_PKG_ALREADY_LOADED = -1,
/* Indicates that a DDP package of the same version has already been
* loaded onto the device by a previous call or by another PF
*/
ICE_DDP_PKG_SAME_VERSION_ALREADY_LOADED = -2,
/* The device has a DDP package that is not supported by the driver */
ICE_DDP_PKG_ALREADY_LOADED_NOT_SUPPORTED = -3,
/* The device has a compatible package
* (but different from the request) already loaded
*/
ICE_DDP_PKG_COMPATIBLE_ALREADY_LOADED = -4,
/* The firmware loaded on the device is not compatible with
* the DDP package loaded
*/
ICE_DDP_PKG_FW_MISMATCH = -5,
/* The DDP package file is invalid */
ICE_DDP_PKG_INVALID_FILE = -6,
/* The version of the DDP package provided is higher than
* the driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_HIGH = -7,
/* The version of the DDP package provided is lower than the
* driver supports
*/
ICE_DDP_PKG_FILE_VERSION_TOO_LOW = -8,
/* The signature of the DDP package file provided is invalid */
ICE_DDP_PKG_FILE_SIGNATURE_INVALID = -9,
/* The DDP package file security revision is too low and not
* supported by firmware
*/
ICE_DDP_PKG_FILE_REVISION_TOO_LOW = -10,
/* An error occurred in firmware while loading the DDP package */
ICE_DDP_PKG_LOAD_ERROR = -11,
/* Other errors */
ICE_DDP_PKG_ERR = -12
};
int
ice_acquire_change_lock(struct ice_hw *hw, enum ice_aq_res_access_type access);
void ice_release_change_lock(struct ice_hw *hw);
......
......@@ -3,205 +3,7 @@
#ifndef _ICE_FLEX_TYPE_H_
#define _ICE_FLEX_TYPE_H_
#define ICE_FV_OFFSET_INVAL 0x1FF
/* Extraction Sequence (Field Vector) Table */
struct ice_fv_word {
u8 prot_id;
u16 off; /* Offset within the protocol header */
u8 resvrd;
} __packed;
#define ICE_MAX_NUM_PROFILES 256
#define ICE_MAX_FV_WORDS 48
struct ice_fv {
struct ice_fv_word ew[ICE_MAX_FV_WORDS];
};
/* Package and segment headers and tables */
struct ice_pkg_hdr {
struct ice_pkg_ver pkg_format_ver;
__le32 seg_count;
__le32 seg_offset[];
};
/* generic segment */
struct ice_generic_seg_hdr {
#define SEGMENT_TYPE_METADATA 0x00000001
#define SEGMENT_TYPE_ICE 0x00000010
__le32 seg_type;
struct ice_pkg_ver seg_format_ver;
__le32 seg_size;
char seg_id[ICE_PKG_NAME_SIZE];
};
/* ice specific segment */
union ice_device_id {
struct {
__le16 device_id;
__le16 vendor_id;
} dev_vend_id;
__le32 id;
};
struct ice_device_id_entry {
union ice_device_id device;
union ice_device_id sub_device;
};
struct ice_seg {
struct ice_generic_seg_hdr hdr;
__le32 device_table_count;
struct ice_device_id_entry device_table[];
};
struct ice_nvm_table {
__le32 table_count;
__le32 vers[];
};
struct ice_buf {
#define ICE_PKG_BUF_SIZE 4096
u8 buf[ICE_PKG_BUF_SIZE];
};
struct ice_buf_table {
__le32 buf_count;
struct ice_buf buf_array[];
};
/* global metadata specific segment */
struct ice_global_metadata_seg {
struct ice_generic_seg_hdr hdr;
struct ice_pkg_ver pkg_ver;
__le32 rsvd;
char pkg_name[ICE_PKG_NAME_SIZE];
};
#define ICE_MIN_S_OFF 12
#define ICE_MAX_S_OFF 4095
#define ICE_MIN_S_SZ 1
#define ICE_MAX_S_SZ 4084
/* section information */
struct ice_section_entry {
__le32 type;
__le16 offset;
__le16 size;
};
#define ICE_MIN_S_COUNT 1
#define ICE_MAX_S_COUNT 511
#define ICE_MIN_S_DATA_END 12
#define ICE_MAX_S_DATA_END 4096
#define ICE_METADATA_BUF 0x80000000
struct ice_buf_hdr {
__le16 section_count;
__le16 data_end;
struct ice_section_entry section_entry[];
};
#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
struct_size((struct ice_buf_hdr *)0, section_entry, 1) - (hd_sz)) /\
(ent_sz))
/* ice package section IDs */
#define ICE_SID_METADATA 1
#define ICE_SID_XLT0_SW 10
#define ICE_SID_XLT_KEY_BUILDER_SW 11
#define ICE_SID_XLT1_SW 12
#define ICE_SID_XLT2_SW 13
#define ICE_SID_PROFID_TCAM_SW 14
#define ICE_SID_PROFID_REDIR_SW 15
#define ICE_SID_FLD_VEC_SW 16
#define ICE_SID_CDID_KEY_BUILDER_SW 17
struct ice_meta_sect {
struct ice_pkg_ver ver;
#define ICE_META_SECT_NAME_SIZE 28
char name[ICE_META_SECT_NAME_SIZE];
__le32 track_id;
};
#define ICE_SID_CDID_REDIR_SW 18
#define ICE_SID_XLT0_ACL 20
#define ICE_SID_XLT_KEY_BUILDER_ACL 21
#define ICE_SID_XLT1_ACL 22
#define ICE_SID_XLT2_ACL 23
#define ICE_SID_PROFID_TCAM_ACL 24
#define ICE_SID_PROFID_REDIR_ACL 25
#define ICE_SID_FLD_VEC_ACL 26
#define ICE_SID_CDID_KEY_BUILDER_ACL 27
#define ICE_SID_CDID_REDIR_ACL 28
#define ICE_SID_XLT0_FD 30
#define ICE_SID_XLT_KEY_BUILDER_FD 31
#define ICE_SID_XLT1_FD 32
#define ICE_SID_XLT2_FD 33
#define ICE_SID_PROFID_TCAM_FD 34
#define ICE_SID_PROFID_REDIR_FD 35
#define ICE_SID_FLD_VEC_FD 36
#define ICE_SID_CDID_KEY_BUILDER_FD 37
#define ICE_SID_CDID_REDIR_FD 38
#define ICE_SID_XLT0_RSS 40
#define ICE_SID_XLT_KEY_BUILDER_RSS 41
#define ICE_SID_XLT1_RSS 42
#define ICE_SID_XLT2_RSS 43
#define ICE_SID_PROFID_TCAM_RSS 44
#define ICE_SID_PROFID_REDIR_RSS 45
#define ICE_SID_FLD_VEC_RSS 46
#define ICE_SID_CDID_KEY_BUILDER_RSS 47
#define ICE_SID_CDID_REDIR_RSS 48
#define ICE_SID_RXPARSER_MARKER_PTYPE 55
#define ICE_SID_RXPARSER_BOOST_TCAM 56
#define ICE_SID_RXPARSER_METADATA_INIT 58
#define ICE_SID_TXPARSER_BOOST_TCAM 66
#define ICE_SID_XLT0_PE 80
#define ICE_SID_XLT_KEY_BUILDER_PE 81
#define ICE_SID_XLT1_PE 82
#define ICE_SID_XLT2_PE 83
#define ICE_SID_PROFID_TCAM_PE 84
#define ICE_SID_PROFID_REDIR_PE 85
#define ICE_SID_FLD_VEC_PE 86
#define ICE_SID_CDID_KEY_BUILDER_PE 87
#define ICE_SID_CDID_REDIR_PE 88
/* Label Metadata section IDs */
#define ICE_SID_LBL_FIRST 0x80000010
#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
/* The following define MUST be updated to reflect the last label section ID */
#define ICE_SID_LBL_LAST 0x80000038
enum ice_block {
ICE_BLK_SW = 0,
ICE_BLK_ACL,
ICE_BLK_FD,
ICE_BLK_RSS,
ICE_BLK_PE,
ICE_BLK_COUNT
};
enum ice_sect {
ICE_XLT0 = 0,
ICE_XLT_KB,
ICE_XLT1,
ICE_XLT2,
ICE_PROF_TCAM,
ICE_PROF_REDIR,
ICE_VEC_TBL,
ICE_CDID_KB,
ICE_CDID_REDIR,
ICE_SECT_COUNT
};
#include "ice_ddp.h"
/* Packet Type (PTYPE) values */
#define ICE_PTYPE_MAC_PAY 1
......@@ -283,134 +85,6 @@ struct ice_ptype_attributes {
enum ice_ptype_attrib_type attrib;
};
/* package labels */
struct ice_label {
__le16 value;
#define ICE_PKG_LABEL_SIZE 64
char name[ICE_PKG_LABEL_SIZE];
};
struct ice_label_section {
__le16 count;
struct ice_label label[];
};
#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
struct_size((struct ice_label_section *)0, label, 1) - \
sizeof(struct ice_label), sizeof(struct ice_label))
struct ice_sw_fv_section {
__le16 count;
__le16 base_offset;
struct ice_fv fv[];
};
struct ice_sw_fv_list_entry {
struct list_head list_entry;
u32 profile_id;
struct ice_fv *fv_ptr;
};
/* The BOOST TCAM stores the match packet header in reverse order, meaning
* the fields are reversed; in addition, this means that the normally big endian
* fields of the packet are now little endian.
*/
struct ice_boost_key_value {
#define ICE_BOOST_REMAINING_HV_KEY 15
u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
__le16 hv_dst_port_key;
__le16 hv_src_port_key;
u8 tcam_search_key;
} __packed;
struct ice_boost_key {
struct ice_boost_key_value key;
struct ice_boost_key_value key2;
};
/* package Boost TCAM entry */
struct ice_boost_tcam_entry {
__le16 addr;
__le16 reserved;
/* break up the 40 bytes of key into different fields */
struct ice_boost_key key;
u8 boost_hit_index_group;
/* The following contains bitfields which are not on byte boundaries.
* These fields are currently unused by driver software.
*/
#define ICE_BOOST_BIT_FIELDS 43
u8 bit_fields[ICE_BOOST_BIT_FIELDS];
};
struct ice_boost_tcam_section {
__le16 count;
__le16 reserved;
struct ice_boost_tcam_entry tcam[];
};
#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
struct_size((struct ice_boost_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_boost_tcam_entry), \
sizeof(struct ice_boost_tcam_entry))
/* package Marker Ptype TCAM entry */
struct ice_marker_ptype_tcam_entry {
#define ICE_MARKER_PTYPE_TCAM_ADDR_MAX 1024
__le16 addr;
__le16 ptype;
u8 keys[20];
};
struct ice_marker_ptype_tcam_section {
__le16 count;
__le16 reserved;
struct ice_marker_ptype_tcam_entry tcam[];
};
#define ICE_MAX_MARKER_PTYPE_TCAMS_IN_BUF \
ICE_MAX_ENTRIES_IN_BUF(struct_size((struct ice_marker_ptype_tcam_section *)0, tcam, 1) - \
sizeof(struct ice_marker_ptype_tcam_entry), \
sizeof(struct ice_marker_ptype_tcam_entry))
struct ice_xlt1_section {
__le16 count;
__le16 offset;
u8 value[];
};
struct ice_xlt2_section {
__le16 count;
__le16 offset;
__le16 value[];
};
struct ice_prof_redir_section {
__le16 count;
__le16 offset;
u8 redir_value[];
};
/* package buffer building */
struct ice_buf_build {
struct ice_buf buf;
u16 reserved_section_table_entries;
};
struct ice_pkg_enum {
struct ice_buf_table *buf_table;
u32 buf_idx;
u32 type;
struct ice_buf_hdr *buf;
u32 sect_idx;
void *sect;
u32 sect_type;
u32 entry_idx;
void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
};
/* Tunnel enabling */
enum ice_tunnel_type {
......
......@@ -1670,7 +1670,7 @@ static int ice_vsi_alloc_ring_stats(struct ice_vsi *vsi)
if (!ring_stats)
goto err_out;
WRITE_ONCE(rx_ring_stats[i], ring_stats);
WRITE_ONCE(rx_ring_stats[i], ring_stats);
}
ring->ring_stats = ring_stats;
......
......@@ -3780,13 +3780,11 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
unroll_napi_add:
ice_tc_indir_block_unregister(vsi);
unroll_cfg_netdev:
if (vsi) {
ice_napi_del(vsi);
if (vsi->netdev) {
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
ice_napi_del(vsi);
if (vsi->netdev) {
clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state);
free_netdev(vsi->netdev);
vsi->netdev = NULL;
}
unroll_vsi_setup:
......@@ -5048,8 +5046,11 @@ static void ice_setup_mc_magic_wake(struct ice_pf *pf)
static void ice_remove(struct pci_dev *pdev)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
struct ice_hw *hw;
int i;
hw = &pf->hw;
ice_devlink_unregister(pf);
for (i = 0; i < ICE_MAX_RESET_WAIT; i++) {
if (!ice_is_reset_in_progress(pf->state))
......@@ -5082,7 +5083,7 @@ static void ice_remove(struct pci_dev *pdev)
ice_remove_arfs(pf);
ice_setup_mc_magic_wake(pf);
ice_vsi_release_all(pf);
mutex_destroy(&(&pf->hw)->fdir_fltr_lock);
mutex_destroy(&hw->fdir_fltr_lock);
ice_set_wake(pf);
ice_free_irq_msix_misc(pf);
ice_for_each_vsi(pf, i) {
......@@ -5094,13 +5095,13 @@ static void ice_remove(struct pci_dev *pdev)
pf->vsi_stats = NULL;
ice_deinit_pf(pf);
ice_devlink_destroy_regions(pf);
ice_deinit_hw(&pf->hw);
ice_deinit_hw(hw);
/* Issue a PFR as part of the prescribed driver unload flow. Do not
* do it via ice_schedule_reset() since there is no need to rebuild
* and the service task is already stopped.
*/
ice_reset(&pf->hw, ICE_RESET_PFR);
ice_reset(hw, ICE_RESET_PFR);
pci_wait_for_pending_transaction(pdev);
ice_clear_interrupt_scheme(pf);
pci_disable_pcie_error_reporting(pdev);
......@@ -6146,15 +6147,12 @@ int ice_vsi_cfg(struct ice_vsi *vsi)
{
int err;
if (vsi->netdev) {
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_set_rx_mode(vsi->netdev);
if (vsi->type != ICE_VSI_LB) {
err = ice_vsi_vlan_setup(vsi);
if (err)
return err;
}
err = ice_vsi_vlan_setup(vsi);
if (err)
return err;
}
ice_vsi_cfg_dcb_rings(vsi);
......@@ -6335,7 +6333,7 @@ static int ice_up_complete(struct ice_vsi *vsi)
if (vsi->port_info &&
(vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) &&
vsi->netdev) {
vsi->netdev && vsi->type == ICE_VSI_PF) {
ice_print_link_msg(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
......@@ -6346,7 +6344,9 @@ static int ice_up_complete(struct ice_vsi *vsi)
* set the baseline so counters are ready when interface is up
*/
ice_update_eth_stats(vsi);
ice_service_task_schedule(pf);
if (vsi->type == ICE_VSI_PF)
ice_service_task_schedule(pf);
return 0;
}
......
......@@ -662,7 +662,6 @@ ice_get_orom_civd_data(struct ice_hw *hw, enum ice_bank_select bank,
/* Verify that the simple checksum is zero */
for (i = 0; i < sizeof(*tmp); i++)
/* cppcheck-suppress objectIndex */
sum += ((u8 *)tmp)[i];
if (sum) {
......
......@@ -2269,7 +2269,7 @@ static void ice_ptp_set_caps(struct ice_pf *pf)
snprintf(info->name, sizeof(info->name) - 1, "%s-%s-clk",
dev_driver_string(dev), dev_name(dev));
info->owner = THIS_MODULE;
info->max_adj = 999999999;
info->max_adj = 100000000;
info->adjtime = ice_ptp_adjtime;
info->adjfine = ice_ptp_adjfine;
info->gettimex64 = ice_ptp_gettimex64;
......
......@@ -1063,7 +1063,6 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
/* cppcheck-suppress unusedVariable */
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
......@@ -1655,12 +1654,13 @@ ice_sched_add_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle,
u32 first_node_teid;
u16 num_added = 0;
u8 i, qgl, vsil;
int status;
qgl = ice_sched_get_qgrp_layer(hw);
vsil = ice_sched_get_vsi_layer(hw);
parent = ice_sched_get_vsi_node(pi, tc_node, vsi_handle);
for (i = vsil + 1; i <= qgl; i++) {
int status;
if (!parent)
return -EIO;
......@@ -1756,13 +1756,14 @@ ice_sched_add_vsi_support_nodes(struct ice_port_info *pi, u16 vsi_handle,
u32 first_node_teid;
u16 num_added = 0;
u8 i, vsil;
int status;
if (!pi)
return -EINVAL;
vsil = ice_sched_get_vsi_layer(pi->hw);
for (i = pi->hw->sw_entry_point_layer; i <= vsil; i++) {
int status;
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
i, num_nodes[i],
&first_node_teid,
......
......@@ -792,7 +792,7 @@ static struct ice_vsi *
ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
{
struct ice_rx_ring *ring = NULL;
struct ice_vsi *ch_vsi = NULL;
struct ice_vsi *dest_vsi = NULL;
struct ice_pf *pf = vsi->back;
struct device *dev;
u32 tc_class;
......@@ -810,7 +810,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
return ERR_PTR(-EOPNOTSUPP);
}
/* Locate ADQ VSI depending on hw_tc number */
ch_vsi = vsi->tc_map_vsi[tc_class];
dest_vsi = vsi->tc_map_vsi[tc_class];
break;
case ICE_FWD_TO_Q:
/* Locate the Rx queue */
......@@ -824,7 +824,7 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
/* Determine destination VSI even though the action is
* FWD_TO_QUEUE, because QUEUE is associated with VSI
*/
ch_vsi = tc_fltr->dest_vsi;
dest_vsi = tc_fltr->dest_vsi;
break;
default:
dev_err(dev,
......@@ -832,13 +832,13 @@ ice_tc_forward_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *tc_fltr)
tc_fltr->action.fltr_act);
return ERR_PTR(-EINVAL);
}
/* Must have valid ch_vsi (it could be main VSI or ADQ VSI) */
if (!ch_vsi) {
/* Must have valid dest_vsi (it could be main VSI or ADQ VSI) */
if (!dest_vsi) {
dev_err(dev,
"Unable to add filter because specified destination VSI doesn't exist\n");
return ERR_PTR(-EINVAL);
}
return ch_vsi;
return dest_vsi;
}
/**
......@@ -860,7 +860,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 flags = tc_fltr->flags;
struct ice_vsi *ch_vsi;
struct ice_vsi *dest_vsi;
struct device *dev;
u16 lkups_cnt = 0;
u16 l4_proto = 0;
......@@ -883,9 +883,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
}
/* validate forwarding action VSI and queue */
ch_vsi = ice_tc_forward_action(vsi, tc_fltr);
if (IS_ERR(ch_vsi))
return PTR_ERR(ch_vsi);
if (ice_is_forward_action(tc_fltr->action.fltr_act)) {
dest_vsi = ice_tc_forward_action(vsi, tc_fltr);
if (IS_ERR(dest_vsi))
return PTR_ERR(dest_vsi);
}
lkups_cnt = ice_tc_count_lkups(flags, headers, tc_fltr);
list = kcalloc(lkups_cnt, sizeof(*list), GFP_ATOMIC);
......@@ -904,7 +906,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
switch (tc_fltr->action.fltr_act) {
case ICE_FWD_TO_VSI:
rule_info.sw_act.vsi_handle = ch_vsi->idx;
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
......@@ -915,7 +917,7 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
case ICE_FWD_TO_Q:
/* HW queue number in global space */
rule_info.sw_act.fwd_id.q_id = tc_fltr->action.fwd.q.hw_queue;
rule_info.sw_act.vsi_handle = ch_vsi->idx;
rule_info.sw_act.vsi_handle = dest_vsi->idx;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_QUEUE;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
......@@ -923,14 +925,15 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.queue,
tc_fltr->action.fwd.q.hw_queue, lkups_cnt);
break;
default:
rule_info.sw_act.flag |= ICE_FLTR_TX;
/* In case of Tx (LOOKUP_TX), src needs to be src VSI */
rule_info.sw_act.src = vsi->idx;
/* 'Rx' is false, direction of rule (LOOKUP_TX) */
rule_info.rx = false;
case ICE_DROP_PACKET:
rule_info.sw_act.flag |= ICE_FLTR_RX;
rule_info.sw_act.src = hw->pf_id;
rule_info.rx = true;
rule_info.priority = ICE_SWITCH_FLTR_PRIO_VSI;
break;
default:
ret = -EOPNOTSUPP;
goto exit;
}
ret = ice_add_adv_rule(hw, list, lkups_cnt, &rule_info, &rule_added);
......@@ -953,11 +956,11 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->dest_vsi_handle = rule_added.vsi_handle;
if (tc_fltr->action.fltr_act == ICE_FWD_TO_VSI ||
tc_fltr->action.fltr_act == ICE_FWD_TO_Q) {
tc_fltr->dest_vsi = ch_vsi;
tc_fltr->dest_vsi = dest_vsi;
/* keep track of advanced switch filter for
* destination VSI
*/
ch_vsi->num_chnl_fltr++;
dest_vsi->num_chnl_fltr++;
/* keeps track of channel filters for PF VSI */
if (vsi->type == ICE_VSI_PF &&
......@@ -978,6 +981,10 @@ ice_add_tc_flower_adv_fltr(struct ice_vsi *vsi,
tc_fltr->action.fwd.q.hw_queue, rule_added.rid,
rule_added.rule_id);
break;
case ICE_DROP_PACKET:
dev_dbg(dev, "added switch rule (lkups_cnt %u, flags 0x%x), action is drop, rid %u, rule_id %u\n",
lkups_cnt, flags, rule_added.rid, rule_added.rule_id);
break;
default:
break;
}
......@@ -1712,6 +1719,9 @@ ice_tc_parse_action(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr,
case FLOW_ACTION_RX_QUEUE_MAPPING:
/* forward to queue */
return ice_tc_forward_to_queue(vsi, fltr, act);
case FLOW_ACTION_DROP:
fltr->action.fltr_act = ICE_DROP_PACKET;
return 0;
default:
NL_SET_ERR_MSG_MOD(fltr->extack, "Unsupported TC action");
return -EOPNOTSUPP;
......
......@@ -211,4 +211,14 @@ ice_del_cls_flower(struct ice_vsi *vsi, struct flow_cls_offload *cls_flower);
void ice_replay_tc_fltrs(struct ice_pf *pf);
bool ice_is_tunnel_supported(struct net_device *dev);
static inline bool ice_is_forward_action(enum ice_sw_fwd_act_type fltr_act)
{
switch (fltr_act) {
case ICE_FWD_TO_VSI:
case ICE_FWD_TO_Q:
return true;
default:
return false;
}
}
#endif /* _ICE_TC_LIB_H_ */
......@@ -1996,7 +1996,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
if (err < 0)
return err;
/* cppcheck-suppress unreadVariable */
protocol = vlan_get_protocol(skb);
if (eth_p_mpls(protocol))
......@@ -2033,8 +2032,6 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
}
/* reset pointers to inner headers */
/* cppcheck-suppress unreadVariable */
ip.hdr = skb_inner_network_header(skb);
l4.hdr = skb_inner_transport_header(skb);
......