Commit 4460985f authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2020-01-03

This series contains updates to the ice driver only.

Brett adds support for UDP segmentation offload (USO) based on the work
Alex Duyck did for other Intel drivers.  Refactors how the VF sets
spoof checking to resolve a couple of issues found in
ice_set_vf_spoofchk().  Adds the ability to track the dflt_vsi
(default VSI), since we cannot have more than one default VSI.  Adds a
macro for a commonly used "for" loop that was repeated throughout the
code.  Cleans up the VF link flows so they are all similar.  Refactors
the flows for adding and deleting MAC addresses in order to simplify
the logic for error conditions and for setting/clearing the VF's
default MAC address field.
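
As a rough illustration of the new iterator, here is a minimal,
self-contained sketch of the ice_for_each_vf() pattern added by this
series; the struct layouts are userspace stand-ins, not the real driver
definitions:

    #include <stdio.h>

    struct ice_vf { int vf_id; };
    struct ice_pf { struct ice_vf *vf; unsigned int num_alloc_vfs; };

    /* same shape as the macro added to ice_virtchnl_pf.h below */
    #define ice_for_each_vf(pf, i) \
            for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)

    int main(void)
    {
            struct ice_vf vfs[2] = { { 0 }, { 1 } };
            struct ice_pf pf = { .vf = vfs, .num_alloc_vfs = 2 };
            unsigned int i;

            /* replaces open-coded "for (i = 0; i < pf->num_alloc_vfs; i++)" */
            ice_for_each_vf(&pf, i)
                    printf("notify VF %d\n", pf.vf[i].vf_id);
            return 0;
    }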

Michal moves the setting of the default ITR value from ice_cfg_itr() to
the function where we allocate queue vectors, ice_vsi_alloc_q_vector().
Adds support for saving and restoring the ITR value for each queue
across a VSI rebuild.  Adds a check for invalid or unused coalesce
parameters that logs the offending parameter and returns an error.
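
A minimal sketch of that parameter check, assuming userspace stand-in
types rather than the real struct ethtool_coalesce: walk a table of
{value, name} pairs and reject the request if any unsupported field is
nonzero, mirroring ice_is_coalesce_param_invalid() in the diff below:

    #include <stdio.h>
    #include <stddef.h>

    struct not_used { unsigned int value; const char *name; };

    /* returns -1 (the driver returns -EINVAL) if an unsupported knob is set */
    static int coalesce_param_invalid(const struct not_used *param, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    if (param[i].value) {
                            printf("Setting %s not supported\n", param[i].name);
                            return -1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            const struct not_used param[] = {
                    { 0, "rx-usecs-irq" },  /* zero: allowed */
                    { 5, "tx-frames" },     /* nonzero: rejected */
            };

            return coalesce_param_invalid(param, sizeof(param) / sizeof(*param));
    }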

Vignesh removes code that was attempting to write to read-only
registers for the receive flex descriptors.

Tony changes a netdev_info() to netdev_dbg() when the MTU value is
changed.

Bruce suppresses a Coverity-reported error that was not really an error
by adding a code comment.
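
For reference, the suppression works by placing a Coverity annotation
comment directly above the call whose return value is deliberately
ignored; a tiny sketch with a hypothetical helper (not a real driver
function):

    /* hypothetical best-effort rollback; failure here is not actionable */
    static int restore_previous_cfg(void) { return 0; }

    static void error_cleanup(void)
    {
            /* coverity[check_return] */
            restore_previous_cfg();
    }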

Mitch adds a check for a NULL receive descriptor to resolve a
Coverity-reported issue.

Krzysztof prevents a potential general protection fault by adding a
bounds check to verify that the queue ID is within the size of the UMEM
array before indexing it.  Adds code comments to assist Coverity in its
scans and prevent false positives.
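
The shape of that bounds check, as a self-contained sketch with stub
types (the real check lands in ice_xsk_umem() in the diff below):
validate the queue ID against the array length before any dereference:

    #include <stddef.h>

    struct vsi_stub {
            void **xsk_umems;               /* per-queue UMEM pointers */
            unsigned short num_xsk_umems;   /* array length */
    };

    static void *xsk_umem_lookup(const struct vsi_stub *vsi, unsigned short qid)
    {
            /* reject out-of-range IDs before indexing the array */
            if (qid >= vsi->num_xsk_umems || !vsi->xsk_umems ||
                !vsi->xsk_umems[qid])
                    return NULL;
            return vsi->xsk_umems[qid];
    }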

Jake adds support for E822 devices to the driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3c85efb8 5d9e618c
......@@ -174,6 +174,8 @@ struct ice_sw {
struct ice_pf *pf;
u16 sw_id; /* switch ID for this switch */
u16 bridge_mode; /* VEB/VEPA/Port Virtualizer */
struct ice_vsi *dflt_vsi; /* default VSI for this switch */
u8 dflt_vsi_ena:1; /* true if above dflt_vsi is enabled */
};
enum ice_state {
......@@ -275,6 +277,7 @@ struct ice_vsi {
u8 current_isup:1; /* Sync 'link up' logging */
u8 stat_offsets_loaded:1;
u8 vlan_ena:1;
u16 num_vlan;
/* queue information */
u8 tx_mapping_mode; /* ICE_MAP_MODE_[CONTIG|SCATTER] */
......@@ -462,12 +465,13 @@ static inline void ice_set_ring_xdp(struct ice_ring *ring)
static inline struct xdp_umem *ice_xsk_umem(struct ice_ring *ring)
{
struct xdp_umem **umems = ring->vsi->xsk_umems;
int qid = ring->q_index;
u16 qid = ring->q_index;
if (ice_ring_is_xdp(ring))
qid -= ring->vsi->num_xdp_txq;
if (!umems || !umems[qid] || !ice_is_xdp_ena_vsi(ring->vsi))
if (qid >= ring->vsi->num_xsk_umems || !umems || !umems[qid] ||
!ice_is_xdp_ena_vsi(ring->vsi))
return NULL;
return umems[qid];
......
......@@ -93,7 +93,8 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
* We allocate one q_vector and set the default ITR values associated
* with this q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
......@@ -108,6 +109,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
q_vector->vsi = vsi;
q_vector->v_idx = v_idx;
q_vector->tx.itr_setting = ICE_DFLT_TX_ITR;
q_vector->rx.itr_setting = ICE_DFLT_RX_ITR;
if (vsi->type == ICE_VSI_VF)
goto out;
/* only set affinity_mask if the CPU is online */
......@@ -299,6 +302,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (ring->vsi->type == ICE_VSI_PF) {
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
ring->q_index);
......@@ -323,7 +327,9 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
ring->q_index);
} else {
ring->zca.free = NULL;
if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
/* coverity[check_return] */
xdp_rxq_info_reg(&ring->xdp_rxq,
ring->netdev,
ring->q_index);
......@@ -674,10 +680,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_rx) {
struct ice_ring_container *rc = &q_vector->rx;
/* if this value is set then don't overwrite with default */
if (!rc->itr_setting)
rc->itr_setting = ICE_DFLT_RX_ITR;
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
......@@ -688,10 +690,6 @@ void ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
if (q_vector->num_ring_tx) {
struct ice_ring_container *rc = &q_vector->tx;
/* if this value is set then don't overwrite with default */
if (!rc->itr_setting)
rc->itr_setting = ICE_DFLT_TX_ITR;
rc->target_itr = ITR_TO_REG(rc->itr_setting);
rc->next_update = jiffies + 1;
rc->current_itr = rc->target_itr;
......
......@@ -7,25 +7,6 @@
#define ICE_PF_RESET_WAIT_COUNT 200
#define ICE_PROG_FLEX_ENTRY(hw, rxdid, mdid, idx) \
wr32((hw), GLFLXP_RXDID_FLX_WRD_##idx(rxdid), \
((ICE_RX_OPC_MDID << \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_RXDID_OPCODE_M) | \
(((mdid) << GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_S) & \
GLFLXP_RXDID_FLX_WRD_##idx##_PROT_MDID_M))
#define ICE_PROG_FLG_ENTRY(hw, rxdid, flg_0, flg_1, flg_2, flg_3, idx) \
wr32((hw), GLFLXP_RXDID_FLAGS(rxdid, idx), \
(((flg_0) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M) | \
(((flg_1) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M) | \
(((flg_2) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M) | \
(((flg_3) << GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S) & \
GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M))
/**
* ice_set_mac_type - Sets MAC type
* @hw: pointer to the HW structure
......@@ -347,88 +328,6 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
return 0;
}
/**
* ice_init_flex_flags
* @hw: pointer to the hardware structure
* @prof_id: Rx Descriptor Builder profile ID
*
* Function to initialize Rx flex flags
*/
static void ice_init_flex_flags(struct ice_hw *hw, enum ice_rxdid prof_id)
{
u8 idx = 0;
/* Flex-flag fields (0-2) are programmed with FLG64 bits with layout:
* flexiflags0[5:0] - TCP flags, is_packet_fragmented, is_packet_UDP_GRE
* flexiflags1[3:0] - Not used for flag programming
* flexiflags2[7:0] - Tunnel and VLAN types
* 2 invalid fields in last index
*/
switch (prof_id) {
/* Rx flex flags are currently programmed for the NIC profiles only.
* Different flag bit programming configurations can be added per
* profile as needed.
*/
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_FRG,
ICE_FLG_UDP_GRE, ICE_FLG_PKT_DSI,
ICE_FLG_FIN, idx++);
/* flex flag 1 is not used for flexi-flag programming, skipping
* these four FLG64 bits.
*/
ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_SYN, ICE_FLG_RST,
ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx++);
ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_PKT_DSI,
ICE_FLG_PKT_DSI, ICE_FLG_EVLAN_x8100,
ICE_FLG_EVLAN_x9100, idx++);
ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_VLAN_x8100,
ICE_FLG_TNL_VLAN, ICE_FLG_TNL_MAC,
ICE_FLG_TNL0, idx++);
ICE_PROG_FLG_ENTRY(hw, prof_id, ICE_FLG_TNL1, ICE_FLG_TNL2,
ICE_FLG_PKT_DSI, ICE_FLG_PKT_DSI, idx);
break;
default:
ice_debug(hw, ICE_DBG_INIT,
"Flag programming for profile ID %d not supported\n",
prof_id);
}
}
/**
* ice_init_flex_flds
* @hw: pointer to the hardware structure
* @prof_id: Rx Descriptor Builder profile ID
*
* Function to initialize flex descriptors
*/
static void ice_init_flex_flds(struct ice_hw *hw, enum ice_rxdid prof_id)
{
enum ice_flex_rx_mdid mdid;
switch (prof_id) {
case ICE_RXDID_FLEX_NIC:
case ICE_RXDID_FLEX_NIC_2:
ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_LOW, 0);
ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_HASH_HIGH, 1);
ICE_PROG_FLEX_ENTRY(hw, prof_id, ICE_RX_MDID_FLOW_ID_LOWER, 2);
mdid = (prof_id == ICE_RXDID_FLEX_NIC_2) ?
ICE_RX_MDID_SRC_VSI : ICE_RX_MDID_FLOW_ID_HIGH;
ICE_PROG_FLEX_ENTRY(hw, prof_id, mdid, 3);
ice_init_flex_flags(hw, prof_id);
break;
default:
ice_debug(hw, ICE_DBG_INIT,
"Field init for profile ID %d not supported\n",
prof_id);
}
}
/**
* ice_init_fltr_mgmt_struct - initializes filter management list and locks
* @hw: pointer to the HW struct
......@@ -882,9 +781,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_fltr_mgmt_struct;
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
status = ice_init_hw_tbls(hw);
if (status)
goto err_unroll_fltr_mgmt_struct;
......
......@@ -396,6 +396,12 @@ void ice_dcb_rebuild(struct ice_pf *pf)
prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
/* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
* here as is done for other calls to that function. That check is
* not necessary since this is in this function's error cleanup path.
* Suppress the Coverity warning with the following comment...
*/
/* coverity[check_return] */
ice_pf_dcb_cfg(pf, prev_cfg, false);
kfree(prev_cfg);
}
......
......@@ -11,5 +11,23 @@
#define ICE_DEV_ID_E810C_QSFP 0x1592
/* Intel(R) Ethernet Controller E810-C for SFP */
#define ICE_DEV_ID_E810C_SFP 0x1593
/* Intel(R) Ethernet Connection E822-C for backplane */
#define ICE_DEV_ID_E822C_BACKPLANE 0x1890
/* Intel(R) Ethernet Connection E822-C for QSFP */
#define ICE_DEV_ID_E822C_QSFP 0x1891
/* Intel(R) Ethernet Connection E822-C for SFP */
#define ICE_DEV_ID_E822C_SFP 0x1892
/* Intel(R) Ethernet Connection E822-C/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822C_10G_BASE_T 0x1893
/* Intel(R) Ethernet Connection E822-C 1GbE */
#define ICE_DEV_ID_E822C_SGMII 0x1894
/* Intel(R) Ethernet Connection E822-X for backplane */
#define ICE_DEV_ID_E822X_BACKPLANE 0x1897
/* Intel(R) Ethernet Connection E822-L for SFP */
#define ICE_DEV_ID_E822L_SFP 0x1898
/* Intel(R) Ethernet Connection E822-L/X557-AT 10GBASE-T */
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
#endif /* _ICE_DEVIDS_H_ */
......@@ -283,12 +283,15 @@ ice_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
*/
static bool ice_active_vfs(struct ice_pf *pf)
{
struct ice_vf *vf = pf->vf;
int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++)
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
if (test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
return true;
}
return false;
}
......@@ -3584,6 +3587,53 @@ ice_set_q_coalesce(struct ice_vsi *vsi, struct ethtool_coalesce *ec, int q_num)
return 0;
}
/**
* ice_is_coalesce_param_invalid - check for unsupported coalesce parameters
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure containing the requested coalesce settings
*
* Print netdev info and return an error if the driver doesn't support one of
* the parameters. When any of these parameters is implemented, remove it from
* the param array.
*/
static int
ice_is_coalesce_param_invalid(struct net_device *netdev,
struct ethtool_coalesce *ec)
{
struct ice_ethtool_not_used {
u32 value;
const char *name;
} param[] = {
{ec->stats_block_coalesce_usecs, "stats-block-usecs"},
{ec->rate_sample_interval, "sample-interval"},
{ec->pkt_rate_low, "pkt-rate-low"},
{ec->pkt_rate_high, "pkt-rate-high"},
{ec->rx_max_coalesced_frames, "rx-frames"},
{ec->rx_coalesce_usecs_irq, "rx-usecs-irq"},
{ec->rx_max_coalesced_frames_irq, "rx-frames-irq"},
{ec->tx_max_coalesced_frames, "tx-frames"},
{ec->tx_coalesce_usecs_irq, "tx-usecs-irq"},
{ec->tx_max_coalesced_frames_irq, "tx-frames-irq"},
{ec->rx_coalesce_usecs_low, "rx-usecs-low"},
{ec->rx_max_coalesced_frames_low, "rx-frames-low"},
{ec->tx_coalesce_usecs_low, "tx-usecs-low"},
{ec->tx_max_coalesced_frames_low, "tx-frames-low"},
{ec->rx_max_coalesced_frames_high, "rx-frames-high"},
{ec->tx_max_coalesced_frames_high, "tx-frames-high"}
};
int i;
for (i = 0; i < ARRAY_SIZE(param); i++) {
if (param[i].value) {
netdev_info(netdev, "Setting %s not supported\n",
param[i].name);
return -EINVAL;
}
}
return 0;
}
/**
* __ice_set_coalesce - set ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
......@@ -3600,6 +3650,9 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
if (ice_is_coalesce_param_invalid(netdev, ec))
return -EINVAL;
if (q_num < 0) {
int v_idx;
......
......@@ -60,15 +60,6 @@
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_S 8
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_1_M ICE_M(0x3F, 8)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_S 16
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_2_M ICE_M(0x3F, 16)
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_S 24
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_3_M ICE_M(0x3F, 24)
#define GLFLXP_RXDID_FLX_WRD_0(_i) (0x0045c800 + ((_i) * 4))
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_S 0
#define GLFLXP_RXDID_FLX_WRD_0_PROT_MDID_M ICE_M(0xFF, 0)
......
......@@ -817,12 +817,23 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi)
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
/* Enable MAC Antispoof with new VSI being initialized or updated */
if (vsi->type == ICE_VSI_VF && pf->vf[vsi->vf_id].spoofchk) {
/* enable/disable MAC and VLAN anti-spoof when spoofchk is on/off
* respectively
*/
if (vsi->type == ICE_VSI_VF) {
ctxt->info.valid_sections |=
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
ctxt->info.sec_flags |=
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
if (pf->vf[vsi->vf_id].spoofchk) {
ctxt->info.sec_flags |=
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
} else {
ctxt->info.sec_flags &=
~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
}
}
/* Allow control frames out of main VSI */
......@@ -1636,22 +1647,14 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
ctxt->info = vsi->info;
if (ena) {
ctxt->info.sec_flags |=
ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S;
if (ena)
ctxt->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
} else {
ctxt->info.sec_flags &=
~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
else
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
}
if (!vlan_promisc)
ctxt->info.valid_sections =
cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctxt, NULL);
if (status) {
......@@ -1661,7 +1664,6 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
goto err_out;
}
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
kfree(ctxt);
......@@ -2401,6 +2403,97 @@ int ice_vsi_release(struct ice_vsi *vsi)
return 0;
}
/**
* ice_vsi_rebuild_update_coalesce - set coalesce for a q_vector
* @q_vector: pointer to q_vector which is being updated
* @coalesce: pointer to array of struct with stored coalesce
*
* Set coalesce param in q_vector and update these parameters in HW.
*/
static void
ice_vsi_rebuild_update_coalesce(struct ice_q_vector *q_vector,
struct ice_coalesce_stored *coalesce)
{
struct ice_ring_container *rx_rc = &q_vector->rx;
struct ice_ring_container *tx_rc = &q_vector->tx;
struct ice_hw *hw = &q_vector->vsi->back->hw;
tx_rc->itr_setting = coalesce->itr_tx;
rx_rc->itr_setting = coalesce->itr_rx;
/* dynamic ITR values will be updated during Tx/Rx */
if (!ITR_IS_DYNAMIC(tx_rc->itr_setting))
wr32(hw, GLINT_ITR(tx_rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(tx_rc->itr_setting) >>
ICE_ITR_GRAN_S);
if (!ITR_IS_DYNAMIC(rx_rc->itr_setting))
wr32(hw, GLINT_ITR(rx_rc->itr_idx, q_vector->reg_idx),
ITR_REG_ALIGN(rx_rc->itr_setting) >>
ICE_ITR_GRAN_S);
q_vector->intrl = coalesce->intrl;
wr32(hw, GLINT_RATE(q_vector->reg_idx),
ice_intrl_usec_to_reg(q_vector->intrl, hw->intrl_gran));
}
/**
* ice_vsi_rebuild_get_coalesce - get coalesce from all q_vectors
* @vsi: VSI connected with q_vectors
* @coalesce: array of struct with stored coalesce
*
* Returns array size.
*/
static int
ice_vsi_rebuild_get_coalesce(struct ice_vsi *vsi,
struct ice_coalesce_stored *coalesce)
{
int i;
ice_for_each_q_vector(vsi, i) {
struct ice_q_vector *q_vector = vsi->q_vectors[i];
coalesce[i].itr_tx = q_vector->tx.itr_setting;
coalesce[i].itr_rx = q_vector->rx.itr_setting;
coalesce[i].intrl = q_vector->intrl;
}
return vsi->num_q_vectors;
}
/**
* ice_vsi_rebuild_set_coalesce - set coalesce from earlier saved arrays
* @vsi: VSI connected with q_vectors
* @coalesce: pointer to array of struct with stored coalesce
* @size: size of coalesce array
*
* Before this function, ice_vsi_rebuild_get_coalesce should be called to save
* the ITR params in the array. If size is 0 or coalesce wasn't stored, set the
* coalesce params to the default values.
*/
static void
ice_vsi_rebuild_set_coalesce(struct ice_vsi *vsi,
struct ice_coalesce_stored *coalesce, int size)
{
int i;
if ((size && !coalesce) || !vsi)
return;
for (i = 0; i < size && i < vsi->num_q_vectors; i++)
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
&coalesce[i]);
for (; i < vsi->num_q_vectors; i++) {
struct ice_coalesce_stored coalesce_dflt = {
.itr_tx = ICE_DFLT_TX_ITR,
.itr_rx = ICE_DFLT_RX_ITR,
.intrl = 0
};
ice_vsi_rebuild_update_coalesce(vsi->q_vectors[i],
&coalesce_dflt);
}
}
/**
* ice_vsi_rebuild - Rebuild VSI after reset
* @vsi: VSI to be rebuilt
......@@ -2411,6 +2504,8 @@ int ice_vsi_release(struct ice_vsi *vsi)
int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_coalesce_stored *coalesce;
int prev_num_q_vectors = 0;
struct ice_vf *vf = NULL;
enum ice_status status;
struct ice_pf *pf;
......@@ -2423,6 +2518,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
if (vsi->type == ICE_VSI_VF)
vf = &pf->vf[vsi->vf_id];
coalesce = kcalloc(vsi->num_q_vectors,
sizeof(struct ice_coalesce_stored), GFP_KERNEL);
if (coalesce)
prev_num_q_vectors = ice_vsi_rebuild_get_coalesce(vsi,
coalesce);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
ice_vsi_free_q_vectors(vsi);
......@@ -2535,6 +2635,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
return ice_schedule_reset(pf, ICE_RESET_PFR);
}
}
ice_vsi_rebuild_set_coalesce(vsi, coalesce, prev_num_q_vectors);
kfree(coalesce);
return 0;
err_vectors:
......@@ -2549,6 +2652,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
err_vsi:
ice_vsi_clear(vsi);
set_bit(__ICE_RESET_FAILED, pf->state);
kfree(coalesce);
return ret;
}
......@@ -2740,3 +2844,121 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
return status;
}
/**
* ice_is_dflt_vsi_in_use - check if the default forwarding VSI is being used
* @sw: switch to check if its default forwarding VSI is free
*
* Returns true if the default forwarding VSI is already being used, else
* returns false, signalling that it's available to use.
*/
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw)
{
return (sw->dflt_vsi && sw->dflt_vsi_ena);
}
/**
* ice_is_vsi_dflt_vsi - check if the VSI passed in is the default VSI
* @sw: switch for the default forwarding VSI to compare against
* @vsi: VSI to compare against default forwarding VSI
*
* If the VSI passed in is the default forwarding VSI then return true, else
* return false
*/
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
return (sw->dflt_vsi == vsi && sw->dflt_vsi_ena);
}
/**
* ice_set_dflt_vsi - set the default forwarding VSI
* @sw: switch used to assign the default forwarding VSI
* @vsi: VSI getting set as the default forwarding VSI on the switch
*
* If the VSI passed in is already the default VSI and it's enabled just return
* success.
*
* If there is already a default VSI on the switch and it's enabled then return
* -EEXIST since there can only be one default VSI per switch.
*
* Otherwise try to set the VSI passed in as the switch's default VSI and
* return the result.
*/
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
{
enum ice_status status;
struct device *dev;
if (!sw || !vsi)
return -EINVAL;
dev = ice_pf_to_dev(vsi->back);
/* the VSI passed in is already the default VSI */
if (ice_is_vsi_dflt_vsi(sw, vsi)) {
dev_dbg(dev, "VSI %d passed in is already the default forwarding VSI, nothing to do\n",
vsi->vsi_num);
return 0;
}
/* another VSI is already the default VSI for this switch */
if (ice_is_dflt_vsi_in_use(sw)) {
dev_err(dev,
"Default forwarding VSI %d already in use, disable it and try again\n",
sw->dflt_vsi->vsi_num);
return -EEXIST;
}
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
dev_err(dev,
"Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return -EIO;
}
sw->dflt_vsi = vsi;
sw->dflt_vsi_ena = true;
return 0;
}
/**
* ice_clear_dflt_vsi - clear the default forwarding VSI
* @sw: switch used to clear the default VSI
*
* If the switch has no default VSI or it's not enabled then return error.
*
* Otherwise try to clear the default VSI and return the result.
*/
int ice_clear_dflt_vsi(struct ice_sw *sw)
{
struct ice_vsi *dflt_vsi;
enum ice_status status;
struct device *dev;
if (!sw)
return -EINVAL;
dev = ice_pf_to_dev(sw->pf);
dflt_vsi = sw->dflt_vsi;
/* there is no default VSI configured */
if (!ice_is_dflt_vsi_in_use(sw))
return -ENODEV;
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
dev_err(dev,
"Failed to clear the default forwarding VSI %d, error %d\n",
dflt_vsi->vsi_num, status);
return -EIO;
}
sw->dflt_vsi = NULL;
sw->dflt_vsi_ena = false;
return 0;
}
......@@ -103,4 +103,12 @@ enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
bool ice_is_safe_mode(struct ice_pf *pf);
bool ice_is_dflt_vsi_in_use(struct ice_sw *sw);
bool ice_is_vsi_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi);
int ice_clear_dflt_vsi(struct ice_sw *sw);
#endif /* !_ICE_LIB_H_ */
......@@ -379,25 +379,29 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
clear_bit(ICE_VSI_FLAG_PROMISC_CHANGED, vsi->flags);
if (vsi->current_netdev_flags & IFF_PROMISC) {
/* Apply Rx filter rule to get traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, true,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error setting default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags &= ~IFF_PROMISC;
err = -EIO;
goto out_promisc;
if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
err = ice_set_dflt_vsi(pf->first_sw, vsi);
if (err && err != -EEXIST) {
netdev_err(netdev,
"Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags &=
~IFF_PROMISC;
goto out_promisc;
}
}
} else {
/* Clear Rx filter to remove traffic from wire */
status = ice_cfg_dflt_vsi(hw, vsi->idx, false,
ICE_FLTR_RX);
if (status) {
netdev_err(netdev, "Error clearing default VSI %i Rx rule\n",
vsi->vsi_num);
vsi->current_netdev_flags |= IFF_PROMISC;
err = -EIO;
goto out_promisc;
if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
err = ice_clear_dflt_vsi(pf->first_sw);
if (err) {
netdev_err(netdev,
"Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags |=
IFF_PROMISC;
goto out_promisc;
}
}
}
}
......@@ -472,7 +476,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf);
/* Disable VFs until reset is completed */
for (i = 0; i < pf->num_alloc_vfs; i++)
ice_for_each_vf(pf, i)
ice_set_vf_state_qs_dis(&pf->vf[i]);
/* clear SW filtering DB */
......@@ -840,8 +844,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
if (pf->num_alloc_vfs)
ice_vc_notify_link_state(pf);
ice_vc_notify_link_state(pf);
return result;
}
......@@ -1291,7 +1294,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
}
/* check to see if one of the VFs caused the MDD */
for (i = 0; i < pf->num_alloc_vfs; i++) {
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
bool vf_mdd_detected = false;
......@@ -2330,7 +2333,8 @@ static void ice_set_netdev_features(struct net_device *netdev)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
tso_features = NETIF_F_TSO;
tso_features = NETIF_F_TSO |
NETIF_F_GSO_UDP_L4;
/* set features that user can change */
netdev->hw_features = dflt_features | csumo_features |
......@@ -3568,6 +3572,15 @@ static const struct pci_device_id ice_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E810C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_QSFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822C_SGMII), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822X_BACKPLANE), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SFP), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_10G_BASE_T), 0 },
{ PCI_VDEVICE(INTEL, ICE_DEV_ID_E822L_SGMII), 0 },
/* required last entry */
{ 0, }
};
......@@ -4670,6 +4683,13 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
goto err_init_ctrlq;
}
if (pf->first_sw->dflt_vsi_ena)
dev_info(dev,
"Clearing default VSI, re-enable after reset completes\n");
/* clear the default VSI configuration if it exists */
pf->first_sw->dflt_vsi = NULL;
pf->first_sw->dflt_vsi_ena = false;
ice_clear_pxe_mode(hw);
ret = ice_get_caps(hw);
......@@ -4825,7 +4845,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
}
}
netdev_info(netdev, "changed MTU to %d\n", new_mtu);
netdev_dbg(netdev, "changed MTU to %d\n", new_mtu);
return 0;
}
......
......@@ -289,6 +289,18 @@ enum ice_status ice_init_nvm(struct ice_hw *hw)
nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
/* the following devices do not have boot_cfg_tlv yet */
if (hw->device_id == ICE_DEV_ID_E822C_BACKPLANE ||
hw->device_id == ICE_DEV_ID_E822C_QSFP ||
hw->device_id == ICE_DEV_ID_E822C_10G_BASE_T ||
hw->device_id == ICE_DEV_ID_E822C_SGMII ||
hw->device_id == ICE_DEV_ID_E822C_SFP ||
hw->device_id == ICE_DEV_ID_E822X_BACKPLANE ||
hw->device_id == ICE_DEV_ID_E822L_SFP ||
hw->device_id == ICE_DEV_ID_E822L_10G_BASE_T ||
hw->device_id == ICE_DEV_ID_E822L_SGMII)
return status;
status = ice_get_pfa_module_tlv(hw, &boot_cfg_tlv, &boot_cfg_tlv_len,
ICE_SR_BOOT_CFG_PTR);
if (status) {
......
......@@ -1071,13 +1071,16 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
ice_put_rx_buf(rx_ring, rx_buf);
continue;
construct_skb:
if (skb)
if (skb) {
ice_add_rx_frag(rx_ring, rx_buf, skb, size);
else if (ice_ring_uses_build_skb(rx_ring))
skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
} else if (likely(xdp.data)) {
if (ice_ring_uses_build_skb(rx_ring))
skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
} else {
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
rx_ring->rx_stats.alloc_buf_failed++;
......@@ -1925,6 +1928,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
} ip;
union {
struct tcphdr *tcp;
struct udphdr *udp;
unsigned char *hdr;
} l4;
u64 cd_mss, cd_tso_len;
......@@ -1958,10 +1962,18 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
/* remove payload length from checksum */
paylen = skb->len - l4_start;
csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
/* compute length of segmentation header */
off->header_len = (l4.tcp->doff * 4) + l4_start;
if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
csum_replace_by_diff(&l4.udp->check,
(__force __wsum)htonl(paylen));
/* compute length of UDP segmentation header */
off->header_len = sizeof(struct udphdr) + l4_start;
} else {
csum_replace_by_diff(&l4.tcp->check,
(__force __wsum)htonl(paylen));
/* compute length of TCP segmentation header */
off->header_len = (l4.tcp->doff * 4) + l4_start;
}
/* update gso_segs and bytecount */
first->gso_segs = skb_shinfo(skb)->gso_segs;
......
......@@ -341,6 +341,12 @@ struct ice_ring_container {
u16 itr_setting;
};
struct ice_coalesce_stored {
u16 itr_tx;
u16 itr_rx;
u8 intrl;
};
/* iterator for handling rings in ring container */
#define ice_for_each_ring(pos, head) \
for (pos = (head).ring; pos; pos = pos->next)
......
......@@ -34,37 +34,6 @@ static int ice_check_vf_init(struct ice_pf *pf, struct ice_vf *vf)
return 0;
}
/**
* ice_err_to_virt_err - translate errors for VF return code
* @ice_err: error return code
*/
static enum virtchnl_status_code ice_err_to_virt_err(enum ice_status ice_err)
{
switch (ice_err) {
case ICE_SUCCESS:
return VIRTCHNL_STATUS_SUCCESS;
case ICE_ERR_BAD_PTR:
case ICE_ERR_INVAL_SIZE:
case ICE_ERR_DEVICE_NOT_SUPPORTED:
case ICE_ERR_PARAM:
case ICE_ERR_CFG:
return VIRTCHNL_STATUS_ERR_PARAM;
case ICE_ERR_NO_MEMORY:
return VIRTCHNL_STATUS_ERR_NO_MEMORY;
case ICE_ERR_NOT_READY:
case ICE_ERR_RESET_FAILED:
case ICE_ERR_FW_API_VER:
case ICE_ERR_AQ_ERROR:
case ICE_ERR_AQ_TIMEOUT:
case ICE_ERR_AQ_FULL:
case ICE_ERR_AQ_NO_WORK:
case ICE_ERR_AQ_EMPTY:
return VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
default:
return VIRTCHNL_STATUS_ERR_NOT_SUPPORTED;
}
}
/**
* ice_vc_vf_broadcast - Broadcast a message to all VFs on PF
* @pf: pointer to the PF structure
......@@ -78,10 +47,11 @@ ice_vc_vf_broadcast(struct ice_pf *pf, enum virtchnl_ops v_opcode,
enum virtchnl_status_code v_retval, u8 *msg, u16 msglen)
{
struct ice_hw *hw = &pf->hw;
struct ice_vf *vf = pf->vf;
int i;
for (i = 0; i < pf->num_alloc_vfs; i++, vf++) {
ice_for_each_vf(pf, i) {
struct ice_vf *vf = &pf->vf[i];
/* Not all vfs are enabled so skip the ones that are not */
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states) &&
!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states))
......@@ -120,26 +90,6 @@ ice_set_pfe_link(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
}
}
/**
* ice_set_pfe_link_forced - Force the virtchnl_pf_event link speed/status
* @vf: pointer to the VF structure
* @pfe: pointer to the virtchnl_pf_event to set link speed/status for
* @link_up: whether or not to set the link up/down
*/
static void
ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
bool link_up)
{
u16 link_speed;
if (link_up)
link_speed = ICE_AQ_LINK_SPEED_100GB;
else
link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
ice_set_pfe_link(vf, pfe, link_speed, link_up);
}
/**
* ice_vc_notify_vf_link_state - Inform a VF of link status
* @vf: pointer to the VF structure
......@@ -160,13 +110,17 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
pfe.severity = PF_EVENT_SEVERITY_INFO;
/* Always report link is down if the VF queues aren't enabled */
if (!vf->num_qs_ena)
if (!vf->num_qs_ena) {
ice_set_pfe_link(vf, &pfe, ICE_AQ_LINK_SPEED_UNKNOWN, false);
else if (vf->link_forced)
ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
else
ice_set_pfe_link(vf, &pfe, ls->link_speed, ls->link_info &
ICE_AQ_LINK_UP);
} else if (vf->link_forced) {
u16 link_speed = vf->link_up ?
ls->link_speed : ICE_AQ_LINK_SPEED_UNKNOWN;
ice_set_pfe_link(vf, &pfe, link_speed, vf->link_up);
} else {
ice_set_pfe_link(vf, &pfe, ls->link_speed,
ls->link_info & ICE_AQ_LINK_UP);
}
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
......@@ -331,7 +285,7 @@ void ice_free_vfs(struct ice_pf *pf)
usleep_range(1000, 2000);
/* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++)
ice_for_each_vf(pf, i)
if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
ice_dis_vf_qs(&pf->vf[i]);
......@@ -991,10 +945,17 @@ static void ice_cleanup_and_realloc_vf(struct ice_vf *vf)
/* reallocate VF resources to finish resetting the VSI state */
if (!ice_alloc_vf_res(vf)) {
struct ice_vsi *vsi;
ice_ena_vf_mappings(vf);
set_bit(ICE_VF_STATE_ACTIVE, vf->vf_states);
clear_bit(ICE_VF_STATE_DIS, vf->vf_states);
vf->num_vlan = 0;
vsi = pf->vsi[vf->lan_vsi_idx];
if (ice_vsi_add_vlan(vsi, 0))
dev_warn(ice_pf_to_dev(pf),
"Failed to add VLAN 0 filter for VF %d, MDD events will trigger. Reset the VF, disable spoofchk, or enable 8021q module on the guest",
vf->vf_id);
}
/* Tell the VF driver the reset is done. This needs to be done only
......@@ -1023,7 +984,7 @@ ice_vf_set_vsi_promisc(struct ice_vf *vf, struct ice_vsi *vsi, u8 promisc_m,
struct ice_hw *hw;
hw = &pf->hw;
if (vf->num_vlan) {
if (vsi->num_vlan) {
status = ice_set_vlan_vsi_promisc(hw, vsi->idx, promisc_m,
rm_promisc);
} else if (vf->port_vlan_id) {
......@@ -1070,7 +1031,7 @@ static bool ice_config_res_vfs(struct ice_pf *pf)
ice_irq_dynamic_ena(hw, NULL, NULL);
/* Finish resetting each VF and allocate resources */
for (v = 0; v < pf->num_alloc_vfs; v++) {
ice_for_each_vf(pf, v) {
struct ice_vf *vf = &pf->vf[v];
vf->num_vf_qs = pf->num_vf_qps;
......@@ -1113,10 +1074,10 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
return false;
/* Begin reset on all VFs at once */
for (v = 0; v < pf->num_alloc_vfs; v++)
ice_for_each_vf(pf, v)
ice_trigger_vf_reset(&pf->vf[v], is_vflr, true);
for (v = 0; v < pf->num_alloc_vfs; v++) {
ice_for_each_vf(pf, v) {
struct ice_vsi *vsi;
vf = &pf->vf[v];
......@@ -1161,7 +1122,7 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
dev_warn(dev, "VF reset check timeout\n");
/* free VF resources to begin resetting the VSI state */
for (v = 0; v < pf->num_alloc_vfs; v++) {
ice_for_each_vf(pf, v) {
vf = &pf->vf[v];
ice_free_vf_res(vf);
......@@ -1273,7 +1234,7 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
*/
if (test_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states) ||
test_bit(ICE_VF_STATE_MC_PROMISC, vf->vf_states)) {
if (vf->port_vlan_id || vf->num_vlan)
if (vf->port_vlan_id || vsi->num_vlan)
promisc_m = ICE_UCAST_VLAN_PROMISC_BITS;
else
promisc_m = ICE_UCAST_PROMISC_BITS;
......@@ -1301,7 +1262,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf)
{
int i;
for (i = 0; i < pf->num_alloc_vfs; i++)
ice_for_each_vf(pf, i)
ice_vc_notify_vf_link_state(&pf->vf[i]);
}
......@@ -1385,9 +1346,10 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
goto err_pci_disable_sriov;
}
pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
/* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) {
ice_for_each_vf(pf, i) {
vfs[i].pf = pf;
vfs[i].vf_sw_id = pf->first_sw;
vfs[i].vf_id = i;
......@@ -1396,7 +1358,6 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
set_bit(ICE_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
vfs[i].spoofchk = true;
}
pf->num_alloc_vfs = num_alloc_vfs;
/* VF resources get allocated with initialization */
if (!ice_config_res_vfs(pf)) {
......@@ -1535,7 +1496,7 @@ void ice_process_vflr_event(struct ice_pf *pf)
!pf->num_alloc_vfs)
return;
for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) {
ice_for_each_vf(pf, vf_id) {
struct ice_vf *vf = &pf->vf[vf_id];
u32 reg_idx, bit_idx;
......@@ -1917,6 +1878,89 @@ static int ice_vc_config_rss_lut(struct ice_vf *vf, u8 *msg)
NULL, 0);
}
/**
* ice_set_vf_spoofchk
* @netdev: network interface device structure
* @vf_id: VF identifier
* @ena: flag to enable or disable feature
*
* Enable or disable VF spoof checking
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_pf *pf = np->vsi->back;
struct ice_vsi_ctx *ctx;
struct ice_vsi *vf_vsi;
enum ice_status status;
struct device *dev;
struct ice_vf *vf;
int ret = 0;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
if (ice_check_vf_init(pf, vf))
return -EBUSY;
vf_vsi = pf->vsi[vf->lan_vsi_idx];
if (!vf_vsi) {
netdev_err(netdev, "VSI %d for VF %d is null\n",
vf->lan_vsi_idx, vf->vf_id);
return -EINVAL;
}
if (vf_vsi->type != ICE_VSI_VF) {
netdev_err(netdev,
"Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
return -ENODEV;
}
if (ena == vf->spoofchk) {
dev_dbg(dev, "VF spoofchk already %s\n", ena ? "ON" : "OFF");
return 0;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->info.sec_flags = vf_vsi->info.sec_flags;
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
if (ena) {
ctx->info.sec_flags |=
ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
} else {
ctx->info.sec_flags &=
~(ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF |
(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S));
}
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
dev_err(dev,
"Failed to %sable spoofchk on VF %d VSI %d\n error %d",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
}
/* only update spoofchk state and VSI context on success */
vf_vsi->info.sec_flags = ctx->info.sec_flags;
vf->spoofchk = ena;
out:
kfree(ctx);
return ret;
}
/**
* ice_vc_get_stats_msg
* @vf: pointer to the VF info
......@@ -2408,6 +2452,83 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
return true;
}
/**
* ice_vc_add_mac_addr - attempt to add the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
* @mac_addr: MAC address to add
*/
static int
ice_vc_add_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
{
struct device *dev = ice_pf_to_dev(vf->pf);
enum ice_status status;
/* default unicast MAC already added */
if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
return 0;
if (is_unicast_ether_addr(mac_addr) && !ice_can_vf_change_mac(vf)) {
dev_err(dev, "VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
return -EPERM;
}
status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, true);
if (status == ICE_ERR_ALREADY_EXISTS) {
dev_err(dev, "MAC %pM already exists for VF %d\n", mac_addr,
vf->vf_id);
return -EEXIST;
} else if (status) {
dev_err(dev, "Failed to add MAC %pM for VF %d\n, error %d\n",
mac_addr, vf->vf_id, status);
return -EIO;
}
/* only set dflt_lan_addr once */
if (is_zero_ether_addr(vf->dflt_lan_addr.addr) &&
is_unicast_ether_addr(mac_addr))
ether_addr_copy(vf->dflt_lan_addr.addr, mac_addr);
vf->num_mac++;
return 0;
}
/**
* ice_vc_del_mac_addr - attempt to delete the MAC address passed in
* @vf: pointer to the VF info
* @vsi: pointer to the VF's VSI
* @mac_addr: MAC address to delete
*/
static int
ice_vc_del_mac_addr(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr)
{
struct device *dev = ice_pf_to_dev(vf->pf);
enum ice_status status;
if (!ice_can_vf_change_mac(vf) &&
ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
return 0;
status = ice_vsi_cfg_mac_fltr(vsi, mac_addr, false);
if (status == ICE_ERR_DOES_NOT_EXIST) {
dev_err(dev, "MAC %pM does not exist for VF %d\n", mac_addr,
vf->vf_id);
return -ENOENT;
} else if (status) {
dev_err(dev, "Failed to delete MAC %pM for VF %d, error %d\n",
mac_addr, vf->vf_id, status);
return -EIO;
}
if (ether_addr_equal(mac_addr, vf->dflt_lan_addr.addr))
eth_zero_addr(vf->dflt_lan_addr.addr);
vf->num_mac--;
return 0;
}
/**
* ice_vc_handle_mac_addr_msg
* @vf: pointer to the VF info
......@@ -2419,23 +2540,23 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
{
int (*ice_vc_cfg_mac)
(struct ice_vf *vf, struct ice_vsi *vsi, u8 *mac_addr);
enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS;
struct virtchnl_ether_addr_list *al =
(struct virtchnl_ether_addr_list *)msg;
struct ice_pf *pf = vf->pf;
enum virtchnl_ops vc_op;
enum ice_status status;
struct ice_vsi *vsi;
struct device *dev;
int mac_count = 0;
int i;
dev = ice_pf_to_dev(pf);
if (set)
if (set) {
vc_op = VIRTCHNL_OP_ADD_ETH_ADDR;
else
ice_vc_cfg_mac = ice_vc_add_mac_addr;
} else {
vc_op = VIRTCHNL_OP_DEL_ETH_ADDR;
ice_vc_cfg_mac = ice_vc_del_mac_addr;
}
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!ice_vc_isvalid_vsi_id(vf, al->vsi_id)) {
......@@ -2443,14 +2564,15 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
goto handle_mac_exit;
}
/* If this VF is not privileged, then we can't add more than a
* limited number of addresses. Check to make sure that the
* additions do not push us over the limit.
*/
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
dev_err(dev,
dev_err(ice_pf_to_dev(pf),
"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
/* There is no need to let VF know about not being trusted
* to add more MAC addr, so we can just return success message.
*/
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
......@@ -2462,70 +2584,22 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
}
for (i = 0; i < al->num_elements; i++) {
u8 *maddr = al->list[i].addr;
u8 *mac_addr = al->list[i].addr;
int result;
if (ether_addr_equal(maddr, vf->dflt_lan_addr.addr) ||
is_broadcast_ether_addr(maddr)) {
if (set) {
/* VF is trying to add filters that the PF
* already added. Just continue.
*/
dev_info(dev,
"MAC %pM already set for VF %d\n",
maddr, vf->vf_id);
continue;
} else {
/* VF can't remove dflt_lan_addr/bcast MAC */
dev_err(dev,
"VF can't remove default MAC address or MAC %pM programmed by PF for VF %d\n",
maddr, vf->vf_id);
continue;
}
}
/* check for the invalid cases and bail if necessary */
if (is_zero_ether_addr(maddr)) {
dev_err(dev,
"invalid MAC %pM provided for VF %d\n",
maddr, vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (is_unicast_ether_addr(maddr) &&
!ice_can_vf_change_mac(vf)) {
dev_err(dev,
"can't change unicast MAC for untrusted VF %d\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
}
if (is_broadcast_ether_addr(mac_addr) ||
is_zero_ether_addr(mac_addr))
continue;
/* program the updated filter list */
status = ice_vsi_cfg_mac_fltr(vsi, maddr, set);
if (status == ICE_ERR_DOES_NOT_EXIST ||
status == ICE_ERR_ALREADY_EXISTS) {
dev_info(dev,
"can't %s MAC filters %pM for VF %d, error %d\n",
set ? "add" : "remove", maddr, vf->vf_id,
status);
} else if (status) {
dev_err(dev,
"can't %s MAC filters for VF %d, error %d\n",
set ? "add" : "remove", vf->vf_id, status);
v_ret = ice_err_to_virt_err(status);
result = ice_vc_cfg_mac(vf, vsi, mac_addr);
if (result == -EEXIST || result == -ENOENT) {
continue;
} else if (result) {
v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
goto handle_mac_exit;
}
mac_count++;
}
/* Track number of MAC filters programmed for the VF VSI */
if (set)
vf->num_mac += mac_count;
else
vf->num_mac -= mac_count;
handle_mac_exit:
/* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, vc_op, v_ret, NULL, 0);
......@@ -2744,17 +2818,6 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
if (add_v && !ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
* so we can just return success message here
*/
goto error_param;
}
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
......@@ -2771,6 +2834,17 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
if (add_v && !ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
* so we can just return success message here
*/
goto error_param;
}
if (vsi->info.pvid) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
......@@ -2785,7 +2859,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
u16 vid = vfl->vlan_id[i];
if (!ice_is_vf_trusted(vf) &&
vf->num_vlan >= ICE_MAX_VLAN_PER_VF) {
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
dev_info(dev,
"VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
......@@ -2796,12 +2870,20 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
goto error_param;
}
if (ice_vsi_add_vlan(vsi, vid)) {
/* we add VLAN 0 by default for each VF so we can enable
* Tx VLAN anti-spoof without triggering MDD events so
* we don't need to add it again here
*/
if (!vid)
continue;
status = ice_vsi_add_vlan(vsi, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vf->num_vlan++;
vsi->num_vlan++;
/* Enable VLAN pruning when VLAN is added */
if (!vlan_promisc) {
status = ice_cfg_vlan_pruning(vsi, true, false);
......@@ -2837,21 +2919,29 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
*/
int num_vf_vlan;
num_vf_vlan = vf->num_vlan;
num_vf_vlan = vsi->num_vlan;
for (i = 0; i < vfl->num_elements && i < num_vf_vlan; i++) {
u16 vid = vfl->vlan_id[i];
/* we add VLAN 0 by default for each VF so we can enable
* Tx VLAN anti-spoof without triggering MDD events so
* we don't want a VIRTCHNL request to remove it
*/
if (!vid)
continue;
/* Make sure ice_vsi_kill_vlan is successful before
* updating VLAN information
*/
if (ice_vsi_kill_vlan(vsi, vid)) {
status = ice_vsi_kill_vlan(vsi, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
vf->num_vlan--;
vsi->num_vlan--;
/* Disable VLAN pruning when the last VLAN is removed */
if (!vf->num_vlan)
if (!vsi->num_vlan)
ice_cfg_vlan_pruning(vsi, false, false);
/* Disable Unicast/Multicast VLAN promiscuous mode */
......@@ -3164,65 +3254,6 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
return 0;
}
/**
* ice_set_vf_spoofchk
* @netdev: network interface device structure
* @vf_id: VF identifier
* @ena: flag to enable or disable feature
*
* Enable or disable VF spoof checking
*/
int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vsi *vsi = pf->vsi[0];
struct ice_vsi_ctx *ctx;
enum ice_status status;
struct device *dev;
struct ice_vf *vf;
int ret = 0;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
if (ice_check_vf_init(pf, vf))
return -EBUSY;
if (ena == vf->spoofchk) {
dev_dbg(dev, "VF spoofchk already %s\n",
ena ? "ON" : "OFF");
return 0;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SECURITY_VALID);
if (ena) {
ctx->info.sec_flags |= ICE_AQ_VSI_SEC_FLAG_ENA_MAC_ANTI_SPOOF;
ctx->info.sw_flags2 |= ICE_AQ_VSI_SW_FLAG_RX_PRUNE_EN_M;
}
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
dev_dbg(dev,
"Error %d, failed to update VSI* parameters\n", status);
ret = -EIO;
goto out;
}
vf->spoofchk = ena;
vsi->info.sec_flags = ctx->info.sec_flags;
vsi->info.sw_flags2 = ctx->info.sw_flags2;
out:
kfree(ctx);
return ret;
}
/**
* ice_wait_on_vf_reset
* @vf: the VF being reset
......@@ -3344,28 +3375,18 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct virtchnl_pf_event pfe = { 0 };
struct ice_link_status *ls;
struct ice_vf *vf;
struct ice_hw *hw;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
hw = &pf->hw;
ls = &pf->hw.port_info->phy.link_info;
if (ice_check_vf_init(pf, vf))
return -EBUSY;
pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
pfe.severity = PF_EVENT_SEVERITY_INFO;
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
vf->link_forced = false;
vf->link_up = ls->link_info & ICE_AQ_LINK_UP;
break;
case IFLA_VF_LINK_STATE_ENABLE:
vf->link_forced = true;
......@@ -3379,15 +3400,7 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
return -EINVAL;
}
if (vf->link_forced)
ice_set_pfe_link_forced(vf, &pfe, vf->link_up);
else
ice_set_pfe_link(vf, &pfe, ls->link_speed, vf->link_up);
/* Notify the VF of its new link state */
ice_aq_send_msg_to_vf(hw, vf->vf_id, VIRTCHNL_OP_EVENT,
VIRTCHNL_STATUS_SUCCESS, (u8 *)&pfe,
sizeof(pfe), NULL);
ice_vc_notify_vf_link_state(vf);
return 0;
}
......
......@@ -40,6 +40,9 @@
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_WAIT 15
#define ice_for_each_vf(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
/* Specific VF states */
enum ice_vf_states {
ICE_VF_STATE_INIT = 0, /* PF is initializing VF */
......@@ -91,7 +94,6 @@ struct ice_vf {
unsigned long vf_caps; /* VF's adv. capabilities */
u8 num_req_qs; /* num of queue pairs requested by VF */
u16 num_mac;
u16 num_vlan;
u16 num_vf_qs; /* num of queue configured per VF */
u16 num_qs_ena; /* total num of Tx/Rx queue enabled */
};
......
......@@ -414,7 +414,8 @@ ice_xsk_umem_enable(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid)
if (vsi->type != ICE_VSI_PF)
return -EINVAL;
vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
if (!vsi->num_xsk_umems)
vsi->num_xsk_umems = min_t(u16, vsi->num_rxq, vsi->num_txq);
if (qid >= vsi->num_xsk_umems)
return -EINVAL;
......