Commit 67eeadf2 authored by Paolo Abeni

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-03-07 (ice)

This series contains updates to ice driver only.

Dave removes masking from pfcena field as it was incorrectly preventing
valid traffic classes from being enabled.

Michal resolves various smatch issues such as not propagating error
codes and returning 0 explicitly.

Arnd Bergmann resolves gcc-9 warning for integer overflow.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ethernet: ice: avoid gcc-9 integer overflow warning
  ice: don't ignore return codes in VSI related code
  ice: Fix DSCP PFC TLV creation
====================

Link: https://lore.kernel.org/r/20230307220714.3997294-1-anthony.l.nguyen@intel.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
parents 2d8cb0bf 8f5c5a79
@@ -1411,7 +1411,7 @@ ice_add_dscp_pfc_tlv(struct ice_lldp_org_tlv *tlv, struct ice_dcbx_cfg *dcbcfg)
 	tlv->ouisubtype = htonl(ouisubtype);

 	buf[0] = dcbcfg->pfc.pfccap & 0xF;
-	buf[1] = dcbcfg->pfc.pfcena & 0xF;
+	buf[1] = dcbcfg->pfc.pfcena;
 }

 /**
......
@@ -2126,7 +2126,7 @@ int ice_vsi_cfg_xdp_txqs(struct ice_vsi *vsi)
 	ice_for_each_rxq(vsi, i)
 		ice_tx_xsk_pool(vsi, i);

-	return ret;
+	return 0;
 }

 /**
@@ -2693,12 +2693,14 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
 		return ret;

 	/* allocate memory for Tx/Rx ring stat pointers */
-	if (ice_vsi_alloc_stat_arrays(vsi))
+	ret = ice_vsi_alloc_stat_arrays(vsi);
+	if (ret)
 		goto unroll_vsi_alloc;

 	ice_alloc_fd_res(vsi);

-	if (ice_vsi_get_qs(vsi)) {
+	ret = ice_vsi_get_qs(vsi);
+	if (ret) {
 		dev_err(dev, "Failed to allocate queues. vsi->idx = %d\n",
 			vsi->idx);
 		goto unroll_vsi_alloc_stat;
@@ -2811,6 +2813,7 @@ ice_vsi_cfg_def(struct ice_vsi *vsi, struct ice_vsi_cfg_params *params)
 		break;
 	default:
 		/* clean up the resources and exit */
+		ret = -EINVAL;
 		goto unroll_vsi_init;
 	}
@@ -3508,11 +3511,11 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, u32 vsi_flags)
 		if (vsi_flags & ICE_VSI_FLAG_INIT) {
 			ret = -EIO;
 			goto err_vsi_cfg_tc_lan;
-		} else {
-			kfree(coalesce);
-			return ice_schedule_reset(pf, ICE_RESET_PFR);
 		}
+		kfree(coalesce);
+		return ice_schedule_reset(pf, ICE_RESET_PFR);
 	}

 	ice_vsi_realloc_stat_arrays(vsi, prev_txq, prev_rxq);
@@ -3759,7 +3762,7 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 	dev = ice_pf_to_dev(pf);
 	if (vsi->tc_cfg.ena_tc == ena_tc &&
 	    vsi->mqprio_qopt.mode != TC_MQPRIO_MODE_CHANNEL)
-		return ret;
+		return 0;

 	ice_for_each_traffic_class(i) {
 		/* build bitmap of enabled TCs */
......
@@ -1455,8 +1455,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		if (match.mask->vlan_priority) {
 			fltr->flags |= ICE_TC_FLWR_FIELD_VLAN_PRIO;
 			headers->vlan_hdr.vlan_prio =
-				cpu_to_be16((match.key->vlan_priority <<
-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+				be16_encode_bits(match.key->vlan_priority,
+						 VLAN_PRIO_MASK);
 		}

 		if (match.mask->vlan_tpid)
@@ -1489,8 +1489,8 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi,
 		if (match.mask->vlan_priority) {
 			fltr->flags |= ICE_TC_FLWR_FIELD_CVLAN_PRIO;
 			headers->cvlan_hdr.vlan_prio =
-				cpu_to_be16((match.key->vlan_priority <<
-					     VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
+				be16_encode_bits(match.key->vlan_priority,
+						 VLAN_PRIO_MASK);
 		}
 	}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment