Commit 096c0fa4 authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2024-09-30 (ice, idpf)

This series contains updates to ice and idpf drivers:

For ice:

Michal corrects setting of dst VSI on LAN filters and adds clearing of
port VLAN configuration during reset.

Gui-Dong Han corrects failures to decrement refcount in some error
paths.

Przemek resolves a memory leak in ice_init_tx_topology().

Arkadiusz prevents DPLL_PIN_STATE_SELECTABLE from being set on dpll output
pins, where it is not a valid state.

Dave stops clearing of the VLAN tracking bit so that VLANs are properly
restored after reset.

For idpf:

Ahmed sets the previously uninitialized dyn_ctl_intrvl_s value.

Josh corrects use and reporting of mailbox size.

Larysa corrects the order of function calls during de-initialization.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  idpf: deinit virtchnl transaction manager after vport and vectors
  idpf: use actual mbx receive payload length
  idpf: fix VF dynamic interrupt ctl register initialization
  ice: fix VLAN replay after reset
  ice: disallow DPLL_PIN_STATE_SELECTABLE for dpll output pins
  ice: fix memleak in ice_init_tx_topology()
  ice: clear port vlan config during reset
  ice: Fix improper handling of refcount in ice_sriov_set_msix_vec_count()
  ice: Fix improper handling of refcount in ice_dpll_init_rclk_pins()
  ice: set correct dst VSI in only LAN filters
====================

Link: https://patch.msgid.link/20240930223601.3137464-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 2d7a098b 09d0fb5c
drivers/net/ethernet/intel/ice/ice_ddp.c
@@ -31,7 +31,7 @@ static const struct ice_tunnel_type_scan tnls[] = {
  * Verifies various attributes of the package file, including length, format
  * version, and the requirement of at least one segment.
  */
-static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+static enum ice_ddp_state ice_verify_pkg(const struct ice_pkg_hdr *pkg, u32 len)
 {
 	u32 seg_count;
 	u32 i;
@@ -57,13 +57,13 @@ static enum ice_ddp_state ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
 	/* all segments must fit within length */
 	for (i = 0; i < seg_count; i++) {
 		u32 off = le32_to_cpu(pkg->seg_offset[i]);
-		struct ice_generic_seg_hdr *seg;
+		const struct ice_generic_seg_hdr *seg;
 
 		/* segment header must fit */
 		if (len < off + sizeof(*seg))
 			return ICE_DDP_PKG_INVALID_FILE;
 
-		seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+		seg = (void *)pkg + off;
 
 		/* segment body must fit */
 		if (len < off + le32_to_cpu(seg->seg_size))
@@ -119,13 +119,13 @@ static enum ice_ddp_state ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
  *
  * This helper function validates a buffer's header.
  */
-static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+static const struct ice_buf_hdr *ice_pkg_val_buf(const struct ice_buf *buf)
 {
-	struct ice_buf_hdr *hdr;
+	const struct ice_buf_hdr *hdr;
 	u16 section_count;
 	u16 data_end;
 
-	hdr = (struct ice_buf_hdr *)buf->buf;
+	hdr = (const struct ice_buf_hdr *)buf->buf;
 
 	/* verify data */
 	section_count = le16_to_cpu(hdr->section_count);
 	if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
@@ -165,8 +165,8 @@ static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
  * unexpected value has been detected (for example an invalid section count or
  * an invalid buffer end value).
  */
-static struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
-					    struct ice_pkg_enum *state)
+static const struct ice_buf_hdr *ice_pkg_enum_buf(struct ice_seg *ice_seg,
+						  struct ice_pkg_enum *state)
 {
 	if (ice_seg) {
 		state->buf_table = ice_find_buf_table(ice_seg);
@@ -1800,9 +1800,9 @@ int ice_update_pkg(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
  * success it returns a pointer to the segment header, otherwise it will
  * return NULL.
  */
-static struct ice_generic_seg_hdr *
+static const struct ice_generic_seg_hdr *
 ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
-		    struct ice_pkg_hdr *pkg_hdr)
+		    const struct ice_pkg_hdr *pkg_hdr)
 {
 	u32 i;
@@ -1813,11 +1813,9 @@ ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
 	/* Search all package segments for the requested segment type */
 	for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
-		struct ice_generic_seg_hdr *seg;
+		const struct ice_generic_seg_hdr *seg;
 
-		seg = (struct ice_generic_seg_hdr
-			       *)((u8 *)pkg_hdr +
-				  le32_to_cpu(pkg_hdr->seg_offset[i]));
+		seg = (void *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]);
 
 		if (le32_to_cpu(seg->seg_type) == seg_type)
 			return seg;
@@ -2354,12 +2352,12 @@ ice_get_set_tx_topo(struct ice_hw *hw, u8 *buf, u16 buf_size,
  *
  * Return: zero when update was successful, negative values otherwise.
  */
-int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
+int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len)
 {
-	u8 *current_topo, *new_topo = NULL;
-	struct ice_run_time_cfg_seg *seg;
-	struct ice_buf_hdr *section;
-	struct ice_pkg_hdr *pkg_hdr;
+	u8 *new_topo = NULL, *topo __free(kfree) = NULL;
+	const struct ice_run_time_cfg_seg *seg;
+	const struct ice_buf_hdr *section;
+	const struct ice_pkg_hdr *pkg_hdr;
 	enum ice_ddp_state state;
 	u16 offset, size = 0;
 	u32 reg = 0;
@@ -2375,15 +2373,13 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
 		return -EOPNOTSUPP;
 	}
 
-	current_topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
-	if (!current_topo)
+	topo = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);
+	if (!topo)
 		return -ENOMEM;
 
-	/* Get the current Tx topology */
-	status = ice_get_set_tx_topo(hw, current_topo, ICE_AQ_MAX_BUF_LEN, NULL,
-				     &flags, false);
-	kfree(current_topo);
+	/* Get the current Tx topology flags */
+	status = ice_get_set_tx_topo(hw, topo, ICE_AQ_MAX_BUF_LEN, NULL, &flags,
+				     false);
 	if (status) {
 		ice_debug(hw, ICE_DBG_INIT, "Get current topology is failed\n");
@@ -2419,7 +2415,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
 		goto update_topo;
 	}
 
-	pkg_hdr = (struct ice_pkg_hdr *)buf;
+	pkg_hdr = (const struct ice_pkg_hdr *)buf;
 	state = ice_verify_pkg(pkg_hdr, len);
 	if (state) {
 		ice_debug(hw, ICE_DBG_INIT, "Failed to verify pkg (err: %d)\n",
@@ -2428,7 +2424,7 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
 	}
 
 	/* Find runtime configuration segment */
-	seg = (struct ice_run_time_cfg_seg *)
+	seg = (const struct ice_run_time_cfg_seg *)
 	      ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE_RUN_TIME_CFG, pkg_hdr);
 	if (!seg) {
 		ice_debug(hw, ICE_DBG_INIT, "5 layer topology segment is missing\n");
@@ -2461,8 +2457,10 @@ int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len)
 		return -EIO;
 	}
 
-	/* Get the new topology buffer */
-	new_topo = ((u8 *)section) + offset;
+	/* Get the new topology buffer, reuse current topo copy mem */
+	static_assert(ICE_PKG_BUF_SIZE == ICE_AQ_MAX_BUF_LEN);
+	new_topo = topo;
+	memcpy(new_topo, (u8 *)section + offset, size);
 
 update_topo:
 	/* Acquire global lock to make sure that set topology issued
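The reworked ice_cfg_tx_topo() relies on the __free(kfree) annotation from
<linux/cleanup.h>: the allocation is released automatically when the variable
leaves scope, so no early return can leak it. A minimal sketch of the pattern,
where example_check() and example_use() are hypothetical helpers:

    #include <linux/cleanup.h>
    #include <linux/slab.h>

    static int example_get_topo(struct ice_hw *hw)
    {
            /* freed automatically whenever this function returns */
            u8 *topo __free(kfree) = kzalloc(ICE_AQ_MAX_BUF_LEN, GFP_KERNEL);

            if (!topo)
                    return -ENOMEM;

            if (example_check(hw))          /* hypothetical early-error path */
                    return -EIO;            /* 'topo' is still freed here */

            return example_use(hw, topo);
    }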
drivers/net/ethernet/intel/ice/ice_ddp.h
@@ -438,7 +438,7 @@ struct ice_pkg_enum {
 	u32 buf_idx;
 
 	u32 type;
-	struct ice_buf_hdr *buf;
+	const struct ice_buf_hdr *buf;
 	u32 sect_idx;
 	void *sect;
 	u32 sect_type;
@@ -467,6 +467,6 @@ ice_pkg_enum_entry(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 void *ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
 			   u32 sect_type);
 
-int ice_cfg_tx_topo(struct ice_hw *hw, u8 *buf, u32 len);
+int ice_cfg_tx_topo(struct ice_hw *hw, const void *buf, u32 len);
 
 #endif
drivers/net/ethernet/intel/ice/ice_dpll.c
@@ -656,6 +656,8 @@ ice_dpll_output_state_set(const struct dpll_pin *pin, void *pin_priv,
 	struct ice_dpll_pin *p = pin_priv;
 	struct ice_dpll *d = dpll_priv;
 
+	if (state == DPLL_PIN_STATE_SELECTABLE)
+		return -EINVAL;
 	if (!enable && p->state[d->dpll_idx] == DPLL_PIN_STATE_DISCONNECTED)
 		return 0;
 
@@ -1843,6 +1845,8 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
 	struct dpll_pin *parent;
 	int ret, i;
 
+	if (WARN_ON((!vsi || !vsi->netdev)))
+		return -EINVAL;
 	ret = ice_dpll_get_pins(pf, pin, start_idx, ICE_DPLL_RCLK_NUM_PER_PF,
 				pf->dplls.clock_id);
 	if (ret)
@@ -1858,8 +1862,6 @@ ice_dpll_init_rclk_pins(struct ice_pf *pf, struct ice_dpll_pin *pin,
 		if (ret)
 			goto unregister_pins;
 	}
-	if (WARN_ON((!vsi || !vsi->netdev)))
-		return -EINVAL;
 	dpll_netdev_pin_set(vsi->netdev, pf->dplls.rclk.pin);
 
 	return 0;
drivers/net/ethernet/intel/ice/ice_main.c
@@ -4536,16 +4536,10 @@ ice_init_tx_topology(struct ice_hw *hw, const struct firmware *firmware)
 	u8 num_tx_sched_layers = hw->num_tx_sched_layers;
 	struct ice_pf *pf = hw->back;
 	struct device *dev;
-	u8 *buf_copy;
 	int err;
 
 	dev = ice_pf_to_dev(pf);
-	/* ice_cfg_tx_topo buf argument is not a constant,
-	 * so we have to make a copy
-	 */
-	buf_copy = kmemdup(firmware->data, firmware->size, GFP_KERNEL);
-
-	err = ice_cfg_tx_topo(hw, buf_copy, firmware->size);
+	err = ice_cfg_tx_topo(hw, firmware->data, firmware->size);
 	if (!err) {
 		if (hw->num_tx_sched_layers > num_tx_sched_layers)
 			dev_info(dev, "Tx scheduling layers switching feature disabled\n");
drivers/net/ethernet/intel/ice/ice_sriov.c
@@ -1096,8 +1096,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
 		return -ENOENT;
 
 	vsi = ice_get_vf_vsi(vf);
-	if (!vsi)
+	if (!vsi) {
+		ice_put_vf(vf);
 		return -ENOENT;
+	}
 
 	prev_msix = vf->num_msix;
 	prev_queues = vf->num_vf_qs;
@@ -1142,8 +1144,10 @@ int ice_sriov_set_msix_vec_count(struct pci_dev *vf_dev, int msix_vec_count)
 	vf->num_msix = prev_msix;
 	vf->num_vf_qs = prev_queues;
 	vf->first_vector_idx = ice_sriov_get_irqs(pf, vf->num_msix);
-	if (vf->first_vector_idx < 0)
+	if (vf->first_vector_idx < 0) {
+		ice_put_vf(vf);
 		return -EINVAL;
+	}
 
 	if (needs_rebuild) {
 		ice_vf_reconfig_vsi(vf);
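Both hunks above restore the same invariant: the reference taken by
ice_get_vf_by_id() must be released with ice_put_vf() on every exit path. A
sketch of one conventional way to keep such paths balanced with a single
unwind label (the driver itself uses the open-coded puts shown above):

    static int example_set_msix(struct ice_pf *pf, u16 vf_id)
    {
            struct ice_vsi *vsi;
            struct ice_vf *vf;
            int err = 0;

            vf = ice_get_vf_by_id(pf, vf_id);       /* takes a reference */
            if (!vf)
                    return -ENOENT;

            vsi = ice_get_vf_vsi(vf);
            if (!vsi) {
                    err = -ENOENT;
                    goto out_put;
            }

            /* ... reconfigure MSI-X vectors ... */

    out_put:
            ice_put_vf(vf);                 /* dropped on every exit */
            return err;
    }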
drivers/net/ethernet/intel/ice/ice_switch.c
@@ -6322,8 +6322,6 @@ ice_replay_vsi_fltr(struct ice_hw *hw, u16 vsi_handle, u8 recp_id,
 		if (!itr->vsi_list_info ||
 		    !test_bit(vsi_handle, itr->vsi_list_info->vsi_map))
 			continue;
-		/* Clearing it so that the logic can add it back */
-		clear_bit(vsi_handle, itr->vsi_list_info->vsi_map);
 		f_entry.fltr_info.vsi_handle = vsi_handle;
 		f_entry.fltr_info.fltr_act = ICE_FWD_TO_VSI;
 		/* update the src in case it is VSI num */
drivers/net/ethernet/intel/ice/ice_tc_lib.c
@@ -819,6 +819,17 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr)
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
 		rule_info.sw_act.src = vsi->idx;
 		rule_info.flags_info.act = ICE_SINGLE_ACT_LAN_ENABLE;
+		/* This is a specific case. The destination VSI index is
+		 * overwritten by the source VSI index. This type of filter
+		 * should allow the packet to go to the LAN, not to the
+		 * VSI passed here. It should set LAN_EN bit only. However,
+		 * the VSI must be a valid one. Setting source VSI index
+		 * here is safe. Even if the result from switch is set LAN_EN
+		 * and LB_EN (which normally will pass the packet to this VSI)
+		 * packet won't be seen on the VSI, because local loopback is
+		 * turned off.
+		 */
+		rule_info.sw_act.vsi_handle = vsi->idx;
 	} else {
 		/* VF to VF */
 		rule_info.sw_act.flag |= ICE_FLTR_TX;
drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -335,6 +335,13 @@ static int ice_vf_rebuild_host_vlan_cfg(struct ice_vf *vf, struct ice_vsi *vsi)
 
 		err = vlan_ops->add_vlan(vsi, &vf->port_vlan_info);
 	} else {
+		/* clear possible previous port vlan config */
+		err = ice_vsi_clear_port_vlan(vsi);
+		if (err) {
+			dev_err(dev, "failed to clear port VLAN via VSI parameters for VF %u, error %d\n",
+				vf->vf_id, err);
+			return err;
+		}
 		err = ice_vsi_add_vlan_zero(vsi);
 	}
drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
@@ -787,3 +787,60 @@ int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi)
 	kfree(ctxt);
 	return err;
 }
+
+int ice_vsi_clear_port_vlan(struct ice_vsi *vsi)
+{
+	struct ice_hw *hw = &vsi->back->hw;
+	struct ice_vsi_ctx *ctxt;
+	int err;
+
+	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
+	if (!ctxt)
+		return -ENOMEM;
+
+	ctxt->info = vsi->info;
+
+	ctxt->info.port_based_outer_vlan = 0;
+	ctxt->info.port_based_inner_vlan = 0;
+
+	ctxt->info.inner_vlan_flags =
+		FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_TX_MODE_M,
+			   ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL);
+
+	if (ice_is_dvm_ena(hw)) {
+		ctxt->info.inner_vlan_flags |=
+			FIELD_PREP(ICE_AQ_VSI_INNER_VLAN_EMODE_M,
+				   ICE_AQ_VSI_INNER_VLAN_EMODE_NOTHING);
+		ctxt->info.outer_vlan_flags =
+			FIELD_PREP(ICE_AQ_VSI_OUTER_VLAN_TX_MODE_M,
+				   ICE_AQ_VSI_OUTER_VLAN_TX_MODE_ALL);
+		ctxt->info.outer_vlan_flags |=
+			FIELD_PREP(ICE_AQ_VSI_OUTER_TAG_TYPE_M,
+				   ICE_AQ_VSI_OUTER_TAG_VLAN_8100);
+		ctxt->info.outer_vlan_flags |=
+			ICE_AQ_VSI_OUTER_VLAN_EMODE_NOTHING <<
+			ICE_AQ_VSI_OUTER_VLAN_EMODE_S;
+	}
+
+	ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
+
+	ctxt->info.valid_sections =
+		cpu_to_le16(ICE_AQ_VSI_PROP_OUTER_TAG_VALID |
+			    ICE_AQ_VSI_PROP_VLAN_VALID |
+			    ICE_AQ_VSI_PROP_SW_VALID);
+
+	err = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
+	if (err) {
+		dev_err(ice_pf_to_dev(vsi->back), "update VSI for clearing port based VLAN failed, err %d aq_err %s\n",
+			err, ice_aq_str(hw->adminq.sq_last_status));
+	} else {
+		vsi->info.port_based_outer_vlan =
+			ctxt->info.port_based_outer_vlan;
+		vsi->info.port_based_inner_vlan =
+			ctxt->info.port_based_inner_vlan;
+		vsi->info.outer_vlan_flags = ctxt->info.outer_vlan_flags;
+		vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags;
+		vsi->info.sw_flags2 = ctxt->info.sw_flags2;
+	}
+
+	kfree(ctxt);
+	return err;
+}
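The FIELD_PREP() calls in the new helper come from <linux/bitfield.h>: each
one shifts a value into the bit positions described by a mask, avoiding
open-coded shift constants. A tiny sketch with a made-up mask:

    #include <linux/bitfield.h>
    #include <linux/bits.h>

    #define EXAMPLE_MODE_M  GENMASK(7, 5)   /* hypothetical 3-bit field */

    /* Equivalent to (3 << 5) here, but FIELD_PREP() also checks at
     * compile time that a constant value fits within the mask.
     */
    u8 flags = FIELD_PREP(EXAMPLE_MODE_M, 3);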
drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.h
@@ -36,5 +36,6 @@ int ice_vsi_ena_outer_insertion(struct ice_vsi *vsi, u16 tpid);
 int ice_vsi_dis_outer_insertion(struct ice_vsi *vsi);
 int ice_vsi_set_outer_port_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan);
 int ice_vsi_clear_outer_port_vlan(struct ice_vsi *vsi);
+int ice_vsi_clear_port_vlan(struct ice_vsi *vsi);
 
 #endif /* _ICE_VSI_VLAN_LIB_H_ */
drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -99,6 +99,7 @@ static int idpf_vf_intr_reg_init(struct idpf_vport *vport)
 		intr->dyn_ctl_intena_m = VF_INT_DYN_CTLN_INTENA_M;
 		intr->dyn_ctl_intena_msk_m = VF_INT_DYN_CTLN_INTENA_MSK_M;
 		intr->dyn_ctl_itridx_s = VF_INT_DYN_CTLN_ITR_INDX_S;
+		intr->dyn_ctl_intrvl_s = VF_INT_DYN_CTLN_INTERVAL_S;
 		intr->dyn_ctl_wb_on_itr_m = VF_INT_DYN_CTLN_WB_ON_ITR_M;
 
 		spacing = IDPF_ITR_IDX_SPACING(reg_vals[vec_id].itrn_index_spacing,
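The shift stored above matters later, when the driver composes a write to the
dynamic interrupt control register; with dyn_ctl_intrvl_s left at zero, the
ITR interval bits would land at the wrong offset and could clobber
neighboring fields. A hedged sketch of that composition (the exact write path
in idpf may differ; itr_idx and interval are illustrative local values):

    u32 val = intr->dyn_ctl_intena_m |
              (itr_idx << intr->dyn_ctl_itridx_s) |
              (interval << intr->dyn_ctl_intrvl_s);

    writel(val, intr->dyn_ctl);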
drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -666,7 +666,7 @@ idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
 
 	if (ctlq_msg->data_len) {
 		payload = ctlq_msg->ctx.indirect.payload->va;
-		payload_size = ctlq_msg->ctx.indirect.payload->size;
+		payload_size = ctlq_msg->data_len;
 	}
 
 	xn->reply_sz = payload_size;
@@ -1295,10 +1295,6 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
 		err = reply_sz;
 		goto free_vport_params;
 	}
-	if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
-		err = -EIO;
-		goto free_vport_params;
-	}
 
 	return 0;
@@ -2602,9 +2598,6 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
 		if (reply_sz < 0)
 			return reply_sz;
 
-		if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
-			return -EIO;
-
 		ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
 		if (ptypes_recvd > max_ptype)
 			return -EINVAL;
@@ -3088,9 +3081,9 @@ void idpf_vc_core_deinit(struct idpf_adapter *adapter)
 	if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
 		return;
 
-	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
 	idpf_deinit_task(adapter);
 	idpf_intr_rel(adapter);
+	idpf_vc_xn_shutdown(adapter->vcxn_mngr);
 
 	cancel_delayed_work_sync(&adapter->serv_task);
 	cancel_delayed_work_sync(&adapter->mbx_task);
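The reordering in the last hunk reflects a teardown dependency: judging by
the patch subject, vport removal in idpf_deinit_task() and vector release in
idpf_intr_rel() still issue virtchnl transactions, so the transaction manager
must be shut down only after both. Annotated, the corrected order is:

    idpf_deinit_task(adapter);      /* vport teardown still uses virtchnl */
    idpf_intr_rel(adapter);         /* vector release still uses virtchnl */
    idpf_vc_xn_shutdown(adapter->vcxn_mngr);    /* now safe to shut down */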