Commit a19f7d7d authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2022-02-10

Dan Carpenter propagates an error in FEC configuration.

Jesse fixes TSO offloads of IPIP and SIT frames.

Dave adds a dedicated LAG unregister function to resolve a KASAN error
and moves auxiliary device re-creation after LAG removal to the service
task to avoid issues with RTNL lock.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: Avoid RTNL lock when re-creating auxiliary device
  ice: Fix KASAN error in LAG NETDEV_UNREGISTER handler
  ice: fix IPIP and SIT TSO offload
  ice: fix an error code in ice_cfg_phy_fec()
====================

Link: https://lore.kernel.org/r/20220210170515.2609656-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 7fbf6795 5dbbbd01
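
A brief aside on the RTNL part of this series: ice_set_rdma_cap() can be reached from the LAG notifier path while RTNL is held, so the driver now only records the request with ICE_FLAG_PLUG_AUX_DEV and lets the service task call ice_plug_aux_dev() later, as the hunks below show. The snippet that follows is only a minimal sketch of that defer-to-the-service-task pattern; every identifier in it (my_pf, my_request_aux_plug, my_service_task, MY_FLAG_PLUG_AUX_DEV) is hypothetical and not part of the ice driver.

/* Minimal sketch, not ice driver code: the caller that may hold RTNL only
 * sets an atomic flag; the work item, which runs in its own context, does
 * the part that must not run under RTNL.
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

#define MY_FLAG_PLUG_AUX_DEV	0

struct my_pf {
	unsigned long flags;
	struct work_struct serv_task;
};

/* May be called with RTNL held: record intent and kick the worker. */
static void my_request_aux_plug(struct my_pf *pf)
{
	set_bit(MY_FLAG_PLUG_AUX_DEV, &pf->flags);
	schedule_work(&pf->serv_task);
}

/* Worker: a safe context to (re)create the auxiliary device. */
static void my_service_task(struct work_struct *work)
{
	struct my_pf *pf = container_of(work, struct my_pf, serv_task);

	if (test_and_clear_bit(MY_FLAG_PLUG_AUX_DEV, &pf->flags))
		pr_info("plug the auxiliary device here\n");
}

static void my_pf_init(struct my_pf *pf)
{
	pf->flags = 0;
	INIT_WORK(&pf->serv_task, my_service_task);
}
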
@@ -483,6 +483,7 @@ enum ice_pf_flags {
 	ICE_FLAG_VF_TRUE_PROMISC_ENA,
 	ICE_FLAG_MDD_AUTO_RESET_VF,
 	ICE_FLAG_LINK_LENIENT_MODE_ENA,
+	ICE_FLAG_PLUG_AUX_DEV,
 	ICE_PF_FLAGS_NBITS		/* must be last */
 };
@@ -887,7 +888,7 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf)
 	if (pf->hw.func_caps.common_cap.rdma && pf->num_rdma_msix) {
 		set_bit(ICE_FLAG_RDMA_ENA, pf->flags);
 		set_bit(ICE_FLAG_AUX_ENA, pf->flags);
-		ice_plug_aux_dev(pf);
+		set_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags);
 	}
 }
@@ -3342,7 +3342,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
 	    !ice_fw_supports_report_dflt_cfg(hw)) {
 		struct ice_link_default_override_tlv tlv;
 
-		if (ice_get_link_default_override(&tlv, pi))
+		status = ice_get_link_default_override(&tlv, pi);
+		if (status)
 			goto out;
 
 		if (!(tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE) &&
@@ -204,17 +204,39 @@ ice_lag_unlink(struct ice_lag *lag,
 		lag->upper_netdev = NULL;
 	}
 
-	if (lag->peer_netdev) {
-		dev_put(lag->peer_netdev);
-		lag->peer_netdev = NULL;
-	}
-
+	lag->peer_netdev = NULL;
 	ice_set_sriov_cap(pf);
 	ice_set_rdma_cap(pf);
 	lag->bonded = false;
 	lag->role = ICE_LAG_NONE;
 }
 
+/**
+ * ice_lag_unregister - handle netdev unregister events
+ * @lag: LAG info struct
+ * @netdev: netdev reporting the event
+ */
+static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
+{
+	struct ice_pf *pf = lag->pf;
+
+	/* check to see if this event is for this netdev
+	 * check that we are in an aggregate
+	 */
+	if (netdev != lag->netdev || !lag->bonded)
+		return;
+
+	if (lag->upper_netdev) {
+		dev_put(lag->upper_netdev);
+		lag->upper_netdev = NULL;
+		ice_set_sriov_cap(pf);
+		ice_set_rdma_cap(pf);
+	}
+	/* perform some cleanup in case we come back */
+	lag->bonded = false;
+	lag->role = ICE_LAG_NONE;
+}
+
 /**
  * ice_lag_changeupper_event - handle LAG changeupper event
  * @lag: LAG info struct
@@ -307,7 +329,7 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
 		ice_lag_info_event(lag, ptr);
 		break;
 	case NETDEV_UNREGISTER:
-		ice_lag_unlink(lag, ptr);
+		ice_lag_unregister(lag, netdev);
 		break;
 	default:
 		break;
@@ -568,6 +568,7 @@ struct ice_tx_ctx_desc {
 				(0x3FFFFULL << ICE_TXD_CTX_QW1_TSO_LEN_S)
 
 #define ICE_TXD_CTX_QW1_MSS_S	50
+#define ICE_TXD_CTX_MIN_MSS	64
 
 #define ICE_TXD_CTX_QW1_VSI_S	50
 #define ICE_TXD_CTX_QW1_VSI_M	(0x3FFULL << ICE_TXD_CTX_QW1_VSI_S)
@@ -2253,6 +2253,9 @@ static void ice_service_task(struct work_struct *work)
 		return;
 	}
 
+	if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))
+		ice_plug_aux_dev(pf);
+
 	ice_clean_adminq_subtask(pf);
 	ice_check_media_subtask(pf);
 	ice_check_for_hang_subtask(pf);
@@ -8525,6 +8528,7 @@ ice_features_check(struct sk_buff *skb,
 		   struct net_device __always_unused *netdev,
 		   netdev_features_t features)
 {
+	bool gso = skb_is_gso(skb);
 	size_t len;
 
 	/* No point in doing any of this if neither checksum nor GSO are
@@ -8537,24 +8541,32 @@ ice_features_check(struct sk_buff *skb,
 	/* We cannot support GSO if the MSS is going to be less than
 	 * 64 bytes. If it is then we need to drop support for GSO.
 	 */
-	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
+	if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS))
 		features &= ~NETIF_F_GSO_MASK;
 
-	len = skb_network_header(skb) - skb->data;
+	len = skb_network_offset(skb);
 	if (len > ICE_TXD_MACLEN_MAX || len & 0x1)
 		goto out_rm_features;
 
-	len = skb_transport_header(skb) - skb_network_header(skb);
+	len = skb_network_header_len(skb);
 	if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
 		goto out_rm_features;
 
 	if (skb->encapsulation) {
-		len = skb_inner_network_header(skb) - skb_transport_header(skb);
-		if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
-			goto out_rm_features;
+		/* this must work for VXLAN frames AND IPIP/SIT frames, and in
+		 * the case of IPIP frames, the transport header pointer is
+		 * after the inner header! So check to make sure that this
+		 * is a GRE or UDP_TUNNEL frame before doing that math.
+		 */
+		if (gso && (skb_shinfo(skb)->gso_type &
+			    (SKB_GSO_GRE | SKB_GSO_UDP_TUNNEL))) {
+			len = skb_inner_network_header(skb) -
+			      skb_transport_header(skb);
+			if (len > ICE_TXD_L4LEN_MAX || len & 0x1)
+				goto out_rm_features;
+		}
 
-		len = skb_inner_transport_header(skb) -
-		      skb_inner_network_header(skb);
+		len = skb_inner_network_header_len(skb);
 		if (len > ICE_TXD_IPLEN_MAX || len & 0x1)
 			goto out_rm_features;
 	}
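
As a rough illustration of the encapsulation change above (my own reading of the comment carried in the patch, not an authoritative description of every skb layout): for GRE and UDP-tunnel GSO frames the outer transport header precedes the inner network header, so subtracting the two offsets yields the tunnel header length, but for IPIP/SIT frames the transport header already points past the inner network header, so the same subtraction goes negative, wraps when stored in a size_t, fails the length check, and presumably stripped the offload features from those frames. The toy program below uses made-up offsets purely to show the wrap-around.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* VXLAN-like layout: outer transport header (offset 34) precedes the
	 * inner network header (offset 64), so the difference is the real
	 * tunnel header length.
	 */
	size_t vxlan_len = (size_t)64 - (size_t)34;

	/* IPIP/SIT-like layout: the transport header (offset 54, the inner
	 * TCP header) lies after the inner network header (offset 34), so
	 * the same subtraction wraps around when stored in a size_t.
	 */
	size_t ipip_len = (size_t)34 - (size_t)54;

	printf("tunnel header length (VXLAN-like): %zu\n", vxlan_len);
	printf("bogus length (IPIP-like, wrapped): %zu\n", ipip_len);
	return 0;
}
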