Commit c8ff576e authored by David S. Miller

Merge tag 'mlx5-fixes-2022-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2022-02-01

This series provides bug fixes to the mlx5 driver.
Please pull and let me know if there is any problem.

Sorry about the long series, but I had to move the top two patches from
net-next to net to help avoid a build break when the kspp branch is
merged into linux-next in the next merge window.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3aa430d3 ad518573
@@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
 struct mlx5e_tx_wqe {
 	struct mlx5_wqe_ctrl_seg ctrl;
 	struct mlx5_wqe_eth_seg  eth;
-	struct mlx5_wqe_data_seg data[0];
+	struct mlx5_wqe_data_seg data[];
 };

 struct mlx5e_rx_wqe_ll {
@@ -241,8 +241,8 @@ struct mlx5e_umr_wqe {
 	struct mlx5_wqe_umr_ctrl_seg uctrl;
 	struct mlx5_mkey_seg         mkc;
 	union {
-		struct mlx5_mtt inline_mtts[0];
-		struct mlx5_klm inline_klms[0];
+		DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts);
+		DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms);
 	};
 };
......
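A note on the two hunks above: C99 forbids a flexible array member as the only member of a struct or union, which is why the old zero-length arrays could not simply become inline_mtts[] inside the union. The kernel's DECLARE_FLEX_ARRAY() wraps each array together with an empty struct so the union stays legal while sizeof() is unchanged. A minimal userspace sketch of the resulting layout, using GNU C as the kernel does (types and names are illustrative, not the driver's):

#include <stdio.h>

/* stand-ins for the driver's mlx5_mtt/mlx5_klm entry types */
struct mtt { unsigned long long ptag; };
struct klm { unsigned int key; };

struct umr_wqe {
	unsigned long long ctrl;	/* stand-in for the fixed segments */
	union {
		/* roughly what DECLARE_FLEX_ARRAY() expands to */
		struct { struct {} __empty_mtts; struct mtt inline_mtts[]; };
		struct { struct {} __empty_klms; struct klm inline_klms[]; };
	};
};

int main(void)
{
	/* the flexible arrays contribute no size of their own */
	printf("sizeof(struct umr_wqe) = %zu\n", sizeof(struct umr_wqe));
	return 0;
}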
@@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate,
 static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw)
 {
-	*max_average_bw = div_u64(ceil, BYTES_IN_MBIT);
+	/* Hardware treats 0 as "unlimited", set at least 1. */
+	*max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1);
 	qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n",
 		ceil, *max_average_bw);
......
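The QoS hunk above exists because integer division truncates: any ceiling below one Mbit per second divides to zero, and the hardware reads zero as "unlimited", silently removing the cap. A minimal sketch of the conversion, assuming BYTES_IN_MBIT is 1000000 / 8 as in the driver:

#include <stdint.h>
#include <stdio.h>

#define BYTES_IN_MBIT (1000000 / 8)

/* mirrors mlx5e_htb_convert_ceil(): clamp the result to at least 1 */
static uint32_t convert_ceil(uint64_t ceil_bytes_per_sec)
{
	uint32_t bw = (uint32_t)(ceil_bytes_per_sec / BYTES_IN_MBIT);

	return bw ? bw : 1;	/* 0 would mean "unlimited" to the HW */
}

int main(void)
{
	/* 100000 B/s is 800 kbit/s: truncates to 0, clamped to 1 */
	printf("%u\n", convert_ceil(100000));
	return 0;
}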
@@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
 static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
 {
-	struct mlx5e_rep_priv *rpriv;
-	struct mlx5e_priv *priv;
-
-	/* A given netdev is not a representor or not a slave of LAG configuration */
-	if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev))
-		return false;
-
-	priv = netdev_priv(netdev);
-	rpriv = priv->ppriv;
-
-	/* Egress acl forward to vport is supported only non-uplink representor */
-	return rpriv->rep->vport != MLX5_VPORT_UPLINK;
+	return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev);
 }

 static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
@@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr)
 	u16 fwd_vport_num;
 	int err;

-	if (!mlx5e_rep_is_lag_netdev(netdev))
-		return;
-
 	info = ptr;
 	lag_info = info->lower_state_info;
 	/* This is not an event of a representor becoming active slave */
@@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr)
 	struct net_device *lag_dev;
 	struct mlx5e_priv *priv;

-	if (!mlx5e_rep_is_lag_netdev(netdev))
-		return;
-
 	priv = netdev_priv(netdev);
 	rpriv = priv->ppriv;
 	lag_dev = info->upper_dev;
@@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb,
 				       unsigned long event, void *ptr)
 {
 	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_rep_priv *rpriv;
+	struct mlx5e_rep_bond *bond;
+	struct mlx5e_priv *priv;
+
+	if (!mlx5e_rep_is_lag_netdev(netdev))
+		return NOTIFY_DONE;
+
+	bond = container_of(nb, struct mlx5e_rep_bond, nb);
+	priv = netdev_priv(netdev);
+	rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH);
+	/* Verify VF representor is on the same device of the bond handling the netevent. */
+	if (rpriv->uplink_priv.bond != bond)
+		return NOTIFY_DONE;
+
 	switch (event) {
 	case NETDEV_CHANGELOWERSTATE:
......
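The refactor above moves all filtering into the netevent handler, which recovers its own mlx5e_rep_bond from the embedded notifier_block via container_of() and ignores events that belong to another device's bond. A standalone sketch of that embedding pattern (generic names, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int priority; };

struct rep_bond {
	int id;
	struct notifier_block nb;	/* embedded, as in the driver */
};

static int netevent(struct notifier_block *nb)
{
	/* recover the enclosing object from the member pointer */
	struct rep_bond *bond = container_of(nb, struct rep_bond, nb);

	return bond->id;
}

int main(void)
{
	struct rep_bond bond = { .id = 7 };

	printf("%d\n", netevent(&bond.nb));	/* prints 7 */
	return 0;
}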
@@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
 	}

 	br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event;
-	err = register_netdevice_notifier(&br_offloads->netdev_nb);
+	err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
 	if (err) {
 		esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n",
 			 err);
@@ -509,7 +509,9 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv)
 err_register_swdev:
 	destroy_workqueue(br_offloads->wq);
 err_alloc_wq:
+	rtnl_lock();
 	mlx5_esw_bridge_cleanup(esw);
+	rtnl_unlock();
 }

 void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
@@ -524,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv)
 		return;

 	cancel_delayed_work_sync(&br_offloads->update_work);
-	unregister_netdevice_notifier(&br_offloads->netdev_nb);
+	unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb);
 	unregister_switchdev_blocking_notifier(&br_offloads->nb_blk);
 	unregister_switchdev_notifier(&br_offloads->nb);
 	destroy_workqueue(br_offloads->wq);
......
@@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size)
 	return pi;
 }

+static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
+{
+	return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1);
+}
+
 struct mlx5e_shampo_umr {
 	u16 len;
 };
......
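The helper added above masks the raw index from the CQE into the header ring, which works because hd_per_wq is a power of two, so index & (size - 1) is a cheap modulo. A minimal sketch of the wrap (the ring size here is an assumed example):

#include <stdint.h>
#include <stdio.h>

/* valid only when ring_size is a power of two */
static uint16_t wrap_index(uint16_t raw, uint16_t ring_size)
{
	return raw & (ring_size - 1);	/* same as raw % ring_size */
}

int main(void)
{
	printf("%u\n", wrap_index(300, 256));	/* prints 44 */
	return 0;
}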
@@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,

 	/* copy the inline part if required */
 	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
-		memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE);
+		memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start));
 		eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
+		memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start),
+		       MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start));
 		dma_len  -= MLX5E_XDP_MIN_INLINE;
 		dma_addr += MLX5E_XDP_MIN_INLINE;
 		dseg++;
......
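The XDP hunk above splits one oversized memcpy() into two bounded copies: only sizeof(inline_hdr.start) bytes go into the ethernet segment's declared field, and the rest of the inline header lands in the data segment that follows it, so each write stays within bounds that FORTIFY_SOURCE can check. A userspace sketch of the split (field sizes are illustrative):

#include <stdio.h>
#include <string.h>

#define MIN_INLINE 18	/* e.g. Ethernet header plus VLAN tag */

struct eth_seg  { unsigned short sz; unsigned char start[2]; };
struct data_seg { unsigned char buf[MIN_INLINE - 2]; };

int main(void)
{
	unsigned char pkt[MIN_INLINE] = { 0x12, 0x34 };
	struct { struct eth_seg eseg; struct data_seg dseg; } wqe;

	/* first copy is bounded by the declared 2-byte field */
	memcpy(wqe.eseg.start, pkt, sizeof(wqe.eseg.start));
	/* remainder goes to the adjacent segment, also within bounds */
	memcpy(&wqe.dseg, pkt + sizeof(wqe.eseg.start),
	       MIN_INLINE - sizeof(wqe.eseg.start));

	printf("%02x %02x\n", wqe.eseg.start[0], wqe.dseg.buf[0]);
	return 0;
}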
@@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb,
 	/* Tunnel mode */
 	if (mode == XFRM_MODE_TUNNEL) {
 		eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2;
-		eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
 		if (xo->proto == IPPROTO_IPV6)
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6;
-		if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP)
+
+		switch (xo->inner_ipproto) {
+		case IPPROTO_UDP:
 			eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP;
+			fallthrough;
+		case IPPROTO_TCP:
+			/* IP | ESP | IP | [TCP | UDP] */
+			eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2;
+			break;
+		default:
+			break;
+		}
+
 		return;
 	}
......
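The switch in the IPsec hunk relies on a deliberate fallthrough: the UDP case sets its own flag and then shares the inner L4 offset assignment with the TCP case, while unknown inner protocols set no L4 state at all. A compact sketch of that control flow (flag values are illustrative):

#include <stdio.h>

enum { PROTO_TCP = 6, PROTO_UDP = 17 };

static unsigned int swp_flags(int inner_ipproto)
{
	unsigned int flags = 0;

	switch (inner_ipproto) {
	case PROTO_UDP:
		flags |= 0x2;	/* inner L4 is UDP */
		/* fallthrough */
	case PROTO_TCP:
		flags |= 0x1;	/* inner L4 offset is valid */
		break;
	default:
		break;		/* no inner L4 header to describe */
	}
	return flags;
}

int main(void)
{
	/* prints 3 1 0 */
	printf("%x %x %x\n", swp_flags(PROTO_UDP), swp_flags(PROTO_TCP),
	       swp_flags(0));
	return 0;
}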
@@ -131,14 +131,17 @@ static inline bool
 mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb,
 				  struct mlx5_wqe_eth_seg *eseg)
 {
-	struct xfrm_offload *xo = xfrm_offload(skb);
+	u8 inner_ipproto;

 	if (!mlx5e_ipsec_eseg_meta(eseg))
 		return false;

 	eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM;
-	if (xo->inner_ipproto) {
-		eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM;
+	inner_ipproto = xfrm_offload(skb)->inner_ipproto;
+	if (inner_ipproto) {
+		eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM;
+		if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP)
+			eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM;
 	} else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
 		eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM;
 		sq->stats->csum_partial_inner++;
......
@@ -1117,7 +1117,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr
 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
 					      struct tcphdr *skb_tcp_hd)
 {
-	u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
 	struct tcphdr *last_tcp_hd;
 	void *last_hd_addr;
@@ -1871,7 +1871,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 	return skb;
 }

-static void
+static struct sk_buff *
 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			  struct mlx5_cqe64 *cqe, u16 header_index)
 {
@@ -1895,7 +1895,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
 		if (unlikely(!skb))
-			return;
+			return NULL;

 		/* queue up for recycling/reuse */
 		page_ref_inc(head->page);
@@ -1907,7 +1907,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 				      ALIGN(head_size, sizeof(long)));
 		if (unlikely(!skb)) {
 			rq->stats->buff_alloc_err++;
-			return;
+			return NULL;
 		}

 		prefetchw(skb->data);
@@ -1918,9 +1918,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 		skb->tail += head_size;
 		skb->len += head_size;
 	}

-	rq->hw_gro_data->skb = skb;
-	NAPI_GRO_CB(skb)->count = 1;
-	skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size;
+	return skb;
 }

 static void
@@ -1973,13 +1971,14 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 {
 	u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
-	u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index);
+	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
 	u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
 	u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
 	u32 data_offset = wqe_offset & (PAGE_SIZE - 1);
 	u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
 	u16 wqe_id = be16_to_cpu(cqe->wqe_id);
 	u32 page_idx = wqe_offset >> PAGE_SHIFT;
+	u16 head_size = cqe->shampo.header_size;
 	struct sk_buff **skb = &rq->hw_gro_data->skb;
 	bool flush = cqe->shampo.flush;
 	bool match = cqe->shampo.match;
@@ -2011,9 +2010,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	}

 	if (!*skb) {
-		mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+		if (likely(head_size))
+			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
+		else
+			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset,
+								  page_idx);
 		if (unlikely(!*skb))
 			goto free_hd_entry;
+
+		NAPI_GRO_CB(*skb)->count = 1;
+		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
 	} else {
 		NAPI_GRO_CB(*skb)->count++;
 		if (NAPI_GRO_CB(*skb)->count == 2 &&
@@ -2027,8 +2033,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 		}
 	}

-	di = &wi->umr.dma_info[page_idx];
-	mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+	if (likely(head_size)) {
+		di = &wi->umr.dma_info[page_idx];
+		mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset);
+	}

 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
 	if (flush)
......
@@ -1414,7 +1414,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_out;

-	if (!attr->chain && esw_attr->int_port) {
+	if (!attr->chain && esw_attr->int_port &&
+	    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
 		/* If decap route device is internal port, change the
 		 * source vport value in reg_c0 back to uplink just in
 		 * case the rule performs goto chain > 0. If we have a miss
@@ -3191,6 +3192,18 @@ actions_match_supported(struct mlx5e_priv *priv,
 		return false;
 	}

+	if (!(~actions &
+	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
+		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
+		return false;
+	}
+
+	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
+		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
+		return false;
+	}
+
 	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
 	    !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
 					   actions, ct_flow, ct_clear, extack))
......
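The first new check above uses the !(~actions & mask) idiom, which is true exactly when every bit of the mask is set in actions, i.e. when the rule asks for forward and drop at the same time. A standalone illustration with made-up flag values:

#include <stdio.h>

#define ACT_FWD  (1u << 0)
#define ACT_DROP (1u << 1)

/* true iff all bits of mask are set in actions */
static int has_all(unsigned int actions, unsigned int mask)
{
	return !(~actions & mask);
}

int main(void)
{
	printf("%d\n", has_all(ACT_FWD | ACT_DROP, ACT_FWD | ACT_DROP)); /* 1 */
	printf("%d\n", has_all(ACT_FWD, ACT_FWD | ACT_DROP));            /* 0 */
	return 0;
}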
@@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
 	int cpy1_sz = 2 * ETH_ALEN;
 	int cpy2_sz = ihs - cpy1_sz;

-	memcpy(vhdr, skb->data, cpy1_sz);
+	memcpy(&vhdr->addrs, skb->data, cpy1_sz);
 	vhdr->h_vlan_proto = skb->vlan_proto;
 	vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
 	memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz);
......
@@ -1574,6 +1574,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw)
 {
 	struct mlx5_esw_bridge_offloads *br_offloads;

+	ASSERT_RTNL();
+
 	br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL);
 	if (!br_offloads)
 		return ERR_PTR(-ENOMEM);
@@ -1590,6 +1592,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw)
 {
 	struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads;

+	ASSERT_RTNL();
+
 	if (!br_offloads)
 		return;
......
@@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template,
 		    __field(unsigned int, used)
 		    ),
 	    TP_fast_assign(
-		    strncpy(__entry->dev_name,
+		    strscpy(__entry->dev_name,
 			    netdev_name(fdb->dev),
 			    IFNAMSIZ);
 		    memcpy(__entry->addr, fdb->key.addr, ETH_ALEN);
......
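The tracepoint switches to strscpy() because strncpy() does not NUL-terminate the destination when the source is as long as the buffer. A userspace approximation of strscpy() semantics (simplified: the kernel version returns -E2BIG on truncation, this sketch returns -1):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

static ptrdiff_t strscpy_sketch(char *dst, const char *src, size_t size)
{
	size_t len;

	if (size == 0)
		return -1;
	len = strnlen(src, size);
	if (len == size) {		/* source does not fit: truncate */
		len = size - 1;
		memcpy(dst, src, len);
		dst[len] = '\0';	/* always terminated, unlike strncpy */
		return -1;
	}
	memcpy(dst, src, len + 1);
	return (ptrdiff_t)len;
}

int main(void)
{
	char buf[8];

	printf("%td %s\n", strscpy_sketch(buf, "eth0", sizeof(buf)), buf);
	return 0;
}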
@@ -132,7 +132,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev)
 {
 	struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset;

-	del_timer(&fw_reset->timer);
+	del_timer_sync(&fw_reset->timer);
 }

 static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health)
......
@@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains)
 u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains)
 {
-	if (!mlx5_chains_prios_supported(chains))
-		return 1;
-
 	if (mlx5_chains_ignore_flow_level_supported(chains))
 		return UINT_MAX;

+	if (!chains->dev->priv.eswitch ||
+	    chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS)
+		return 1;
+
 	/* We should get here only for eswitch case */
 	return FDB_TC_MAX_PRIO;
 }
@@ -211,7 +212,7 @@ static int
 create_chain_restore(struct fs_chain *chain)
 {
 	struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch;
-	char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)];
+	u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
 	struct mlx5_fs_chains *chains = chain->chains;
 	enum mlx5e_tc_attr_to_reg chain_to_reg;
 	struct mlx5_modify_hdr *mod_hdr;
......
@@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev,
 	switch (module_id) {
 	case MLX5_MODULE_ID_SFP:
-		mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+		mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
 		break;
 	case MLX5_MODULE_ID_QSFP:
 	case MLX5_MODULE_ID_QSFP_PLUS:
 	case MLX5_MODULE_ID_QSFP28:
-		mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset);
+		mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset);
 		break;
 	default:
 		mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id);
 		return -EINVAL;
 	}

-	if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH)
+	if (offset + size > MLX5_EEPROM_PAGE_LENGTH)
 		/* Cross pages read, read until offset 256 in low page */
-		size -= offset + size - MLX5_EEPROM_PAGE_LENGTH;
+		size = MLX5_EEPROM_PAGE_LENGTH - offset;

 	query.size = size;
+	query.offset = offset;

 	return mlx5_query_mcia(dev, &query, data);
 }
......
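The EEPROM fix clamps a read that would cross the 256-byte low page so it ends exactly at the page boundary; as the removed lines show, the old arithmetic mixed query.offset with the local offset when computing the overshoot. A worked sketch of the clamp, assuming a 256-byte page as in MLX5_EEPROM_PAGE_LENGTH:

#include <stdio.h>

#define EEPROM_PAGE_LEN 256u

static unsigned int clamp_read(unsigned int offset, unsigned int size)
{
	if (offset + size > EEPROM_PAGE_LEN)
		size = EEPROM_PAGE_LEN - offset;	/* stop at page end */
	return size;
}

int main(void)
{
	/* a 32-byte read at offset 240 is shortened to 16 bytes */
	printf("%u\n", clamp_read(240, 32));
	return 0;
}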
@@ -46,8 +46,10 @@ struct vlan_hdr {
  *	@h_vlan_encapsulated_proto: packet type ID or len
  */
 struct vlan_ethhdr {
+	struct_group(addrs,
 	unsigned char	h_dest[ETH_ALEN];
 	unsigned char	h_source[ETH_ALEN];
+	);
 	__be16		h_vlan_proto;
 	__be16		h_vlan_TCI;
 	__be16		h_vlan_encapsulated_proto;
......
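struct_group() wraps h_dest and h_source so that the driver's memcpy() in mlx5e_insert_vlan() (see the earlier hunk) can target both MAC addresses as one object with a known size, keeping FORTIFY_SOURCE happy. A userspace analogue of what the macro builds, roughly a union of an anonymous struct and a named mirror (names and the __be16 stand-in are illustrative):

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct vlan_ethhdr_sketch {
	union {
		struct {	/* direct access: vhdr.h_dest, vhdr.h_source */
			unsigned char h_dest[ETH_ALEN];
			unsigned char h_source[ETH_ALEN];
		};
		struct {	/* grouped access: &vhdr.addrs, sizeof(vhdr.addrs) */
			unsigned char h_dest[ETH_ALEN];
			unsigned char h_source[ETH_ALEN];
		} addrs;
	};
	unsigned short h_vlan_proto;	/* __be16 in the kernel header */
};

int main(void)
{
	struct vlan_ethhdr_sketch vhdr;
	unsigned char frame[2 * ETH_ALEN] = { 0xaa, 0xbb };

	/* one bounded copy covers both addresses */
	memcpy(&vhdr.addrs, frame, sizeof(vhdr.addrs));
	printf("%02x %02x\n", vhdr.h_dest[0], vhdr.h_dest[1]);
	return 0;
}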