Commit 68171bbd authored by Linus Torvalds

Merge tag 'net-5.19-rc2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
 "Quick follow up, to cleanly fast-forward net again.

  Current release - new code bugs:

   - Revert "net/mlx5e: Allow relaxed ordering over VFs"

  Previous releases - regressions:

   - seg6: fix seg6_lookup_any_nexthop() to handle VRFs using
     flowi_l3mdev

  Misc:

   - rename TLS_INFO_ZC_SENDFILE to better express the meaning"

* tag 'net-5.19-rc2-2' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net:
  net: seg6: fix seg6_lookup_any_nexthop() to handle VRFs using flowi_l3mdev
  nfp: flower: restructure flow-key for gre+vlan combination
  nfp: avoid unnecessary check warnings in nfp_app_get_vf_config
  tls: Rename TLS_INFO_ZC_SENDFILE to TLS_INFO_ZC_RO_TX
  net/mlx5: fs, fail conflicting actions
  net/mlx5: Rearm the FW tracer after each tracer event
  net/mlx5: E-Switch, pair only capable devices
  net/mlx5e: CT: Fix cleanup of CT before cleanup of TC ct rules
  Revert "net/mlx5e: Allow relaxed ordering over VFs"
  MAINTAINERS: adjust MELLANOX ETHERNET INNOVA DRIVERS to TLS support removal
parents f2ecc964 bf56a091
@@ -12703,7 +12703,6 @@ L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	https://patchwork.kernel.org/project/netdevbpf/list/
-F:	drivers/net/ethernet/mellanox/mlx5/core/accel/*
 F:	drivers/net/ethernet/mellanox/mlx5/core/en_accel/*
 F:	drivers/net/ethernet/mellanox/mlx5/core/fpga/*
 F:	include/linux/mlx5/mlx5_ifc_fpga.h
@@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other)
 	return pci_get_drvdata(to_pci_dev(other));
 }
 
-static int next_phys_dev(struct device *dev, const void *data)
-{
-	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
-
-	mdev = pci_get_other_drvdata(this->device, dev);
-	if (!mdev)
-		return 0;
-
-	return _next_phys_dev(mdev, data);
-}
-
 static int next_phys_dev_lag(struct device *dev, const void *data)
 {
 	struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data;
@@ -623,13 +612,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev,
 	return pci_get_drvdata(to_pci_dev(next));
 }
 
-/* Must be called with intf_mutex held */
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
-{
-	lockdep_assert_held(&mlx5_intf_mutex);
-	return mlx5_get_next_dev(dev, &next_phys_dev);
-}
-
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev)
 {
@@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 	if (!tracer->owner)
 		return;
 
+	if (unlikely(!tracer->str_db.loaded))
+		goto arm;
+
 	block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE;
 	start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE;
 
@@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work)
 						      &tmp_trace_block[TRACES_PER_BLOCK - 1]);
 	}
 
+arm:
 	mlx5_fw_tracer_arm(dev);
 }
 
@@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void
 		queue_work(tracer->work_queue, &tracer->ownership_change_work);
 		break;
 	case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE:
-		if (likely(tracer->str_db.loaded))
-			queue_work(tracer->work_queue, &tracer->handle_traces_work);
+		queue_work(tracer->work_queue, &tracer->handle_traces_work);
 		break;
 	default:
 		mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n",
@@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev,
 static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
 {
 	bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO;
-	bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
+	bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) &&
+		MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 
 	return ro && lro_en ?
 		MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN;
@@ -38,11 +38,12 @@
 void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc)
 {
+	bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev);
 	bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write);
 	bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read);
 
-	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read);
-	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write);
+	MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read);
+	MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write);
 }
 
 static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn,
@@ -950,6 +950,13 @@ static int mlx5e_init_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
 	return err;
 }
 
+static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
+{
+	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
+	mlx5e_rep_bond_cleanup(rpriv);
+	mlx5e_rep_tc_cleanup(rpriv);
+}
+
 static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
 		return err;
 	}
 
-	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
-	if (err)
-		goto err_ht_init;
-
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
 		err = mlx5e_init_uplink_rep_tx(rpriv);
 		if (err)
 			goto err_init_tx;
 	}
 
+	err = mlx5e_tc_ht_init(&rpriv->tc_ht);
+	if (err)
+		goto err_ht_init;
+
 	return 0;
 
-err_init_tx:
-	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
 err_ht_init:
+	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
+		mlx5e_cleanup_uplink_rep_tx(rpriv);
+err_init_tx:
 	mlx5e_destroy_tises(priv);
 	return err;
 }
 
-static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv)
-{
-	mlx5e_rep_tc_netdevice_event_unregister(rpriv);
-	mlx5e_rep_bond_cleanup(rpriv);
-	mlx5e_rep_tc_cleanup(rpriv);
-}
-
 static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
 {
 	struct mlx5e_rep_priv *rpriv = priv->ppriv;
 
-	mlx5e_destroy_tises(priv);
+	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
 
 	if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
 		mlx5e_cleanup_uplink_rep_tx(rpriv);
 
-	mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
+	mlx5e_destroy_tises(priv);
 }
 
 static void mlx5e_rep_enable(struct mlx5e_priv *priv)
@@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event,
 
 	switch (event) {
 	case ESW_OFFLOADS_DEVCOM_PAIR:
-		if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
-			break;
-
 		if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
 		    mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
 			break;
@@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
+	if (!mlx5_is_lag_supported(esw->dev))
+		return;
+
 	mlx5_devcom_register_component(devcom,
 				       MLX5_DEVCOM_ESW_OFFLOADS,
 				       mlx5_esw_offloads_devcom_event,
@@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
 	if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
 		return;
 
+	if (!mlx5_is_lag_supported(esw->dev))
+		return;
+
 	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
 			       ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
@@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte,
 	return NULL;
 }
 
-static bool check_conflicting_actions(u32 action1, u32 action2)
+static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0,
+					   const struct mlx5_fs_vlan *vlan1)
 {
-	u32 xored_actions = action1 ^ action2;
+	return vlan0->ethtype != vlan1->ethtype ||
+	       vlan0->vid != vlan1->vid ||
+	       vlan0->prio != vlan1->prio;
+}
+
+static bool check_conflicting_actions(const struct mlx5_flow_act *act1,
+				      const struct mlx5_flow_act *act2)
+{
+	u32 action1 = act1->action;
+	u32 action2 = act2->action;
+	u32 xored_actions;
+
+	xored_actions = action1 ^ action2;
 
 	/* if one rule only wants to count, it's ok */
 	if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT ||
@@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2)
 		     MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2))
 		return true;
 
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT &&
+	    act1->pkt_reformat != act2->pkt_reformat)
+		return true;
+
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
+	    act1->modify_hdr != act2->modify_hdr)
+		return true;
+
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH &&
+	    check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0]))
+		return true;
+
+	if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 &&
+	    check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1]))
+		return true;
+
 	return false;
 }
 
@@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte,
 				  const struct mlx5_flow_context *flow_context,
 				  const struct mlx5_flow_act *flow_act)
 {
-	if (check_conflicting_actions(flow_act->action, fte->action.action)) {
+	if (check_conflicting_actions(flow_act, &fte->action)) {
 		mlx5_core_warn(get_dev(&fte->node),
 			       "Found two FTEs with conflicting actions\n");
 		return -EEXIST;
@@ -74,6 +74,16 @@ struct mlx5_lag {
 	struct lag_mpesw lag_mpesw;
 };
 
+static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev)
+{
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) < 2 ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS)
+		return false;
+	return true;
+}
+
 static inline struct mlx5_lag *
 mlx5_lag_dev(struct mlx5_core_dev *dev)
 {
@@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev);
 void mlx5_detach_device(struct mlx5_core_dev *dev);
 int mlx5_register_device(struct mlx5_core_dev *dev);
 void mlx5_unregister_device(struct mlx5_core_dev *dev);
-struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev);
 struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev);
 void mlx5_dev_list_lock(void);
 void mlx5_dev_list_unlock(void);
@@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
 		key_size += sizeof(struct nfp_flower_ipv6);
 	}
 
+	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
+		map[FLOW_PAY_QINQ] = key_size;
+		key_size += sizeof(struct nfp_flower_vlan);
+	}
+
 	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
 		map[FLOW_PAY_GRE] = key_size;
 		if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6)
@@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map)
 			key_size += sizeof(struct nfp_flower_ipv4_gre_tun);
 	}
 
-	if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) {
-		map[FLOW_PAY_QINQ] = key_size;
-		key_size += sizeof(struct nfp_flower_vlan);
-	}
-
 	if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) ||
 	    (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) {
 		map[FLOW_PAY_UDP_TUN] = key_size;
@@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
 		}
 	}
 
+	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
+		offset = key_map[FLOW_PAY_QINQ];
+		key = kdata + offset;
+		msk = mdata + offset;
+		for (i = 0; i < _CT_TYPE_MAX; i++) {
+			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
+						(struct nfp_flower_vlan *)msk,
+						rules[i]);
+		}
+	}
+
 	if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) {
 		offset = key_map[FLOW_PAY_GRE];
 		key = kdata + offset;
@@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry)
 		}
 	}
 
-	if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) {
-		offset = key_map[FLOW_PAY_QINQ];
-		key = kdata + offset;
-		msk = mdata + offset;
-		for (i = 0; i < _CT_TYPE_MAX; i++) {
-			nfp_flower_compile_vlan((struct nfp_flower_vlan *)key,
-						(struct nfp_flower_vlan *)msk,
-						rules[i]);
-		}
-	}
-
 	if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN ||
 	    key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
 		offset = key_map[FLOW_PAY_UDP_TUN];
@@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 		msk += sizeof(struct nfp_flower_ipv6);
 	}
 
+	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
+		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
+					(struct nfp_flower_vlan *)msk,
+					rule);
+		ext += sizeof(struct nfp_flower_vlan);
+		msk += sizeof(struct nfp_flower_vlan);
+	}
+
 	if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) {
 		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
 			struct nfp_flower_ipv6_gre_tun *gre_match;
@@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 		}
 	}
 
-	if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) {
-		nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext,
-					(struct nfp_flower_vlan *)msk,
-					rule);
-		ext += sizeof(struct nfp_flower_vlan);
-		msk += sizeof(struct nfp_flower_vlan);
-	}
-
 	if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN ||
 	    key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) {
 		if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) {
@@ -15,7 +15,7 @@
 #include "nfp_net_sriov.h"
 
 static int
-nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
+nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn)
 {
 	u16 cap_vf;
 
@@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg)
 	cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP);
 	if ((cap_vf & cap) != cap) {
-		nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
+		if (warn)
+			nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg);
 		return -EOPNOTSUPP;
 	}
 
 	if (vf < 0 || vf >= app->pf->num_vfs) {
-		nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
+		if (warn)
+			nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf);
 		return -EINVAL;
 	}
 
 	return 0;
@@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 	unsigned int vf_offset;
 	int err;
 
-	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac");
+	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true);
 	if (err)
 		return err;
 
@@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
 	u32 vlan_tag;
 	int err;
 
-	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan");
+	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true);
 	if (err)
 		return err;
 
@@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos,
 	}
 
 	/* Check if fw supports or not */
-	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto");
+	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true);
 	if (err)
 		is_proto_sup = false;
 
@@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf,
 	u32 vf_offset, ratevalue;
 	int err;
 
-	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true);
 	if (err)
 		return err;
 
@@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
 	int err;
 
 	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF,
-				  "spoofchk");
+				  "spoofchk", true);
 	if (err)
 		return err;
 
@@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable)
 	int err;
 
 	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST,
-				  "trust");
+				  "trust", true);
 	if (err)
 		return err;
 
@@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf,
 	int err;
 
 	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE,
-				  "link_state");
+				  "link_state", true);
 	if (err)
 		return err;
 
@@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
 	u8 flags;
 	int err;
 
-	err = nfp_net_sriov_check(app, vf, 0, "");
+	err = nfp_net_sriov_check(app, vf, 0, "", true);
 	if (err)
 		return err;
 
@@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf,
 	ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag);
 	ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag);
-	if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"))
+	if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false))
 		ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag));
 
 	ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags);
 	ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags);
 	ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags);
 
-	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate");
+	err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false);
 	if (!err) {
 		rate = readl(app->pf->vfcfg_tbl2 + vf_offset +
 			     NFP_NET_VF_CFG_RATE);
@@ -39,7 +39,7 @@
 /* TLS socket options */
 #define TLS_TX 1	/* Set transmit parameters */
 #define TLS_RX 2	/* Set receive parameters */
-#define TLS_TX_ZEROCOPY_SENDFILE 3	/* transmit zerocopy sendfile */
+#define TLS_TX_ZEROCOPY_RO 3	/* TX zerocopy (only sendfile now) */
 
 /* Supported versions */
 #define TLS_VERSION_MINOR(ver) ((ver) & 0xFF)
@@ -161,7 +161,7 @@ enum {
 	TLS_INFO_CIPHER,
 	TLS_INFO_TXCONF,
 	TLS_INFO_RXCONF,
-	TLS_INFO_ZC_SENDFILE,
+	TLS_INFO_ZC_RO_TX,
 	__TLS_INFO_MAX,
 };
 #define TLS_INFO_MAX (__TLS_INFO_MAX - 1)
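For context, a minimal user-space sketch of the renamed option (not part of this merge): it assumes a TCP socket that already has the "tls" ULP attached and TLS_TX crypto state installed, the SOL_TLS fallback define is only needed with uapi headers that predate its export, and enable_tls_zc_ro is a hypothetical helper name.

#include <sys/socket.h>
#include <linux/tls.h>

#ifndef SOL_TLS
#define SOL_TLS 282	/* socket level for kTLS */
#endif

/* Enable read-only TX zero-copy on an established kTLS socket.
 * TLS_TX_ZEROCOPY_RO is the new name for TLS_TX_ZEROCOPY_SENDFILE;
 * the option value is an unsigned int limited to 0 or 1.
 */
static int enable_tls_zc_ro(int fd)
{
	unsigned int on = 1;

	return setsockopt(fd, SOL_TLS, TLS_TX_ZEROCOPY_RO, &on, sizeof(on));
}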
@@ -218,6 +218,7 @@ seg6_lookup_any_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
 	struct flowi6 fl6;
 	int dev_flags = 0;
 
+	memset(&fl6, 0, sizeof(fl6));
 	fl6.flowi6_iif = skb->dev->ifindex;
 	fl6.daddr = nhaddr ? *nhaddr : hdr->daddr;
 	fl6.saddr = hdr->saddr;
@@ -544,7 +544,7 @@ static int do_tls_getsockopt(struct sock *sk, int optname,
 		rc = do_tls_getsockopt_conf(sk, optval, optlen,
 					    optname == TLS_TX);
 		break;
-	case TLS_TX_ZEROCOPY_SENDFILE:
+	case TLS_TX_ZEROCOPY_RO:
 		rc = do_tls_getsockopt_tx_zc(sk, optval, optlen);
 		break;
 	default:
@@ -731,7 +731,7 @@ static int do_tls_setsockopt(struct sock *sk, int optname, sockptr_t optval,
 					    optname == TLS_TX);
 		release_sock(sk);
 		break;
-	case TLS_TX_ZEROCOPY_SENDFILE:
+	case TLS_TX_ZEROCOPY_RO:
 		lock_sock(sk);
 		rc = do_tls_setsockopt_tx_zc(sk, optval, optlen);
 		release_sock(sk);
@@ -970,7 +970,7 @@ static int tls_get_info(const struct sock *sk, struct sk_buff *skb)
 		goto nla_failure;
 
 	if (ctx->tx_conf == TLS_HW && ctx->zerocopy_sendfile) {
-		err = nla_put_flag(skb, TLS_INFO_ZC_SENDFILE);
+		err = nla_put_flag(skb, TLS_INFO_ZC_RO_TX);
 		if (err)
 			goto nla_failure;
 	}
@@ -994,7 +994,7 @@ static size_t tls_get_info_size(const struct sock *sk)
 		nla_total_size(sizeof(u16)) +	/* TLS_INFO_CIPHER */
 		nla_total_size(sizeof(u16)) +	/* TLS_INFO_RXCONF */
 		nla_total_size(sizeof(u16)) +	/* TLS_INFO_TXCONF */
-		nla_total_size(0) +		/* TLS_INFO_ZC_SENDFILE */
+		nla_total_size(0) +		/* TLS_INFO_ZC_RO_TX */
 		0;
 
 	return size;