Commit 63757225 authored by Jakub Kicinski's avatar Jakub Kicinski

Merge tag 'mlx5-updates-2022-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-07-28

Misc updates to mlx5 driver:

1) Gal corrects to use skb_tcp_all_headers on encapsulated skbs.

2) Roi Adds the support for offloading standalone police actions.

3) Lama did some refactoring to minimize code coupling with the
mlx5e_priv "god object" in some of the flows, and converted some of the
objects to pointers to conserve memory when these objects aren't needed.
This is part one of a two-part series.

* tag 'mlx5-updates-2022-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Move mlx5e_init_l2_addr to en_main
  net/mlx5e: Split en_fs ndo's and move to en_main
  net/mlx5e: Separate mlx5e_set_rx_mode_work and move caller to en_main
  net/mlx5e: Add mdev to flow_steering struct
  net/mlx5e: Report flow steering errors with mdev err report API
  net/mlx5e: Convert mlx5e_flow_steering member of mlx5e_priv to pointer
  net/mlx5e: Allocate VLAN and TC for featured profiles only
  net/mlx5e: Make mlx5e_tc_table private
  net/mlx5e: Convert mlx5e_tc_table member of mlx5e_flow_steering to pointer
  net/mlx5e: TC, Support tc action api for police
  net/mlx5e: TC, Separate get/update/replace meter functions
  net/mlx5e: Add red and green counters for metering
  net/mlx5e: TC, Allocate post meter ft per rule
  net/mlx5: DR, Add support for flow metering ASO
  net/mlx5e: Fix wrong use of skb_tcp_all_headers() with encapsulation
====================

Link: https://lore.kernel.org/r/20220728205728.143074-1-saeed@kernel.org
Signed-off-by: default avatarJakub Kicinski <kuba@kernel.org>
parents 84a8d931 069448b2
......@@ -921,7 +921,7 @@ struct mlx5e_priv {
struct mlx5e_rx_res *rx_res;
u32 *tx_rates;
struct mlx5e_flow_steering fs;
struct mlx5e_flow_steering *fs;
struct workqueue_struct *wq;
struct work_struct update_carrier_work;
......@@ -987,6 +987,8 @@ enum mlx5e_profile_feature {
MLX5E_PROFILE_FEATURE_PTP_RX,
MLX5E_PROFILE_FEATURE_PTP_TX,
MLX5E_PROFILE_FEATURE_QOS_HTB,
MLX5E_PROFILE_FEATURE_FS_VLAN,
MLX5E_PROFILE_FEATURE_FS_TC,
};
struct mlx5e_profile {
......@@ -1022,7 +1024,6 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close
void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats);
void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
int mlx5e_self_test_num(struct mlx5e_priv *priv);
int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
......
......@@ -15,29 +15,6 @@ enum {
MLX5E_TC_MISS_LEVEL,
};
struct mlx5e_tc_table {
/* Protects the dynamic assignment of the t parameter
* which is the nic tc root table.
*/
struct mutex t_lock;
struct mlx5_flow_table *t;
struct mlx5_flow_table *miss_t;
struct mlx5_fs_chains *chains;
struct mlx5e_post_act *post_act;
struct rhashtable ht;
struct mod_hdr_tbl mod_hdr;
struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
DECLARE_HASHTABLE(hairpin_tbl, 8);
struct notifier_block netdevice_nb;
struct netdev_net_notifier netdevice_nn;
struct mlx5_tc_ct_priv *ct;
struct mapping_ctx *mapping;
};
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
......@@ -160,16 +137,20 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU
struct mlx5e_accel_fs_tcp;
#endif
struct mlx5e_profile;
struct mlx5e_fs_udp;
struct mlx5e_fs_any;
struct mlx5e_ptp_fs;
struct mlx5e_flow_steering {
bool state_destroy;
bool vlan_strip_disable;
struct mlx5_core_dev *mdev;
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table tc;
struct mlx5e_tc_table *tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table *vlan;
struct mlx5e_l2_table l2;
......@@ -200,13 +181,22 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
int mlx5e_fs_init(struct mlx5e_priv *priv);
void mlx5e_fs_cleanup(struct mlx5e_priv *priv);
struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile,
struct mlx5_core_dev *mdev,
bool state_destroy);
void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, struct net_device *netdev);
int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
__be16 proto, u16 vid);
int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs,
struct net_device *netdev,
__be16 proto, u16 vid);
void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev);
#endif /* __MLX5E_FLOW_STEER_H__ */
......@@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
fs_udp = priv->fs.udp;
fs_udp = priv->fs->udp;
ft = fs_udp->tables[type].t;
fs_udp_set_dport_flow(spec, type, d_port);
......@@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ
struct mlx5e_fs_udp *fs_udp;
int err;
fs_udp = priv->fs.udp;
fs_udp = priv->fs->udp;
fs_udp_t = &fs_udp->tables[type];
dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type));
dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type));
rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
......@@ -208,7 +208,7 @@ static int fs_udp_create_groups(struct mlx5e_flow_table *ft, enum fs_udp_type ty
static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
{
struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type];
struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type)
ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
......@@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv)
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i));
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
......@@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
dest.ft = priv->fs.udp->tables[i].t;
dest.ft = priv->fs->udp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest);
err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
......@@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv)
void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
{
struct mlx5e_fs_udp *fs_udp = priv->fs.udp;
struct mlx5e_fs_udp *fs_udp = priv->fs->udp;
int i;
if (!fs_udp)
......@@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv)
fs_udp_destroy_table(fs_udp, i);
kfree(fs_udp);
priv->fs.udp = NULL;
priv->fs->udp = NULL;
}
int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
{
int i, err;
if (priv->fs.udp) {
priv->fs.udp->ref_cnt++;
if (priv->fs->udp) {
priv->fs->udp->ref_cnt++;
return 0;
}
priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL);
if (!priv->fs.udp)
priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL);
if (!priv->fs->udp)
return -ENOMEM;
for (i = 0; i < FS_UDP_NUM_TYPES; i++) {
......@@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_tables;
priv->fs.udp->ref_cnt = 1;
priv->fs->udp->ref_cnt = 1;
return 0;
err_destroy_tables:
while (--i >= 0)
fs_udp_destroy_table(priv->fs.udp, i);
fs_udp_destroy_table(priv->fs->udp, i);
kfree(priv->fs.udp);
priv->fs.udp = NULL;
kfree(priv->fs->udp);
priv->fs->udp = NULL;
return err;
}
......@@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
fs_any = priv->fs.any;
fs_any = priv->fs->any;
ft = fs_any->table.t;
fs_any_set_ethertype_flow(spec, ether_type);
......@@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv)
struct mlx5e_fs_any *fs_any;
int err;
fs_any = priv->fs.any;
fs_any = priv->fs->any;
fs_any_t = &fs_any->table;
dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY);
dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY);
rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
......@@ -474,7 +474,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
static int fs_any_create_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fs.any->table;
struct mlx5e_flow_table *ft = &priv->fs->any->table;
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv)
ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
......@@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv)
int err;
/* Modify ttc rules destination to point back to the indir TIRs */
err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY);
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
......@@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv)
int err;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.any->table.t;
dest.ft = priv->fs->any->table.t;
/* Modify ttc rules destination to point on the accel_fs FTs */
err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest);
err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
......@@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any)
void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
{
struct mlx5e_fs_any *fs_any = priv->fs.any;
struct mlx5e_fs_any *fs_any = priv->fs->any;
if (!fs_any)
return;
......@@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv)
fs_any_destroy_table(fs_any);
kfree(fs_any);
priv->fs.any = NULL;
priv->fs->any = NULL;
}
int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
{
int err;
if (priv->fs.any) {
priv->fs.any->ref_cnt++;
if (priv->fs->any) {
priv->fs->any->ref_cnt++;
return 0;
}
priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL);
if (!priv->fs.any)
priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL);
if (!priv->fs->any)
return -ENOMEM;
err = fs_any_create_table(priv);
......@@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv)
if (err)
goto err_destroy_table;
priv->fs.any->ref_cnt = 1;
priv->fs->any->ref_cnt = 1;
return 0;
err_destroy_table:
fs_any_destroy_table(priv->fs.any);
fs_any_destroy_table(priv->fs->any);
kfree(priv->fs.any);
priv->fs.any = NULL;
kfree(priv->fs->any);
priv->fs->any = NULL;
return err;
}
......@@ -624,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params)
static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!ptp_fs->valid)
return;
......@@ -641,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv)
static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv)
{
u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res);
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
struct mlx5_flow_handle *rule;
int err;
......@@ -808,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv)
if (!ptp_fs)
return -ENOMEM;
priv->fs.ptp_fs = ptp_fs;
priv->fs->ptp_fs = ptp_fs;
return 0;
}
void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv)
{
struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs;
struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs;
if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX))
return;
......
......@@ -21,6 +21,7 @@
#include "en/tc/sample.h"
#include "en_accel/ipsec_rxtx.h"
#include "en/tc/int_port.h"
#include "en/tc/act/act.h"
struct mlx5e_rep_indr_block_priv {
struct net_device *netdev;
......@@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch,
return 0;
}
/* Handle FLOW_ACT_REPLACE for a standalone (rule-less) tc action: look up
 * the matching mlx5e act handler for the namespace we are in and let it
 * offload the action. Returns 0 if any handler accepted it, -EOPNOTSUPP
 * otherwise.
 */
static int
mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv,
			   struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct flow_action_entry *entry;
	struct mlx5e_tc_act *tc_act;
	bool offloaded = false;
	int i;

	/* There is no use case currently for more than one action (e.g. pedit).
	 * when there will be, need to handle cleaning multiple actions on err.
	 */
	if (!flow_offload_has_one_action(&fl_act->action))
		return -EOPNOTSUPP;

	ns_type = (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) ?
		  MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;

	flow_action_for_each(i, entry, &fl_act->action) {
		tc_act = mlx5e_tc_act_get(entry->id, ns_type);
		if (!tc_act || !tc_act->offload_action)
			continue;
		/* offload_action() returns 0 on success */
		if (!tc_act->offload_action(priv, fl_act, entry))
			offloaded = true;
	}

	return offloaded ? 0 : -EOPNOTSUPP;
}
/* Handle FLOW_ACT_DESTROY for a standalone tc action: dispatch to the act
 * handler's destroy_action() callback for the active flow namespace.
 */
static int
mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv,
			   struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_tc_act *tc_act;

	ns_type = (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) ?
		  MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;

	tc_act = mlx5e_tc_act_get(fl_act->id, ns_type);
	if (!tc_act || !tc_act->destroy_action)
		return -EOPNOTSUPP;

	return tc_act->destroy_action(priv, fl_act);
}
/* Handle FLOW_ACT_STATS for a standalone tc action: dispatch to the act
 * handler's stats_action() callback for the active flow namespace.
 */
static int
mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv,
			 struct flow_offload_action *fl_act)
{
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_tc_act *tc_act;

	ns_type = (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) ?
		  MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;

	tc_act = mlx5e_tc_act_get(fl_act->id, ns_type);
	if (!tc_act || !tc_act->stats_action)
		return -EOPNOTSUPP;

	return tc_act->stats_action(priv, fl_act);
}
/* Dispatch a standalone tc action offload command (replace/destroy/stats)
 * to the matching handler. Unknown commands are rejected with -EOPNOTSUPP.
 */
static int
mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv,
			 struct flow_offload_action *fl_act)
{
	switch (fl_act->command) {
	case FLOW_ACT_REPLACE:
		return mlx5e_rep_indr_replace_act(rpriv, fl_act);
	case FLOW_ACT_DESTROY:
		return mlx5e_rep_indr_destroy_act(rpriv, fl_act);
	case FLOW_ACT_STATS:
		return mlx5e_rep_indr_stats_act(rpriv, fl_act);
	default:
		return -EOPNOTSUPP;
	}
}
/* Entry point for indirect setup callbacks invoked without a net_device
 * (netdev == NULL in mlx5e_rep_indr_setup_cb). Only TC_SETUP_ACT is
 * meaningful in that case; @data carries the flow_offload_action.
 */
static int
mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv,
			    enum tc_setup_type type,
			    void *data)
{
	if (!data)
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_ACT:
		return mlx5e_rep_indr_setup_act(rpriv, data);
	default:
		return -EOPNOTSUPP;
	}
}
static
int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv,
enum tc_setup_type type, void *type_data,
......@@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *
void (*cleanup)(struct flow_block_cb *block_cb))
{
if (!netdev)
return -EOPNOTSUPP;
return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data);
switch (type) {
case TC_SETUP_BLOCK:
......
......@@ -50,6 +50,16 @@ struct mlx5e_tc_act {
bool (*is_multi_table_act)(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
int (*offload_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act,
struct flow_action_entry *act);
int (*destroy_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act);
int (*stats_action)(struct mlx5e_priv *priv,
struct flow_offload_action *fl_act);
};
struct mlx5e_tc_flow_action {
......
......@@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv,
u32 max_chain;
esw = priv->mdev->priv.eswitch;
chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv);
chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc);
max_chain = mlx5_chains_get_chain_range(chains);
reformat_and_fwd = is_esw ?
MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
......
......@@ -24,14 +24,9 @@ tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state,
}
static int
tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
fill_meter_params_from_act(const struct flow_action_entry *act,
struct mlx5e_flow_meter_params *params)
{
struct mlx5e_flow_meter_params *params;
params = &attr->meter_attr.params;
params->index = act->hw_index;
if (act->police.rate_bytes_ps) {
params->mode = MLX5_RATE_LIMIT_BPS;
......@@ -46,6 +41,21 @@ tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
return -EOPNOTSUPP;
}
return 0;
}
static int
tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state,
const struct flow_action_entry *act,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
int err;
err = fill_meter_params_from_act(act, &attr->meter_attr.params);
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO;
attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER;
......@@ -60,8 +70,84 @@ tc_act_is_multi_table_act_police(struct mlx5e_priv *priv,
return true;
}
/* Offload a standalone police action as a hardware flow meter.
 * If a meter with this index does not exist yet (-ENOENT), create it via
 * mlx5e_tc_meter_replace(); if it exists, update its rate/burst parameters
 * and drop the reference taken by the get. Any other lookup error falls
 * through to the IS_ERR() check below and is reported to userspace.
 */
static int
tc_act_police_offload(struct mlx5e_priv *priv,
		      struct flow_offload_action *fl_act,
		      struct flow_action_entry *act)
{
	struct mlx5e_flow_meter_params params = {};
	struct mlx5e_flow_meter_handle *meter;
	int err = 0;

	err = fill_meter_params_from_act(act, &params);
	if (err)
		return err;

	meter = mlx5e_tc_meter_get(priv->mdev, &params);
	if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) {
		/* no such meter yet - create one with these params */
		meter = mlx5e_tc_meter_replace(priv->mdev, &params);
	} else if (!IS_ERR(meter)) {
		err = mlx5e_tc_meter_update(meter, &params);
		mlx5e_tc_meter_put(meter);
	}

	if (IS_ERR(meter)) {
		NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
		mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
		err = PTR_ERR(meter);
	}

	return err;
}
/* Destroy the hardware flow meter backing a standalone police action,
 * identified by fl_act->index.
 */
static int
tc_act_police_destroy(struct mlx5e_priv *priv,
		      struct flow_offload_action *fl_act)
{
	struct mlx5e_flow_meter_params params = {};
	struct mlx5e_flow_meter_handle *meter;

	params.index = fl_act->index;
	meter = mlx5e_tc_meter_get(priv->mdev, &params);
	if (IS_ERR(meter)) {
		NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
		mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
		return PTR_ERR(meter);
	}
	/* first put for the get and second for cleanup */
	mlx5e_tc_meter_put(meter);
	mlx5e_tc_meter_put(meter);

	return 0;
}
/* Report cached hardware counters for a standalone police action.
 * Green-counter packets passed, red-counter packets were dropped; totals
 * are the sum of both, drops come from the red counter alone.
 */
static int
tc_act_police_stats(struct mlx5e_priv *priv,
		    struct flow_offload_action *fl_act)
{
	struct mlx5e_flow_meter_params params = {};
	struct mlx5e_flow_meter_handle *meter;
	u64 bytes, packets, drops, lastuse;

	params.index = fl_act->index;
	meter = mlx5e_tc_meter_get(priv->mdev, &params);
	if (IS_ERR(meter)) {
		NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter");
		mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index);
		return PTR_ERR(meter);
	}

	mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse);
	flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	mlx5e_tc_meter_put(meter);

	return 0;
}
/* Handler ops for the tc police action: rule-attached parsing plus the
 * standalone-action (tc action api) offload/destroy/stats callbacks.
 */
struct mlx5e_tc_act mlx5e_tc_act_police = {
	.can_offload = tc_act_can_offload_police,
	.parse_action = tc_act_parse_police,
	.is_multi_table_act = tc_act_is_multi_table_act_police,
	.offload_action = tc_act_police_offload,
	.destroy_action = tc_act_police_destroy,
	.stats_action = tc_act_police_stats,
};
......@@ -6,7 +6,6 @@
#include "en/tc/post_act.h"
#include "meter.h"
#include "en/tc_priv.h"
#include "post_meter.h"
#define MLX5_START_COLOR_SHIFT 28
#define MLX5_METER_MODE_SHIFT 24
......@@ -47,8 +46,6 @@ struct mlx5e_flow_meters {
struct mlx5_core_dev *mdev;
struct mlx5e_post_act *post_act;
struct mlx5e_post_meter_priv *post_meter;
};
static void
......@@ -243,6 +240,7 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
struct mlx5_core_dev *mdev = flow_meters->mdev;
struct mlx5e_flow_meter_aso_obj *meters_obj;
struct mlx5e_flow_meter_handle *meter;
struct mlx5_fc *counter;
int err, pos, total;
u32 id;
......@@ -250,6 +248,20 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
if (!meter)
return ERR_PTR(-ENOMEM);
counter = mlx5_fc_create(mdev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
goto err_red_counter;
}
meter->red_counter = counter;
counter = mlx5_fc_create(mdev, true);
if (IS_ERR(counter)) {
err = PTR_ERR(counter);
goto err_green_counter;
}
meter->green_counter = counter;
meters_obj = list_first_entry_or_null(&flow_meters->partial_list,
struct mlx5e_flow_meter_aso_obj,
entry);
......@@ -295,6 +307,10 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters)
err_mem:
mlx5e_flow_meter_destroy_aso_obj(mdev, id);
err_create:
mlx5_fc_destroy(mdev, meter->green_counter);
err_green_counter:
mlx5_fc_destroy(mdev, meter->red_counter);
err_red_counter:
kfree(meter);
return ERR_PTR(err);
}
......@@ -307,6 +323,9 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
struct mlx5e_flow_meter_aso_obj *meters_obj;
int n, pos;
mlx5_fc_destroy(mdev, meter->green_counter);
mlx5_fc_destroy(mdev, meter->red_counter);
meters_obj = meter->meters_obj;
pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx;
bitmap_clear(meters_obj->meters_map, pos, 1);
......@@ -325,75 +344,155 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter)
kfree(meter);
}
/* Look up a meter by its params index in the hashtable and take a
 * reference. Returns ERR_PTR(-ENOENT) when no meter with that index
 * exists. Caller must hold flow_meters->sync_lock.
 */
static struct mlx5e_flow_meter_handle *
__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index)
{
	struct mlx5e_flow_meter_handle *meter;

	hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index)
		if (meter->params.index == index)
			goto add_ref;

	return ERR_PTR(-ENOENT);

add_ref:
	meter->refcnt++;

	return meter;
}
struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
{
struct mlx5e_flow_meters *flow_meters;
struct mlx5e_flow_meter_handle *meter;
int err;
flow_meters = mlx5e_get_flow_meters(mdev);
if (!flow_meters)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&flow_meters->sync_lock);
hash_for_each_possible(flow_meters->hashtbl, meter, hlist, params->index)
if (meter->params.index == params->index)
goto add_ref;
meter = __mlx5e_tc_meter_get(flow_meters, params->index);
mutex_unlock(&flow_meters->sync_lock);
meter = __mlx5e_flow_meter_alloc(flow_meters);
if (IS_ERR(meter)) {
err = PTR_ERR(meter);
goto err_alloc;
return meter;
}
/* Drop one reference; on the last put, unhash the meter and free it.
 * Caller must hold flow_meters->sync_lock.
 */
static void
__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
{
	meter->refcnt--;
	if (meter->refcnt)
		return;

	hash_del(&meter->hlist);
	__mlx5e_flow_meter_free(meter);
}
/* Locked wrapper around __mlx5e_tc_meter_put(): serializes the refcount
 * drop (and possible free) against concurrent get/alloc under sync_lock.
 */
void
mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
{
	struct mlx5e_flow_meters *flow_meters = meter->flow_meters;

	mutex_lock(&flow_meters->sync_lock);
	__mlx5e_tc_meter_put(meter);
	mutex_unlock(&flow_meters->sync_lock);
}
static struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters,
struct mlx5e_flow_meter_params *params)
{
struct mlx5e_flow_meter_handle *meter;
meter = __mlx5e_flow_meter_alloc(flow_meters);
if (IS_ERR(meter))
return meter;
hash_add(flow_meters->hashtbl, &meter->hlist, params->index);
meter->params.index = params->index;
add_ref:
meter->refcnt++;
return meter;
}
static int
__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
struct mlx5e_flow_meter_params *params)
{
struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
int err = 0;
if (meter->params.mode != params->mode || meter->params.rate != params->rate ||
meter->params.burst != params->burst) {
err = mlx5e_tc_meter_modify(mdev, meter, params);
if (err)
goto err_update;
goto out;
meter->params.mode = params->mode;
meter->params.rate = params->rate;
meter->params.burst = params->burst;
}
mutex_unlock(&flow_meters->sync_lock);
return meter;
out:
return err;
}
err_update:
if (--meter->refcnt == 0) {
hash_del(&meter->hlist);
__mlx5e_flow_meter_free(meter);
}
err_alloc:
int
mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
struct mlx5e_flow_meter_params *params)
{
struct mlx5_core_dev *mdev = meter->flow_meters->mdev;
struct mlx5e_flow_meters *flow_meters;
int err;
flow_meters = mlx5e_get_flow_meters(mdev);
if (!flow_meters)
return -EOPNOTSUPP;
mutex_lock(&flow_meters->sync_lock);
err = __mlx5e_tc_meter_update(meter, params);
mutex_unlock(&flow_meters->sync_lock);
return ERR_PTR(err);
return err;
}
void
mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter)
struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params)
{
struct mlx5e_flow_meters *flow_meters = meter->flow_meters;
struct mlx5e_flow_meters *flow_meters;
struct mlx5e_flow_meter_handle *meter;
int err;
flow_meters = mlx5e_get_flow_meters(mdev);
if (!flow_meters)
return ERR_PTR(-EOPNOTSUPP);
mutex_lock(&flow_meters->sync_lock);
if (--meter->refcnt == 0) {
hash_del(&meter->hlist);
__mlx5e_flow_meter_free(meter);
meter = __mlx5e_tc_meter_get(flow_meters, params->index);
if (IS_ERR(meter)) {
meter = mlx5e_tc_meter_alloc(flow_meters, params);
if (IS_ERR(meter)) {
err = PTR_ERR(meter);
goto err_get;
}
}
err = __mlx5e_tc_meter_update(meter, params);
if (err)
goto err_update;
mutex_unlock(&flow_meters->sync_lock);
return meter;
err_update:
__mlx5e_tc_meter_put(meter);
err_get:
mutex_unlock(&flow_meters->sync_lock);
return ERR_PTR(err);
}
struct mlx5_flow_table *
mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters)
enum mlx5_flow_namespace_type
mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters)
{
return mlx5e_post_meter_get_ft(flow_meters->post_meter);
return flow_meters->ns_type;
}
struct mlx5e_flow_meters *
......@@ -432,12 +531,6 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv,
goto err_sq;
}
flow_meters->post_meter = mlx5e_post_meter_init(priv, ns_type, post_act);
if (IS_ERR(flow_meters->post_meter)) {
err = PTR_ERR(flow_meters->post_meter);
goto err_post_meter;
}
mutex_init(&flow_meters->sync_lock);
INIT_LIST_HEAD(&flow_meters->partial_list);
INIT_LIST_HEAD(&flow_meters->full_list);
......@@ -451,8 +544,6 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv,
return flow_meters;
err_post_meter:
mlx5_aso_destroy(flow_meters->aso);
err_sq:
mlx5_core_dealloc_pd(mdev, flow_meters->pdn);
err_out:
......@@ -466,9 +557,23 @@ mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters)
if (IS_ERR_OR_NULL(flow_meters))
return;
mlx5e_post_meter_cleanup(flow_meters->post_meter);
mlx5_aso_destroy(flow_meters->aso);
mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn);
kfree(flow_meters);
}
void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse)
{
u64 bytes1, packets1, lastuse1;
u64 bytes2, packets2, lastuse2;
mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1);
mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2);
*bytes = bytes1 + bytes2;
*packets = packets1 + packets2;
*drops = packets2;
*lastuse = max_t(u64, lastuse1, lastuse2);
}
......@@ -4,6 +4,7 @@
#ifndef __MLX5_EN_FLOW_METER_H__
#define __MLX5_EN_FLOW_METER_H__
struct mlx5e_post_meter_priv;
struct mlx5e_flow_meter_aso_obj;
struct mlx5e_flow_meters;
struct mlx5_flow_attr;
......@@ -30,11 +31,15 @@ struct mlx5e_flow_meter_handle {
int refcnt;
struct hlist_node hlist;
struct mlx5e_flow_meter_params params;
struct mlx5_fc *green_counter;
struct mlx5_fc *red_counter;
};
struct mlx5e_meter_attr {
struct mlx5e_flow_meter_params params;
struct mlx5e_flow_meter_handle *meter;
struct mlx5e_post_meter_priv *post_meter;
};
int
......@@ -46,9 +51,14 @@ struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
void
mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter);
int
mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter,
struct mlx5e_flow_meter_params *params);
struct mlx5e_flow_meter_handle *
mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params);
struct mlx5_flow_table *
mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters);
enum mlx5_flow_namespace_type
mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters);
struct mlx5e_flow_meters *
mlx5e_flow_meters_init(struct mlx5e_priv *priv,
......@@ -57,4 +67,8 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv,
void
mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters);
void
mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter,
u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse);
#endif /* __MLX5_EN_FLOW_METER_H__ */
......@@ -84,9 +84,11 @@ mlx5e_post_meter_fg_create(struct mlx5e_priv *priv,
static int
mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
struct mlx5e_post_meter_priv *post_meter,
struct mlx5e_post_act *post_act)
struct mlx5e_post_act *post_act,
struct mlx5_fc *green_counter,
struct mlx5_fc *red_counter)
{
struct mlx5_flow_destination dest = {};
struct mlx5_flow_destination dest[2] = {};
struct mlx5_flow_act flow_act = {};
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
......@@ -98,10 +100,13 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[0].counter_id = mlx5_fc_id(red_counter);
rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, NULL, 0);
rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1);
if (IS_ERR(rule)) {
mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n");
err = PTR_ERR(rule);
......@@ -111,11 +116,14 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv,
mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG,
MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = mlx5e_tc_post_act_get_ft(post_act);
rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, &dest, 1);
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest[0].ft = mlx5e_tc_post_act_get_ft(post_act);
dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
dest[1].counter_id = mlx5_fc_id(green_counter);
rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2);
if (IS_ERR(rule)) {
mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n");
err = PTR_ERR(rule);
......@@ -155,7 +163,9 @@ mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter)
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act)
struct mlx5e_post_act *post_act,
struct mlx5_fc *green_counter,
struct mlx5_fc *red_counter)
{
struct mlx5e_post_meter_priv *post_meter;
int err;
......@@ -172,7 +182,8 @@ mlx5e_post_meter_init(struct mlx5e_priv *priv,
if (err)
goto err_fg;
err = mlx5e_post_meter_rules_create(priv, post_meter, post_act);
err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter,
red_counter);
if (err)
goto err_rules;
......
......@@ -20,7 +20,9 @@ mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter);
struct mlx5e_post_meter_priv *
mlx5e_post_meter_init(struct mlx5e_priv *priv,
enum mlx5_flow_namespace_type ns_type,
struct mlx5e_post_act *post_act);
struct mlx5e_post_act *post_act,
struct mlx5_fc *green_counter,
struct mlx5_fc *red_counter);
void
mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter);
......
......@@ -11,7 +11,6 @@
#define MLX5E_TC_MAX_SPLITS 1
#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains)
enum {
MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT,
......@@ -44,6 +43,8 @@ struct mlx5e_tc_flow_parse_attr {
struct mlx5e_tc_act_parse_state parse_state;
};
struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc);
/* Helper struct for accessing a struct containing list_head array.
* Containing struct
* |- Helper array
......
......@@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv,
if (!spec)
return ERR_PTR(-ENOMEM);
fs_tcp = priv->fs.accel_tcp;
fs_tcp = priv->fs->accel_tcp;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
......@@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule;
int err = 0;
fs_tcp = priv->fs.accel_tcp;
fs_tcp = priv->fs->accel_tcp;
accel_fs_t = &fs_tcp->tables[type];
dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type));
dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type));
rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
......@@ -267,7 +267,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type)
{
struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type];
struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type];
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_
ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
......@@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
/* Modify ttc rules destination to point back to the indir TIRs */
err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i));
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
......@@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
dest.ft = priv->fs.accel_tcp->tables[i].t;
dest.ft = priv->fs->accel_tcp->tables[i].t;
/* Modify ttc rules destination to point on the accel_fs FTs */
err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest);
err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] destination to accel failed, err(%d)\n",
......@@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i)
{
struct mlx5e_accel_fs_tcp *fs_tcp;
fs_tcp = priv->fs.accel_tcp;
fs_tcp = priv->fs->accel_tcp;
if (IS_ERR_OR_NULL(fs_tcp->tables[i].t))
return;
......@@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
{
int i;
if (!priv->fs.accel_tcp)
if (!priv->fs->accel_tcp)
return;
accel_fs_tcp_disable(priv);
......@@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv)
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++)
accel_fs_tcp_destroy_table(priv, i);
kfree(priv->fs.accel_tcp);
priv->fs.accel_tcp = NULL;
kfree(priv->fs->accel_tcp);
priv->fs->accel_tcp = NULL;
}
int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
......@@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version))
return -EOPNOTSUPP;
priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL);
if (!priv->fs.accel_tcp)
priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL);
if (!priv->fs->accel_tcp)
return -ENOMEM;
for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) {
......@@ -396,7 +396,7 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv)
while (--i >= 0)
accel_fs_tcp_destroy_table(priv, i);
kfree(priv->fs.accel_tcp);
priv->fs.accel_tcp = NULL;
kfree(priv->fs->accel_tcp);
priv->fs->accel_tcp = NULL;
return err;
}
......@@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
fs_prot = &accel_esp->fs_prot[type];
fs_prot->default_dest =
mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type));
mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type));
ft_attr.max_fte = 1;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft))
return PTR_ERR(ft);
......@@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
ft_attr.prio = MLX5E_NIC_PRIO;
ft_attr.autogroup.num_reserved_entries = 1;
ft_attr.autogroup.max_num_groups = 1;
ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft)) {
err = PTR_ERR(ft);
goto err_fs_ft;
......@@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
/* connect */
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = fs_prot->ft;
mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest);
mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest);
skip:
fs_prot->refcnt++;
......@@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type)
goto out;
/* disconnect */
mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type));
mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type));
/* remove FT */
rx_destroy(priv, type);
......
......@@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv)
for (i = 0; i < ARFS_NUM_TYPES; i++) {
/* Modify ttc rules destination back to their default */
err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i));
err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i));
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] default destination failed, err(%d)\n",
......@@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv)
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
dest.ft = priv->fs.arfs->arfs_tables[i].ft.t;
dest.ft = priv->fs->arfs->arfs_tables[i].ft.t;
/* Modify ttc rules destination to point on the aRFS FTs */
err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest);
err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest);
if (err) {
netdev_err(priv->netdev,
"%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
......@@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv)
int i;
arfs_del_rules(priv);
destroy_workqueue(priv->fs.arfs->wq);
destroy_workqueue(priv->fs->arfs->wq);
for (i = 0; i < ARFS_NUM_TYPES; i++) {
if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]);
if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t))
arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]);
}
}
......@@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv)
return;
_mlx5e_cleanup_tables(priv);
kvfree(priv->fs.arfs);
kvfree(priv->fs->arfs);
}
static int arfs_add_default_rule(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type];
struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type];
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
enum mlx5_traffic_types tt;
......@@ -321,7 +321,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
static int arfs_create_table(struct mlx5e_priv *priv,
enum arfs_type type)
{
struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
......@@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv,
ft_attr.level = MLX5E_ARFS_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
ft->t = NULL;
......@@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
if (!(priv->netdev->hw_features & NETIF_F_NTUPLE))
return 0;
priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL);
if (!priv->fs.arfs)
priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL);
if (!priv->fs->arfs)
return -ENOMEM;
spin_lock_init(&priv->fs.arfs->arfs_lock);
INIT_LIST_HEAD(&priv->fs.arfs->rules);
priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs.arfs->wq)
spin_lock_init(&priv->fs->arfs->arfs_lock);
INIT_LIST_HEAD(&priv->fs->arfs->rules);
priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
if (!priv->fs->arfs->wq)
goto err;
for (i = 0; i < ARFS_NUM_TYPES; i++) {
......@@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
err_des:
_mlx5e_cleanup_tables(priv);
err:
kvfree(priv->fs.arfs);
kvfree(priv->fs->arfs);
return err;
}
......@@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
int i;
int j;
spin_lock_bh(&priv->fs.arfs->arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
spin_lock_bh(&priv->fs->arfs->arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
......@@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
break;
}
}
spin_unlock_bh(&priv->fs.arfs->arfs_lock);
spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
if (arfs_rule->rule)
mlx5_del_flow_rules(arfs_rule->rule);
......@@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv)
int i;
int j;
spin_lock_bh(&priv->fs.arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) {
spin_lock_bh(&priv->fs->arfs->arfs_lock);
mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) {
hlist_del_init(&rule->hlist);
hlist_add_head(&rule->hlist, &del_list);
}
spin_unlock_bh(&priv->fs.arfs->arfs_lock);
spin_unlock_bh(&priv->fs->arfs->arfs_lock);
hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
cancel_work_sync(&rule->arfs_work);
......@@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct arfs_rule *arfs_rule)
{
struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
struct mlx5_flow_destination dest = {};
......@@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work)
mutex_lock(&priv->state_lock);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
spin_lock_bh(&priv->fs.arfs->arfs_lock);
spin_lock_bh(&priv->fs->arfs->arfs_lock);
hlist_del(&arfs_rule->hlist);
spin_unlock_bh(&priv->fs.arfs->arfs_lock);
spin_unlock_bh(&priv->fs->arfs->arfs_lock);
mutex_unlock(&priv->state_lock);
kfree(arfs_rule);
......@@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
tuple->dst_port = fk->ports.dst;
rule->flow_id = flow_id;
rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER;
rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER;
hlist_add_head(&rule->hlist,
arfs_hash_bucket(arfs_t, tuple->src_port,
......@@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_arfs_tables *arfs = priv->fs.arfs;
struct mlx5e_arfs_tables *arfs = priv->fs->arfs;
struct arfs_table *arfs_t;
struct arfs_rule *arfs_rule;
struct flow_keys fk;
......@@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
return -ENOMEM;
}
}
queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work);
queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work);
spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id;
}
......
......@@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
eth_ft = &priv->fs->ethtool.l3_l4_ft[prio];
break;
case ETHER_FLOW:
max_tuples = ETHTOOL_NUM_L2_FTS;
prio = max_tuples - num_tuples;
eth_ft = &priv->fs.ethtool.l2_ft[prio];
eth_ft = &priv->fs->ethtool.l2_ft[prio];
prio += MLX5E_ETHTOOL_L2_PRIO;
break;
default:
......@@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv,
struct mlx5e_ethtool_rule *rule)
{
struct mlx5e_ethtool_rule *iter;
struct list_head *head = &priv->fs.ethtool.rules;
struct list_head *head = &priv->fs->ethtool.rules;
list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location > rule->flow_spec.location)
break;
head = &iter->list;
}
priv->fs.ethtool.tot_num_rules++;
priv->fs->ethtool.tot_num_rules++;
list_add(&rule->list, head);
}
......@@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv,
if (eth_rule->rss)
mlx5e_rss_refcnt_dec(eth_rule->rss);
list_del(&eth_rule->list);
priv->fs.ethtool.tot_num_rules--;
priv->fs->ethtool.tot_num_rules--;
put_flow_table(eth_rule->eth_ft);
kfree(eth_rule);
}
......@@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv,
{
struct mlx5e_ethtool_rule *iter;
list_for_each_entry(iter, &priv->fs.ethtool.rules, list) {
list_for_each_entry(iter, &priv->fs->ethtool.rules, list) {
if (iter->flow_spec.location == location)
return iter;
}
......@@ -788,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL;
list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) {
list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) {
int index;
if (eth_rule->flow_spec.location != location)
......@@ -831,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv)
struct mlx5e_ethtool_rule *iter;
struct mlx5e_ethtool_rule *temp;
list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list)
list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list)
del_ethtool_rule(priv, iter);
}
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{
INIT_LIST_HEAD(&priv->fs.ethtool.rules);
INIT_LIST_HEAD(&priv->fs->ethtool.rules);
}
static int flow_type_to_traffic_type(u32 flow_type)
......@@ -963,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv,
switch (info->cmd) {
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
info->rule_cnt = priv->fs->ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
......
......@@ -3778,20 +3778,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
mutex_lock(&priv->state_lock);
priv->fs->vlan_strip_disable = !enable;
priv->channels.params.vlan_strip_disable = !enable;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
goto unlock;
err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
if (err)
if (err) {
priv->fs->vlan_strip_disable = enable;
priv->channels.params.vlan_strip_disable = enable;
}
unlock:
mutex_unlock(&priv->state_lock);
return err;
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_flow_steering *fs = priv->fs;
if (mlx5e_is_uplink_rep(priv))
return 0; /* no vlan table for uplink rep */
return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid);
}
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_flow_steering *fs = priv->fs;
if (mlx5e_is_uplink_rep(priv))
return 0; /* no vlan table for uplink rep */
return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid);
}
#ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
......@@ -3889,8 +3914,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
mutex_lock(&priv->state_lock);
params = &priv->channels.params;
if (!priv->fs.vlan ||
!bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) {
if (!priv->fs->vlan ||
!bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) {
/* HW strips the outer C-tag header, this is a problem
* for S-tag traffic.
*/
......@@ -5013,6 +5038,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_flow_steering *fs;
int err;
mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu);
......@@ -5020,11 +5046,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev,
mlx5e_timestamp_init(priv);
err = mlx5e_fs_init(priv);
if (err) {
fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (!fs) {
err = -ENOMEM;
mlx5_core_err(mdev, "FS initialization failed, %d\n", err);
return err;
}
priv->fs = fs;
err = mlx5e_ipsec_init(priv);
if (err)
......@@ -5043,7 +5072,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
mlx5e_health_destroy_reporters(priv);
mlx5e_ktls_cleanup(priv);
mlx5e_ipsec_cleanup(priv);
mlx5e_fs_cleanup(priv);
mlx5e_fs_cleanup(priv->fs);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
......@@ -5166,7 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
mlx5e_init_l2_addr(priv);
mlx5e_fs_init_l2_addr(priv->fs, netdev);
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
......@@ -5251,7 +5280,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = {
.stats_grps_num = mlx5e_nic_stats_grps_num,
.features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) |
BIT(MLX5E_PROFILE_FEATURE_PTP_TX) |
BIT(MLX5E_PROFILE_FEATURE_QOS_HTB),
BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) |
BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) |
BIT(MLX5E_PROFILE_FEATURE_FS_TC),
};
static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev,
......@@ -5301,6 +5332,14 @@ int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev)
+ mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile);
}
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
set_rx_mode_work);
return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev);
}
/* mlx5e generic netdev management API (move to en_common.c) */
int mlx5e_priv_init(struct mlx5e_priv *priv,
const struct mlx5e_profile *profile,
......@@ -5478,6 +5517,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
int err;
clear_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
/* max number of channels may have changed */
max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile);
......@@ -5537,6 +5578,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
out:
mlx5e_reset_channels(priv->netdev);
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
}
......@@ -5546,6 +5589,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv)
const struct mlx5e_profile *profile = priv->profile;
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (priv->fs)
priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
if (profile->disable)
profile->disable(priv);
......
......@@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev,
static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
mlx5e_fs_cleanup(priv->fs);
mlx5e_ipsec_cleanup(priv);
}
......@@ -728,8 +729,8 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
struct ttc_params ttc_params = {};
int err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
mlx5e_set_ttc_params(priv, &ttc_params, false);
......@@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
/* To give uplik rep TTC a lower level for chaining from root ft */
ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1;
priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
if (IS_ERR(priv->fs.ttc)) {
err = PTR_ERR(priv->fs.ttc);
priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
if (IS_ERR(priv->fs->ttc)) {
err = PTR_ERR(priv->fs->ttc);
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n",
err);
return err;
......@@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv)
/* non uplik reps will skip any bypass tables and go directly to
* their own ttc
*/
rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc);
rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc);
return 0;
}
......@@ -835,11 +836,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res)
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
}
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
goto err_free_fs;
}
mlx5e_init_l2_addr(priv);
mlx5e_fs_init_l2_addr(priv->fs, priv->netdev);
err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
if (err) {
......@@ -873,13 +883,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
err_destroy_root_ft:
mlx5e_destroy_rep_root_ft(priv);
err_destroy_ttc_table:
mlx5_destroy_ttc_table(priv->fs.ttc);
mlx5_destroy_ttc_table(priv->fs->ttc);
err_destroy_rx_res:
mlx5e_rx_res_destroy(priv->rx_res);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
err_free_fs:
mlx5e_fs_cleanup(priv->fs);
return err;
}
......@@ -888,7 +900,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5_destroy_ttc_table(priv->fs.ttc);
mlx5_destroy_ttc_table(priv->fs->ttc);
mlx5e_rx_res_destroy(priv->rx_res);
mlx5e_close_drop_rq(&priv->drop_rq);
mlx5e_rx_res_free(priv->rx_res);
......
......@@ -356,6 +356,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
#endif
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void);
void mlx5e_tc_table_free(struct mlx5e_tc_table *tc);
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
......@@ -376,6 +378,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb);
#else /* CONFIG_MLX5_CLS_ACT */
static inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; }
static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {}
static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe)
{ return false; }
static inline bool
......
......@@ -152,7 +152,7 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
*hopbyhop = 0;
if (skb->encapsulation) {
ihs = skb_tcp_all_headers(skb);
ihs = skb_inner_tcp_all_headers(skb);
stats->tso_inner_packets++;
stats->tso_inner_bytes += skb->len - ihs;
} else {
......
......@@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
{
int err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
priv->fs->ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
if (!priv->fs.ns)
if (!priv->fs->ns)
return -EINVAL;
err = mlx5e_arfs_create_tables(priv);
......@@ -364,9 +364,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
struct mlx5_core_dev *mdev = priv->mdev;
int err;
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res)
priv->fs = mlx5e_fs_init(priv->profile, mdev,
!test_bit(MLX5E_STATE_DESTROYING, &priv->state));
if (!priv->fs) {
netdev_err(priv->netdev, "FS allocation failed\n");
return -ENOMEM;
}
priv->rx_res = mlx5e_rx_res_alloc();
if (!priv->rx_res) {
err = -ENOMEM;
goto err_free_fs;
}
mlx5e_create_q_counters(priv);
......@@ -397,6 +406,8 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
err_free_fs:
mlx5e_fs_cleanup(priv->fs);
return err;
}
......@@ -408,6 +419,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
mlx5e_destroy_q_counters(priv);
mlx5e_rx_res_free(priv->rx_res);
priv->rx_res = NULL;
mlx5e_fs_cleanup(priv->fs);
}
/* The stats groups order is opposite to the update_stats() order calls */
......
......@@ -89,6 +89,7 @@ enum dr_ste_v1_action_id {
DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d,
DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e,
DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f,
DR_STE_V1_ACTION_ID_ASO = 0x12,
DR_STE_V1_ACTION_ID_TRAILER = 0x13,
DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14,
DR_STE_V1_ACTION_ID_MAX = 0x21,
......@@ -129,6 +130,10 @@ enum {
DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91,
};
enum dr_ste_v1_aso_ctx_type {
DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2,
};
static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = {
[MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = {
.hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31,
......@@ -494,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p,
dr_ste_v1_set_reparse(hw_ste_p);
}
static void dr_ste_v1_set_aso_flow_meter(u8 *d_action,
u32 object_id,
u32 offset,
u8 dest_reg_id,
u8 init_color)
{
MLX5_SET(ste_double_action_aso_v1, d_action, action_id,
DR_STE_V1_ACTION_ID_ASO);
MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number,
object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ));
/* Convert reg_c index to HW 64bit index */
MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id,
(dest_reg_id - 1) / 2);
MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type,
DR_STE_V1_ASO_CTX_TYPE_POLICERS);
MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id,
offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ);
MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color,
init_color);
}
static void dr_ste_v1_arr_init_next_match(u8 **last_ste,
u32 *added_stes,
u16 gvmi)
......@@ -629,6 +655,21 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_aso_flow_meter(action,
attr->aso_flow_meter.obj_id,
attr->aso_flow_meter.offset,
attr->aso_flow_meter.dest_reg_id,
attr->aso_flow_meter.init_color);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
......@@ -802,6 +843,21 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn,
action += DR_STE_ACTION_SINGLE_SZ;
}
if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) {
if (action_sz < DR_STE_ACTION_DOUBLE_SZ) {
dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi);
action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action);
action_sz = DR_STE_ACTION_TRIPLE_SZ;
}
dr_ste_v1_set_aso_flow_meter(action,
attr->aso_flow_meter.obj_id,
attr->aso_flow_meter.offset,
attr->aso_flow_meter.dest_reg_id,
attr->aso_flow_meter.init_color);
action_sz -= DR_STE_ACTION_DOUBLE_SZ;
action += DR_STE_ACTION_DOUBLE_SZ;
}
dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi);
dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1);
}
......
......@@ -127,6 +127,7 @@ enum mlx5dr_action_type {
DR_ACTION_TYP_INSERT_HDR,
DR_ACTION_TYP_REMOVE_HDR,
DR_ACTION_TYP_SAMPLER,
DR_ACTION_TYP_ASO_FLOW_METER,
DR_ACTION_TYP_MAX,
};
......@@ -271,6 +272,13 @@ struct mlx5dr_ste_actions_attr {
int count;
u32 headers[MLX5DR_MAX_VLANS];
} vlans;
struct {
u32 obj_id;
u32 offset;
u8 dest_reg_id;
u8 init_color;
} aso_flow_meter;
};
void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx,
......@@ -1035,6 +1043,14 @@ struct mlx5dr_rule_action_member {
struct list_head list;
};
struct mlx5dr_action_aso_flow_meter {
struct mlx5dr_domain *dmn;
u32 obj_id;
u32 offset;
u8 dest_reg_id;
u8 init_color;
};
struct mlx5dr_action {
enum mlx5dr_action_type action_type;
refcount_t refcount;
......@@ -1049,6 +1065,7 @@ struct mlx5dr_action {
struct mlx5dr_action_vport *vport;
struct mlx5dr_action_push_vlan *push_vlan;
struct mlx5dr_action_flow_tag *flow_tag;
struct mlx5dr_action_aso_flow_meter *aso;
};
};
......
......@@ -500,6 +500,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
}
}
if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) {
if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) {
err = -EOPNOTSUPP;
goto free_actions;
}
tmp_action =
mlx5dr_action_create_aso(domain,
fte->action.exe_aso.object_id,
fte->action.exe_aso.return_reg_id,
fte->action.exe_aso.type,
fte->action.exe_aso.flow_meter.init_color,
fte->action.exe_aso.flow_meter.meter_idx);
if (!tmp_action) {
err = -ENOMEM;
goto free_actions;
}
fs_dr_actions[fs_dr_num_actions++] = tmp_action;
actions[num_actions++] = tmp_action;
}
params.match_sz = match_sz;
params.match_buf = (u64 *)fte->val;
if (num_term_actions == 1) {
......
......@@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits {
u8 reserved_at_38[0x8];
};
enum {
MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2,
};
struct mlx5_ifc_ste_aso_flow_meter_action_bits {
u8 reserved_at_0[0xc];
u8 action[0x1];
u8 initial_color[0x2];
u8 line_id[0x1];
};
struct mlx5_ifc_ste_double_action_aso_v1_bits {
u8 action_id[0x8];
u8 aso_context_number[0x18];
u8 dest_reg_id[0x2];
u8 change_ordering_tag[0x1];
u8 aso_check_ordering[0x1];
u8 aso_context_type[0x4];
u8 reserved_at_28[0x8];
union {
u8 aso_fields[0x10];
struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter;
};
};
#endif /* MLX5_IFC_DR_H */
......@@ -131,6 +131,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void);
struct mlx5dr_action *
mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr);
struct mlx5dr_action *
mlx5dr_action_create_aso(struct mlx5dr_domain *dmn,
u32 obj_id,
u8 return_reg_id,
u8 aso_type,
u8 init_color,
u8 meter_id);
int mlx5dr_action_destroy(struct mlx5dr_action *action);
static inline bool
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment