Commit e4d264e8 authored by Jakub Kicinski

Merge tag 'ipsec-libreswan-mlx5' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux

Leon Romanovsky says:

====================
Extend packet offload to fully support libreswan

The following patches are an outcome of Raed's work to add packet
offload support to libreswan [1].

The series includes:
 * Priority support to IPsec policies
 * Statistics per-SA (visible through "ip -s xfrm state ..." command)
 * Support to IKE policy holes
 * Fine tuning of the acquire logic.

[1] https://github.com/libreswan/libreswan/pull/986
Link: https://lore.kernel.org/all/cover.1678714336.git.leon@kernel.org

* tag 'ipsec-libreswan-mlx5' of https://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux:
  net/mlx5e: Update IPsec per SA packets/bytes count
  net/mlx5e: Use one rule to count all IPsec Tx offloaded traffic
  net/mlx5e: Support IPsec acquire default SA
  net/mlx5e: Allow policies with reqid 0, to support IKE policy holes
  xfrm: copy_to_user_state fetch offloaded SA packets/bytes statistics
  xfrm: add new device offload acquire flag
  net/mlx5e: Use chains for IPsec policy priority offload
  net/mlx5: fs_core: Allow ignore_flow_level on TX dest
  net/mlx5: fs_chains: Refactor to detach chains from tc usage
====================

Link: https://lore.kernel.org/r/20230320094722.1009304-1-leon@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3079bfdb 5a6cddb8
...@@ -308,6 +308,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, ...@@ -308,6 +308,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
struct net_device *netdev = x->xso.real_dev; struct net_device *netdev = x->xso.real_dev;
struct mlx5e_ipsec *ipsec; struct mlx5e_ipsec *ipsec;
struct mlx5e_priv *priv; struct mlx5e_priv *priv;
gfp_t gfp;
int err; int err;
priv = netdev_priv(netdev); priv = netdev_priv(netdev);
...@@ -315,16 +316,20 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, ...@@ -315,16 +316,20 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
return -EOPNOTSUPP; return -EOPNOTSUPP;
ipsec = priv->ipsec; ipsec = priv->ipsec;
err = mlx5e_xfrm_validate_state(priv->mdev, x, extack); gfp = (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ) ? GFP_ATOMIC : GFP_KERNEL;
if (err) sa_entry = kzalloc(sizeof(*sa_entry), gfp);
return err;
sa_entry = kzalloc(sizeof(*sa_entry), GFP_KERNEL);
if (!sa_entry) if (!sa_entry)
return -ENOMEM; return -ENOMEM;
sa_entry->x = x; sa_entry->x = x;
sa_entry->ipsec = ipsec; sa_entry->ipsec = ipsec;
/* Check if this SA is originated from acquire flow temporary SA */
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto out;
err = mlx5e_xfrm_validate_state(priv->mdev, x, extack);
if (err)
goto err_xfrm;
/* check esn */ /* check esn */
mlx5e_ipsec_update_esn_state(sa_entry); mlx5e_ipsec_update_esn_state(sa_entry);
...@@ -353,6 +358,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x, ...@@ -353,6 +358,7 @@ static int mlx5e_xfrm_add_state(struct xfrm_state *x,
mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv; mlx5e_ipsec_set_iv_esn : mlx5e_ipsec_set_iv;
INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state); INIT_WORK(&sa_entry->modify_work.work, _update_xfrm_state);
out:
x->xso.offload_handle = (unsigned long)sa_entry; x->xso.offload_handle = (unsigned long)sa_entry;
return 0; return 0;
...@@ -372,6 +378,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x) ...@@ -372,6 +378,9 @@ static void mlx5e_xfrm_del_state(struct xfrm_state *x)
struct mlx5e_ipsec *ipsec = sa_entry->ipsec; struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_sa_entry *old; struct mlx5e_ipsec_sa_entry *old;
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
return;
old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id); old = xa_erase_bh(&ipsec->sadb, sa_entry->ipsec_obj_id);
WARN_ON(old != sa_entry); WARN_ON(old != sa_entry);
} }
...@@ -380,9 +389,13 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x) ...@@ -380,9 +389,13 @@ static void mlx5e_xfrm_free_state(struct xfrm_state *x)
{ {
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
goto sa_entry_free;
cancel_work_sync(&sa_entry->modify_work.work); cancel_work_sync(&sa_entry->modify_work.work);
mlx5e_accel_ipsec_fs_del_rule(sa_entry); mlx5e_accel_ipsec_fs_del_rule(sa_entry);
mlx5_ipsec_free_sa_ctx(sa_entry); mlx5_ipsec_free_sa_ctx(sa_entry);
sa_entry_free:
kfree(sa_entry); kfree(sa_entry);
} }
...@@ -482,26 +495,26 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x) ...@@ -482,26 +495,26 @@ static void mlx5e_xfrm_advance_esn_state(struct xfrm_state *x)
static void mlx5e_xfrm_update_curlft(struct xfrm_state *x) static void mlx5e_xfrm_update_curlft(struct xfrm_state *x)
{ {
struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x); struct mlx5e_ipsec_sa_entry *sa_entry = to_ipsec_sa_entry(x);
int err; struct mlx5e_ipsec_rule *ipsec_rule = &sa_entry->ipsec_rule;
u64 packets, bytes, lastuse;
lockdep_assert_held(&x->lock); lockdep_assert(lockdep_is_held(&x->lock) ||
lockdep_is_held(&dev_net(x->xso.real_dev)->xfrm.xfrm_cfg_mutex));
if (sa_entry->attrs.soft_packet_limit == XFRM_INF) if (x->xso.flags & XFRM_DEV_OFFLOAD_FLAG_ACQ)
/* Limits are not configured, as soft limit
* must be lower than hard limit.
*/
return; return;
err = mlx5e_ipsec_aso_query(sa_entry, NULL); mlx5_fc_query_cached(ipsec_rule->fc, &bytes, &packets, &lastuse);
if (err) x->curlft.packets += packets;
return; x->curlft.bytes += bytes;
mlx5e_ipsec_aso_update_curlft(sa_entry, &x->curlft.packets);
} }
static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x, static int mlx5e_xfrm_validate_policy(struct mlx5_core_dev *mdev,
struct xfrm_policy *x,
struct netlink_ext_ack *extack) struct netlink_ext_ack *extack)
{ {
struct xfrm_selector *sel = &x->selector;
if (x->type != XFRM_POLICY_TYPE_MAIN) { if (x->type != XFRM_POLICY_TYPE_MAIN) {
NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types"); NL_SET_ERR_MSG_MOD(extack, "Cannot offload non-main policy types");
return -EINVAL; return -EINVAL;
...@@ -519,8 +532,9 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x, ...@@ -519,8 +532,9 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
return -EINVAL; return -EINVAL;
} }
if (!x->xfrm_vec[0].reqid) { if (!x->xfrm_vec[0].reqid && sel->proto == IPPROTO_IP &&
NL_SET_ERR_MSG_MOD(extack, "Cannot offload policy without reqid"); addr6_all_zero(sel->saddr.a6) && addr6_all_zero(sel->daddr.a6)) {
NL_SET_ERR_MSG_MOD(extack, "Unsupported policy with reqid 0 without at least one of upper protocol or ip addr(s) different than 0");
return -EINVAL; return -EINVAL;
} }
...@@ -529,12 +543,24 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x, ...@@ -529,12 +543,24 @@ static int mlx5e_xfrm_validate_policy(struct xfrm_policy *x,
return -EINVAL; return -EINVAL;
} }
if (x->selector.proto != IPPROTO_IP && if (sel->proto != IPPROTO_IP &&
(x->selector.proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) { (sel->proto != IPPROTO_UDP || x->xdo.dir != XFRM_DEV_OFFLOAD_OUT)) {
NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction"); NL_SET_ERR_MSG_MOD(extack, "Device does not support upper protocol other than UDP, and only Tx direction");
return -EINVAL; return -EINVAL;
} }
if (x->priority) {
if (!(mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_PRIO)) {
NL_SET_ERR_MSG_MOD(extack, "Device does not support policy priority");
return -EINVAL;
}
if (x->priority == U32_MAX) {
NL_SET_ERR_MSG_MOD(extack, "Device does not support requested policy priority");
return -EINVAL;
}
}
return 0; return 0;
} }
...@@ -560,6 +586,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry, ...@@ -560,6 +586,7 @@ mlx5e_ipsec_build_accel_pol_attrs(struct mlx5e_ipsec_pol_entry *pol_entry,
attrs->upspec.sport = ntohs(sel->sport); attrs->upspec.sport = ntohs(sel->sport);
attrs->upspec.sport_mask = ntohs(sel->sport_mask); attrs->upspec.sport_mask = ntohs(sel->sport_mask);
attrs->upspec.proto = sel->proto; attrs->upspec.proto = sel->proto;
attrs->prio = x->priority;
} }
static int mlx5e_xfrm_add_policy(struct xfrm_policy *x, static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
...@@ -576,7 +603,7 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x, ...@@ -576,7 +603,7 @@ static int mlx5e_xfrm_add_policy(struct xfrm_policy *x,
return -EOPNOTSUPP; return -EOPNOTSUPP;
} }
err = mlx5e_xfrm_validate_policy(x, extack); err = mlx5e_xfrm_validate_policy(priv->mdev, x, extack);
if (err) if (err)
return err; return err;
......
...@@ -94,6 +94,7 @@ enum mlx5_ipsec_cap { ...@@ -94,6 +94,7 @@ enum mlx5_ipsec_cap {
MLX5_IPSEC_CAP_ESN = 1 << 1, MLX5_IPSEC_CAP_ESN = 1 << 1,
MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2, MLX5_IPSEC_CAP_PACKET_OFFLOAD = 1 << 2,
MLX5_IPSEC_CAP_ROCE = 1 << 3, MLX5_IPSEC_CAP_ROCE = 1 << 3,
MLX5_IPSEC_CAP_PRIO = 1 << 4,
}; };
struct mlx5e_priv; struct mlx5e_priv;
...@@ -161,6 +162,7 @@ struct mlx5e_ipsec_rule { ...@@ -161,6 +162,7 @@ struct mlx5e_ipsec_rule {
struct mlx5_flow_handle *rule; struct mlx5_flow_handle *rule;
struct mlx5_modify_hdr *modify_hdr; struct mlx5_modify_hdr *modify_hdr;
struct mlx5_pkt_reformat *pkt_reformat; struct mlx5_pkt_reformat *pkt_reformat;
struct mlx5_fc *fc;
}; };
struct mlx5e_ipsec_modify_state_work { struct mlx5e_ipsec_modify_state_work {
...@@ -198,6 +200,7 @@ struct mlx5_accel_pol_xfrm_attrs { ...@@ -198,6 +200,7 @@ struct mlx5_accel_pol_xfrm_attrs {
u8 type : 2; u8 type : 2;
u8 dir : 2; u8 dir : 2;
u32 reqid; u32 reqid;
u32 prio;
}; };
struct mlx5e_ipsec_pol_entry { struct mlx5e_ipsec_pol_entry {
...@@ -233,9 +236,6 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec); ...@@ -233,9 +236,6 @@ void mlx5e_ipsec_aso_cleanup(struct mlx5e_ipsec *ipsec);
int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
struct mlx5_wqe_aso_ctrl_seg *data); struct mlx5_wqe_aso_ctrl_seg *data);
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets);
void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv, void mlx5e_accel_ipsec_fs_read_stats(struct mlx5e_priv *priv,
void *ipsec_stats); void *ipsec_stats);
...@@ -252,6 +252,13 @@ mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry) ...@@ -252,6 +252,13 @@ mlx5e_ipsec_pol2dev(struct mlx5e_ipsec_pol_entry *pol_entry)
{ {
return pol_entry->ipsec->mdev; return pol_entry->ipsec->mdev;
} }
/* addr6_all_zero() - test whether an IPv6 address is all-zero.
 * @addr6: pointer to the 4 x __be32 words of the address.
 *
 * Returns true iff all 16 bytes of the address are zero.
 *
 * Bug fix: the original compared only sizeof(*zaddr6) == 4 bytes (the
 * first word), so e.g. ::1 or any address whose non-zero bits lie past
 * the first 32 bits was wrongly reported as all-zero. Compare the whole
 * 16-byte array with sizeof(zaddr6).
 */
static inline bool addr6_all_zero(__be32 *addr6)
{
	static const __be32 zaddr6[4] = {};

	return !memcmp(addr6, zaddr6, sizeof(zaddr6));
}
#else #else
static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv) static inline void mlx5e_ipsec_init(struct mlx5e_priv *priv)
{ {
......
...@@ -36,11 +36,18 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev) ...@@ -36,11 +36,18 @@ u32 mlx5_ipsec_device_caps(struct mlx5_core_dev *mdev)
MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp)) MLX5_CAP_ETH(mdev, insert_trailer) && MLX5_CAP_ETH(mdev, swp))
caps |= MLX5_IPSEC_CAP_CRYPTO; caps |= MLX5_IPSEC_CAP_CRYPTO;
if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload) && if (MLX5_CAP_IPSEC(mdev, ipsec_full_offload)) {
MLX5_CAP_FLOWTABLE_NIC_TX(mdev, reformat_add_esp_trasport) && if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev,
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, reformat_del_esp_trasport) && reformat_add_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap)) MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD; reformat_del_esp_trasport) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, decap))
caps |= MLX5_IPSEC_CAP_PACKET_OFFLOAD;
if (MLX5_CAP_FLOWTABLE_NIC_TX(mdev, ignore_flow_level) &&
MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ignore_flow_level))
caps |= MLX5_IPSEC_CAP_PRIO;
}
if (mlx5_get_roce_state(mdev) && if (mlx5_get_roce_state(mdev) &&
MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA && MLX5_CAP_GEN_2(mdev, flow_table_type_2_type) & MLX5_FT_NIC_RX_2_NIC_RX_RDMA &&
...@@ -482,18 +489,3 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry, ...@@ -482,18 +489,3 @@ int mlx5e_ipsec_aso_query(struct mlx5e_ipsec_sa_entry *sa_entry,
spin_unlock_bh(&aso->lock); spin_unlock_bh(&aso->lock);
return ret; return ret;
} }
/* mlx5e_ipsec_aso_update_curlft() - report total packets passed by this SA.
 * @sa_entry: offloaded SA whose ASO hardware context is queried.
 * @packets:  out parameter receiving the cumulative packet count.
 *
 * NOTE(review): assumes the caller has refreshed aso->ctx via
 * mlx5e_ipsec_aso_query() before calling — confirm at call sites.
 */
void mlx5e_ipsec_aso_update_curlft(struct mlx5e_ipsec_sa_entry *sa_entry,
u64 *packets)
{
struct mlx5e_ipsec *ipsec = sa_entry->ipsec;
struct mlx5e_ipsec_aso *aso = ipsec->aso;
u64 hard_cnt;
/* remove_flow_pkt_cnt holds the packets remaining before the hard limit. */
hard_cnt = MLX5_GET(ipsec_aso, aso->ctx, remove_flow_pkt_cnt);
/* HW decreases the limit till it reaches zero to fire an event.
* We need to fix the calculations, so the returned count is a total
* number of passed packets and not how many are left.
*/
*packets = sa_entry->attrs.hard_packet_limit - hard_cnt;
}
...@@ -5181,22 +5181,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this, ...@@ -5181,22 +5181,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this,
return NOTIFY_DONE; return NOTIFY_DONE;
} }
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
int tc_grp_size, tc_tbl_size;
u32 max_flow_counter;
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
MLX5_CAP_GEN(dev, max_flow_counter_15_0);
tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
return tc_tbl_size;
}
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv) static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{ {
struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs); struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
...@@ -5269,10 +5253,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) ...@@ -5269,10 +5253,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED | attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED; MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
attr.ns = MLX5_FLOW_NAMESPACE_KERNEL; attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS; attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
attr.default_ft = tc->miss_t; attr.default_ft = tc->miss_t;
attr.mapping = chains_mapping; attr.mapping = chains_mapping;
attr.fs_base_prio = MLX5E_TC_PRIO;
tc->chains = mlx5_chains_create(dev, &attr); tc->chains = mlx5_chains_create(dev, &attr);
if (IS_ERR(tc->chains)) { if (IS_ERR(tc->chains)) {
...@@ -5280,6 +5264,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) ...@@ -5280,6 +5264,8 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
goto err_miss; goto err_miss;
} }
mlx5_chains_print_info(tc->chains);
tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr, tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);
......
...@@ -1374,14 +1374,11 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) ...@@ -1374,14 +1374,11 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
struct mlx5_flow_table *nf_ft, *ft; struct mlx5_flow_table *nf_ft, *ft;
struct mlx5_chains_attr attr = {}; struct mlx5_chains_attr attr = {};
struct mlx5_fs_chains *chains; struct mlx5_fs_chains *chains;
u32 fdb_max;
int err; int err;
fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);
esw_init_chains_offload_flags(esw, &attr.flags); esw_init_chains_offload_flags(esw, &attr.flags);
attr.ns = MLX5_FLOW_NAMESPACE_FDB; attr.ns = MLX5_FLOW_NAMESPACE_FDB;
attr.max_ft_sz = fdb_max; attr.fs_base_prio = FDB_TC_OFFLOAD;
attr.max_grp_num = esw->params.large_group_num; attr.max_grp_num = esw->params.large_group_num;
attr.default_ft = miss_fdb; attr.default_ft = miss_fdb;
attr.mapping = esw->offloads.reg_c0_obj_pool; attr.mapping = esw->offloads.reg_c0_obj_pool;
...@@ -1392,6 +1389,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb) ...@@ -1392,6 +1389,7 @@ esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
esw_warn(dev, "Failed to create fdb chains err(%d)\n", err); esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
return err; return err;
} }
mlx5_chains_print_info(chains);
esw->fdb_table.offloads.esw_chains_priv = chains; esw->fdb_table.offloads.esw_chains_priv = chains;
......
...@@ -137,7 +137,7 @@ ...@@ -137,7 +137,7 @@
#define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1) #define LAG_MIN_LEVEL (OFFLOADS_MIN_LEVEL + KERNEL_RX_MACSEC_MIN_LEVEL + 1)
#define KERNEL_TX_IPSEC_NUM_PRIOS 1 #define KERNEL_TX_IPSEC_NUM_PRIOS 1
#define KERNEL_TX_IPSEC_NUM_LEVELS 2 #define KERNEL_TX_IPSEC_NUM_LEVELS 3
#define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS) #define KERNEL_TX_IPSEC_MIN_LEVEL (KERNEL_TX_IPSEC_NUM_LEVELS)
#define KERNEL_TX_MACSEC_NUM_PRIOS 1 #define KERNEL_TX_MACSEC_NUM_PRIOS 1
...@@ -1762,7 +1762,8 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest, ...@@ -1762,7 +1762,8 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
if (ignore_level) { if (ignore_level) {
if (ft->type != FS_FT_FDB && if (ft->type != FS_FT_FDB &&
ft->type != FS_FT_NIC_RX) ft->type != FS_FT_NIC_RX &&
ft->type != FS_FT_NIC_TX)
return false; return false;
if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && if (dest->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
......
...@@ -14,10 +14,8 @@ ...@@ -14,10 +14,8 @@
#define chains_lock(chains) ((chains)->lock) #define chains_lock(chains) ((chains)->lock)
#define chains_ht(chains) ((chains)->chains_ht) #define chains_ht(chains) ((chains)->chains_ht)
#define prios_ht(chains) ((chains)->prios_ht) #define prios_ht(chains) ((chains)->prios_ht)
#define tc_default_ft(chains) ((chains)->tc_default_ft) #define chains_default_ft(chains) ((chains)->chains_default_ft)
#define tc_end_ft(chains) ((chains)->tc_end_ft) #define chains_end_ft(chains) ((chains)->chains_end_ft)
#define ns_to_chains_fs_prio(ns) ((ns) == MLX5_FLOW_NAMESPACE_FDB ? \
FDB_TC_OFFLOAD : MLX5E_TC_PRIO)
#define FT_TBL_SZ (64 * 1024) #define FT_TBL_SZ (64 * 1024)
struct mlx5_fs_chains { struct mlx5_fs_chains {
...@@ -28,13 +26,15 @@ struct mlx5_fs_chains { ...@@ -28,13 +26,15 @@ struct mlx5_fs_chains {
/* Protects above chains_ht and prios_ht */ /* Protects above chains_ht and prios_ht */
struct mutex lock; struct mutex lock;
struct mlx5_flow_table *tc_default_ft; struct mlx5_flow_table *chains_default_ft;
struct mlx5_flow_table *tc_end_ft; struct mlx5_flow_table *chains_end_ft;
struct mapping_ctx *chains_mapping; struct mapping_ctx *chains_mapping;
enum mlx5_flow_namespace_type ns; enum mlx5_flow_namespace_type ns;
u32 group_num; u32 group_num;
u32 flags; u32 flags;
int fs_base_prio;
int fs_base_level;
}; };
struct fs_chain { struct fs_chain {
...@@ -145,7 +145,7 @@ void ...@@ -145,7 +145,7 @@ void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains, mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft) struct mlx5_flow_table *ft)
{ {
tc_end_ft(chains) = ft; chains_end_ft(chains) = ft;
} }
static struct mlx5_flow_table * static struct mlx5_flow_table *
...@@ -164,11 +164,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, ...@@ -164,11 +164,11 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE; sz = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? FT_TBL_SZ : POOL_NEXT_SIZE;
ft_attr.max_fte = sz; ft_attr.max_fte = sz;
/* We use tc_default_ft(chains) as the table's next_ft till /* We use chains_default_ft(chains) as the table's next_ft till
* ignore_flow_level is allowed on FT creation and not just for FTEs. * ignore_flow_level is allowed on FT creation and not just for FTEs.
* Instead caller should add an explicit miss rule if needed. * Instead caller should add an explicit miss rule if needed.
*/ */
ft_attr.next_ft = tc_default_ft(chains); ft_attr.next_ft = chains_default_ft(chains);
/* The root table(chain 0, prio 1, level 0) is required to be /* The root table(chain 0, prio 1, level 0) is required to be
* connected to the previous fs_core managed prio. * connected to the previous fs_core managed prio.
...@@ -177,22 +177,22 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains, ...@@ -177,22 +177,22 @@ mlx5_chains_create_table(struct mlx5_fs_chains *chains,
*/ */
if (!mlx5_chains_ignore_flow_level_supported(chains) || if (!mlx5_chains_ignore_flow_level_supported(chains) ||
(chain == 0 && prio == 1 && level == 0)) { (chain == 0 && prio == 1 && level == 0)) {
ft_attr.level = level; ft_attr.level = chains->fs_base_level;
ft_attr.prio = prio - 1; ft_attr.prio = chains->fs_base_prio;
ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ? ns = (chains->ns == MLX5_FLOW_NAMESPACE_FDB) ?
mlx5_get_fdb_sub_ns(chains->dev, chain) : mlx5_get_fdb_sub_ns(chains->dev, chain) :
mlx5_get_flow_namespace(chains->dev, chains->ns); mlx5_get_flow_namespace(chains->dev, chains->ns);
} else { } else {
ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED; ft_attr.flags |= MLX5_FLOW_TABLE_UNMANAGED;
ft_attr.prio = ns_to_chains_fs_prio(chains->ns); ft_attr.prio = chains->fs_base_prio;
/* Firmware doesn't allow us to create another level 0 table, /* Firmware doesn't allow us to create another level 0 table,
* so we create all unmanaged tables as level 1. * so we create all unmanaged tables as level 1 (base + 1).
* *
* To connect them, we use explicit miss rules with * To connect them, we use explicit miss rules with
* ignore_flow_level. Caller is responsible to create * ignore_flow_level. Caller is responsible to create
* these rules (if needed). * these rules (if needed).
*/ */
ft_attr.level = 1; ft_attr.level = chains->fs_base_level + 1;
ns = mlx5_get_flow_namespace(chains->dev, chains->ns); ns = mlx5_get_flow_namespace(chains->dev, chains->ns);
} }
...@@ -220,7 +220,8 @@ create_chain_restore(struct fs_chain *chain) ...@@ -220,7 +220,8 @@ create_chain_restore(struct fs_chain *chain)
int err; int err;
if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) || if (chain->chain == mlx5_chains_get_nf_ft_chain(chains) ||
!mlx5_chains_prios_supported(chains)) !mlx5_chains_prios_supported(chains) ||
!chains->chains_mapping)
return 0; return 0;
err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index); err = mlx5_chains_get_chain_mapping(chains, chain->chain, &index);
...@@ -380,7 +381,7 @@ mlx5_chains_add_miss_rule(struct fs_chain *chain, ...@@ -380,7 +381,7 @@ mlx5_chains_add_miss_rule(struct fs_chain *chain,
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = next_ft; dest.ft = next_ft;
if (next_ft == tc_end_ft(chains) && if (chains->chains_mapping && next_ft == chains_end_ft(chains) &&
chain->chain != mlx5_chains_get_nf_ft_chain(chains) && chain->chain != mlx5_chains_get_nf_ft_chain(chains) &&
mlx5_chains_prios_supported(chains)) { mlx5_chains_prios_supported(chains)) {
act.modify_hdr = chain->miss_modify_hdr; act.modify_hdr = chain->miss_modify_hdr;
...@@ -494,8 +495,8 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains, ...@@ -494,8 +495,8 @@ mlx5_chains_create_prio(struct mlx5_fs_chains *chains,
/* Default miss for each chain: */ /* Default miss for each chain: */
next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ? next_ft = (chain == mlx5_chains_get_nf_ft_chain(chains)) ?
tc_default_ft(chains) : chains_default_ft(chains) :
tc_end_ft(chains); chains_end_ft(chains);
list_for_each(pos, &chain_s->prios_list) { list_for_each(pos, &chain_s->prios_list) {
struct prio *p = list_entry(pos, struct prio, list); struct prio *p = list_entry(pos, struct prio, list);
...@@ -681,7 +682,7 @@ mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio, ...@@ -681,7 +682,7 @@ mlx5_chains_put_table(struct mlx5_fs_chains *chains, u32 chain, u32 prio,
struct mlx5_flow_table * struct mlx5_flow_table *
mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains) mlx5_chains_get_tc_end_ft(struct mlx5_fs_chains *chains)
{ {
return tc_end_ft(chains); return chains_end_ft(chains);
} }
struct mlx5_flow_table * struct mlx5_flow_table *
...@@ -718,48 +719,38 @@ mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains, ...@@ -718,48 +719,38 @@ mlx5_chains_destroy_global_table(struct mlx5_fs_chains *chains,
static struct mlx5_fs_chains * static struct mlx5_fs_chains *
mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr) mlx5_chains_init(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{ {
struct mlx5_fs_chains *chains_priv; struct mlx5_fs_chains *chains;
u32 max_flow_counter;
int err; int err;
chains_priv = kzalloc(sizeof(*chains_priv), GFP_KERNEL); chains = kzalloc(sizeof(*chains), GFP_KERNEL);
if (!chains_priv) if (!chains)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) | chains->dev = dev;
MLX5_CAP_GEN(dev, max_flow_counter_15_0); chains->flags = attr->flags;
chains->ns = attr->ns;
mlx5_core_dbg(dev, chains->group_num = attr->max_grp_num;
"Init flow table chains, max counters(%d), groups(%d), max flow table size(%d)\n", chains->chains_mapping = attr->mapping;
max_flow_counter, attr->max_grp_num, attr->max_ft_sz); chains->fs_base_prio = attr->fs_base_prio;
chains->fs_base_level = attr->fs_base_level;
chains_priv->dev = dev; chains_default_ft(chains) = chains_end_ft(chains) = attr->default_ft;
chains_priv->flags = attr->flags;
chains_priv->ns = attr->ns;
chains_priv->group_num = attr->max_grp_num;
chains_priv->chains_mapping = attr->mapping;
tc_default_ft(chains_priv) = tc_end_ft(chains_priv) = attr->default_ft;
mlx5_core_info(dev, "Supported tc offload range - chains: %u, prios: %u\n", err = rhashtable_init(&chains_ht(chains), &chain_params);
mlx5_chains_get_chain_range(chains_priv),
mlx5_chains_get_prio_range(chains_priv));
err = rhashtable_init(&chains_ht(chains_priv), &chain_params);
if (err) if (err)
goto init_chains_ht_err; goto init_chains_ht_err;
err = rhashtable_init(&prios_ht(chains_priv), &prio_params); err = rhashtable_init(&prios_ht(chains), &prio_params);
if (err) if (err)
goto init_prios_ht_err; goto init_prios_ht_err;
mutex_init(&chains_lock(chains_priv)); mutex_init(&chains_lock(chains));
return chains_priv; return chains;
init_prios_ht_err: init_prios_ht_err:
rhashtable_destroy(&chains_ht(chains_priv)); rhashtable_destroy(&chains_ht(chains));
init_chains_ht_err: init_chains_ht_err:
kfree(chains_priv); kfree(chains);
return ERR_PTR(err); return ERR_PTR(err);
} }
...@@ -808,3 +799,9 @@ mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping) ...@@ -808,3 +799,9 @@ mlx5_chains_put_chain_mapping(struct mlx5_fs_chains *chains, u32 chain_mapping)
return mapping_remove(ctx, chain_mapping); return mapping_remove(ctx, chain_mapping);
} }
/* mlx5_chains_print_info() - log a debug-level summary of the chains
 * configuration (currently just the configured group count).
 */
void
mlx5_chains_print_info(struct mlx5_fs_chains *chains)
{
mlx5_core_dbg(chains->dev, "Flow table chains groups(%d)\n", chains->group_num);
}
...@@ -17,8 +17,9 @@ enum mlx5_chains_flags { ...@@ -17,8 +17,9 @@ enum mlx5_chains_flags {
struct mlx5_chains_attr { struct mlx5_chains_attr {
enum mlx5_flow_namespace_type ns; enum mlx5_flow_namespace_type ns;
int fs_base_prio;
int fs_base_level;
u32 flags; u32 flags;
u32 max_ft_sz;
u32 max_grp_num; u32 max_grp_num;
struct mlx5_flow_table *default_ft; struct mlx5_flow_table *default_ft;
struct mapping_ctx *mapping; struct mapping_ctx *mapping;
...@@ -68,6 +69,8 @@ void mlx5_chains_destroy(struct mlx5_fs_chains *chains); ...@@ -68,6 +69,8 @@ void mlx5_chains_destroy(struct mlx5_fs_chains *chains);
void void
mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains, mlx5_chains_set_end_ft(struct mlx5_fs_chains *chains,
struct mlx5_flow_table *ft); struct mlx5_flow_table *ft);
void
mlx5_chains_print_info(struct mlx5_fs_chains *chains);
#else /* CONFIG_MLX5_CLS_ACT */ #else /* CONFIG_MLX5_CLS_ACT */
...@@ -89,7 +92,9 @@ static inline struct mlx5_fs_chains * ...@@ -89,7 +92,9 @@ static inline struct mlx5_fs_chains *
mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr) mlx5_chains_create(struct mlx5_core_dev *dev, struct mlx5_chains_attr *attr)
{ return NULL; } { return NULL; }
static inline void static inline void
mlx5_chains_destroy(struct mlx5_fs_chains *chains) {}; mlx5_chains_destroy(struct mlx5_fs_chains *chains) {}
static inline void
mlx5_chains_print_info(struct mlx5_fs_chains *chains) {}
#endif /* CONFIG_MLX5_CLS_ACT */ #endif /* CONFIG_MLX5_CLS_ACT */
......
...@@ -138,6 +138,10 @@ enum { ...@@ -138,6 +138,10 @@ enum {
XFRM_DEV_OFFLOAD_PACKET, XFRM_DEV_OFFLOAD_PACKET,
}; };
enum {
XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
};
struct xfrm_dev_offload { struct xfrm_dev_offload {
struct net_device *dev; struct net_device *dev;
netdevice_tracker dev_tracker; netdevice_tracker dev_tracker;
...@@ -145,6 +149,7 @@ struct xfrm_dev_offload { ...@@ -145,6 +149,7 @@ struct xfrm_dev_offload {
unsigned long offload_handle; unsigned long offload_handle;
u8 dir : 2; u8 dir : 2;
u8 type : 2; u8 type : 2;
u8 flags : 2;
}; };
struct xfrm_mode { struct xfrm_mode {
......
...@@ -1272,6 +1272,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr, ...@@ -1272,6 +1272,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
xso->dir = xdo->dir; xso->dir = xdo->dir;
xso->dev = xdo->dev; xso->dev = xdo->dev;
xso->real_dev = xdo->real_dev; xso->real_dev = xdo->real_dev;
xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ;
netdev_tracker_alloc(xso->dev, &xso->dev_tracker, netdev_tracker_alloc(xso->dev, &xso->dev_tracker,
GFP_ATOMIC); GFP_ATOMIC);
error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL); error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL);
......
...@@ -901,6 +901,8 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p) ...@@ -901,6 +901,8 @@ static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
memcpy(&p->id, &x->id, sizeof(p->id)); memcpy(&p->id, &x->id, sizeof(p->id));
memcpy(&p->sel, &x->sel, sizeof(p->sel)); memcpy(&p->sel, &x->sel, sizeof(p->sel));
memcpy(&p->lft, &x->lft, sizeof(p->lft)); memcpy(&p->lft, &x->lft, sizeof(p->lft));
if (x->xso.dev)
xfrm_dev_state_update_curlft(x);
memcpy(&p->curlft, &x->curlft, sizeof(p->curlft)); memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
put_unaligned(x->stats.replay_window, &p->stats.replay_window); put_unaligned(x->stats.replay_window, &p->stats.replay_window);
put_unaligned(x->stats.replay, &p->stats.replay); put_unaligned(x->stats.replay, &p->stats.replay);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment