Commit cff9b79e authored by Jakub Kicinski

Merge tag 'mlx5-updates-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2023-01-18

1) From Rahul:
  1.1) Extended range for PTP adjtime and adjphase.
  1.2) An adjphase function to support hardware-only offset control.

2) From Roi, code cleanup to the TC module.

3) From Maor, TC support for Geneve and GRE with VF tunnel offload.

4) Cleanups and minor updates.

* tag 'mlx5-updates-2023-01-18' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Use read lock for eswitch get callbacks
  net/mlx5e: Remove redundant allocation of spec in create indirect fwd group
  net/mlx5e: Support Geneve and GRE with VF tunnel offload
  net/mlx5: E-Switch, Fix typo for egress
  net/mlx5e: Warn when destroying mod hdr hash table that is not empty
  net/mlx5e: TC, Use common function allocating flow mod hdr or encap mod hdr
  net/mlx5e: TC, Add tc prefix to attach/detach hdr functions
  net/mlx5e: TC, Pass flow attr to attach/detach mod hdr functions
  net/mlx5e: Add warning when log WQE size is smaller than log stride size
  net/mlx5e: Fail with messages when params are not valid for XSK
  net/mlx5: E-switch, Remove redundant comment about meta rules
  net/mlx5: Add hardware extended range support for PTP adjtime and adjphase
  net/mlx5: Add adjphase function to support hardware-only offset control
  net/mlx5: Suppress error logging on UCTX creation
  net/mlx5e: Suppress Send WQEBB room warning for PAGE_SIZE >= 16KB
====================

Link: https://lore.kernel.org/r/20230118183602.124323-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 1038bfb2 efb4879f
@@ -813,7 +813,8 @@ static void cmd_status_print(struct mlx5_core_dev *dev, void *in, void *out)
 	op_mod = MLX5_GET(mbox_in, in, op_mod);
 	uid = MLX5_GET(mbox_in, in, uid);

-	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
+	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY &&
+	    opcode != MLX5_CMD_OP_CREATE_UCTX)
 		mlx5_cmd_out_err(dev, opcode, op_mod, out);
 }
@@ -47,6 +47,7 @@ void mlx5e_mod_hdr_tbl_init(struct mod_hdr_tbl *tbl)
 void mlx5e_mod_hdr_tbl_destroy(struct mod_hdr_tbl *tbl)
 {
+	WARN_ON(!hash_empty(tbl->hlist));
 	mutex_destroy(&tbl->lock);
 }
@@ -411,9 +411,14 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
 {
 	enum mlx5e_mpwrq_umr_mode umr_mode = mlx5e_mpwrq_umr_mode(mdev, xsk);
 	u8 page_shift = mlx5e_mpwrq_page_shift(mdev, xsk);
+	u8 log_wqe_size, log_stride_size;

-	return mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode) -
-		mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+	log_wqe_size = mlx5e_mpwrq_log_wqe_sz(mdev, page_shift, umr_mode);
+	log_stride_size = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
+	WARN(log_wqe_size < log_stride_size,
+	     "Log WQE size %u < log stride size %u (page shift %u, umr mode %d, xsk on? %d)\n",
+	     log_wqe_size, log_stride_size, page_shift, umr_mode, !!xsk);
+	return log_wqe_size - log_stride_size;
 }

 u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
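
The WARN added above guards the u8 subtraction: a WQE of 2^log_wqe_size bytes holds 2^(log_wqe_size - log_stride_size) strides, and if a bad parameter combination ever made the stride logarithm exceed the WQE logarithm, the unsigned subtraction would wrap to a huge value instead of going negative. A minimal standalone sketch of that wrap, using made-up values rather than real driver parameters:

#include <stdio.h>

int main(void)
{
	/* Hypothetical bad parameters: stride logged larger than the WQE. */
	unsigned char log_wqe_size = 5, log_stride_size = 6;
	/* The int-promoted result -1 converts back to u8 as 255. */
	unsigned char diff = log_wqe_size - log_stride_size;

	printf("2^%u strides per WQE\n", diff); /* prints 2^255 */
	return 0;
}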
@@ -581,11 +586,16 @@ int mlx5e_mpwrq_validate_xsk(struct mlx5_core_dev *mdev, struct mlx5e_params *pa
 	bool unaligned = xsk ? xsk->unaligned : false;
 	u16 max_mtu_pkts;

-	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode))
+	if (!mlx5e_check_fragmented_striding_rq_cap(mdev, page_shift, umr_mode)) {
+		mlx5_core_err(mdev, "Striding RQ for XSK can't be activated with page_shift %u and umr_mode %d\n",
+			      page_shift, umr_mode);
 		return -EOPNOTSUPP;
+	}

-	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk))
+	if (!mlx5e_rx_mpwqe_is_linear_skb(mdev, params, xsk)) {
+		mlx5_core_err(mdev, "Striding RQ linear mode for XSK can't be activated with current params\n");
 		return -EINVAL;
+	}

 	/* Current RQ length is too big for the given frame size, the
 	 * needed number of WQEs exceeds the maximum.
@@ -95,8 +95,6 @@ struct mlx5e_tc_flow {
 	 */
 	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
 	struct mlx5e_tc_flow *peer_flow;
-	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
-	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
 	struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */
 	struct list_head hairpin; /* flows sharing the same hairpin */
 	struct list_head peer; /* flows with peer flow */
@@ -745,8 +745,6 @@ int mlx5e_tc_tun_route_lookup(struct mlx5e_priv *priv,
 		if (err)
 			goto out;

-		esw_attr->rx_tun_attr->vni = MLX5_GET(fte_match_param, spec->match_value,
-						      misc_parameters.vxlan_vni);
 		esw_attr->rx_tun_attr->decap_vport = vport_num;
 	} else if (netif_is_ovs_master(attr.route_dev) && mlx5e_tc_int_port_supported(esw)) {
 		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
@@ -1349,7 +1349,8 @@ static void mlx5e_invalidate_encap(struct mlx5e_priv *priv,
 			mlx5e_tc_unoffload_from_slow_path(esw, flow);
 		else
 			mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
-		mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
+
+		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
 		attr->modify_hdr = NULL;

 		esw_attr->dests[flow->tmp_entry_index].flags &=

@@ -1405,7 +1406,7 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
 			continue;
 		}

-		err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
+		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
 		if (err) {
 			mlx5_core_warn(priv->mdev, "Failed to update flow mod_hdr err=%d",
 				       err);
@@ -445,7 +445,7 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size)
 {
-	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev));
+	WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < (u16)mlx5e_get_max_sq_wqebbs(mdev));

 	/* A WQE must not cross the page boundary, hence two conditions:
 	 * 1. Its size must not exceed the page size.
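
The functional change here is only the (u16) cast on the helper's result. As a general C point (illustrative numbers below, not the driver's actual types or values), the width at which a comparison is evaluated can decide whether a check like this WARN fires:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper whose return type is wider than the 16-bit
 * quantity the caller actually wants to compare against. */
static uint32_t max_wqebbs(void)
{
	return 0x10004; /* only the low 16 bits are meaningful here */
}

int main(void)
{
	uint32_t room = 0x100;

	printf("unclamped: %d\n", room < max_wqebbs());           /* 1 */
	printf("clamped:   %d\n", room < (uint16_t)max_wqebbs()); /* 0 */
	return 0;
}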
@@ -7,6 +7,18 @@
 #include "en/health.h"
 #include <net/xdp_sock_drv.h>

+static int mlx5e_legacy_rq_validate_xsk(struct mlx5_core_dev *mdev,
+					struct mlx5e_params *params,
+					struct mlx5e_xsk_param *xsk)
+{
+	if (!mlx5e_rx_is_linear_skb(mdev, params, xsk)) {
+		mlx5_core_err(mdev, "Legacy RQ linear mode for XSK can't be activated with current params\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /* The limitation of 2048 can be altered, but shouldn't go beyond the minimal
  * stride size of striding RQ.
  */

@@ -17,8 +29,11 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 			      struct mlx5_core_dev *mdev)
 {
 	/* AF_XDP doesn't support frames larger than PAGE_SIZE. */
-	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE)
+	if (xsk->chunk_size > PAGE_SIZE || xsk->chunk_size < MLX5E_MIN_XSK_CHUNK_SIZE) {
+		mlx5_core_err(mdev, "XSK chunk size %u out of bounds [%u, %lu]\n", xsk->chunk_size,
+			      MLX5E_MIN_XSK_CHUNK_SIZE, PAGE_SIZE);
 		return false;
+	}

 	/* frag_sz is different for regular and XSK RQs, so ensure that linear
 	 * SKB mode is possible.

@@ -27,7 +42,7 @@ bool mlx5e_validate_xsk_param(struct mlx5e_params *params,
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 		return !mlx5e_mpwrq_validate_xsk(mdev, params, xsk);
 	default: /* MLX5_WQ_TYPE_CYCLIC */
-		return mlx5e_rx_is_linear_skb(mdev, params, xsk);
+		return !mlx5e_legacy_rq_validate_xsk(mdev, params, xsk);
 	}
 }
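
The new helper mirrors the convention used by mlx5e_mpwrq_validate_xsk above: validators return 0 or a negative errno and log why a configuration was rejected, while the boolean caller simply negates the result. A self-contained sketch of that pattern (names and bounds are illustrative only):

#include <errno.h>
#include <stdio.h>

static int validate_chunk_size(int chunk_size)
{
	if (chunk_size < 2048 || chunk_size > 4096) {
		fprintf(stderr, "chunk size %d out of bounds [2048, 4096]\n",
			chunk_size);
		return -EINVAL;
	}
	return 0;
}

static int chunk_size_is_valid(int chunk_size)
{
	return !validate_chunk_size(chunk_size); /* 1 = valid, 0 = invalid */
}

int main(void)
{
	printf("%d %d\n", chunk_size_is_valid(3000), chunk_size_is_valid(100));
	return 0;
}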
@@ -646,36 +646,36 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
 		&tc->mod_hdr;
 }

-static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
-				struct mlx5e_tc_flow *flow,
-				struct mlx5e_tc_flow_parse_attr *parse_attr)
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow *flow,
+			    struct mlx5_flow_attr *attr)
 {
-	struct mlx5_modify_hdr *modify_hdr;
 	struct mlx5e_mod_hdr_handle *mh;

 	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
 				  mlx5e_get_flow_namespace(flow),
-				  &parse_attr->mod_hdr_acts);
+				  &attr->parse_attr->mod_hdr_acts);
 	if (IS_ERR(mh))
 		return PTR_ERR(mh);

-	modify_hdr = mlx5e_mod_hdr_get(mh);
-	flow->attr->modify_hdr = modify_hdr;
-	flow->mh = mh;
+	WARN_ON(attr->modify_hdr);
+	attr->modify_hdr = mlx5e_mod_hdr_get(mh);
+	attr->mh = mh;

 	return 0;
 }

-static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
-				 struct mlx5e_tc_flow *flow)
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+			     struct mlx5e_tc_flow *flow,
+			     struct mlx5_flow_attr *attr)
 {
 	/* flow wasn't fully initialized */
-	if (!flow->mh)
+	if (!attr->mh)
 		return;

 	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
-			     flow->mh);
-	flow->mh = NULL;
+			     attr->mh);
+	attr->mh = NULL;
 }

 static
@@ -1433,7 +1433,7 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
 	}

 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
+		err = mlx5e_tc_attach_mod_hdr(priv, flow, attr);
 		if (err)
 			return err;
 	}

@@ -1493,7 +1493,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
-		mlx5e_detach_mod_hdr(priv, flow);
+		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
 	}

 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)

@@ -1604,7 +1604,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
 		goto err_offload;
 	}

-	flow->slow_mh = mh;
+	flow->attr->slow_mh = mh;
 	flow->chain_mapping = chain_mapping;
 	flow_flag_set(flow, SLOW);
@@ -1629,6 +1629,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
 void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
 				       struct mlx5e_tc_flow *flow)
 {
+	struct mlx5e_mod_hdr_handle *slow_mh = flow->attr->slow_mh;
 	struct mlx5_flow_attr *slow_attr;

 	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);

@@ -1641,16 +1642,16 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
 	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	slow_attr->esw_attr->split_count = 0;
 	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
-	if (flow->slow_mh) {
+	if (slow_mh) {
 		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
+		slow_attr->modify_hdr = mlx5e_mod_hdr_get(slow_mh);
 	}
 	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
-	if (flow->slow_mh) {
-		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
+	if (slow_mh) {
+		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), slow_mh);
 		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
 		flow->chain_mapping = 0;
-		flow->slow_mh = NULL;
+		flow->attr->slow_mh = NULL;
 	}
 	flow_flag_clear(flow, SLOW);
 	kfree(slow_attr);
@@ -1761,26 +1762,6 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro
 	return err;
 }

-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
-			      struct mlx5e_tc_flow *flow,
-			      struct mlx5_flow_attr *attr)
-{
-	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
-	struct mlx5_modify_hdr *mod_hdr;
-
-	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
-					   mlx5e_get_flow_namespace(flow),
-					   mod_hdr_acts->num_actions,
-					   mod_hdr_acts->actions);
-	if (IS_ERR(mod_hdr))
-		return PTR_ERR(mod_hdr);
-
-	WARN_ON(attr->modify_hdr);
-	attr->modify_hdr = mod_hdr;
-
-	return 0;
-}
-
 static int
 set_encap_dests(struct mlx5e_priv *priv,
 		struct mlx5e_tc_flow *flow,
@@ -1900,7 +1881,6 @@ verify_attr_actions(u32 actions, struct netlink_ext_ack *extack)
 static int
 post_process_attr(struct mlx5e_tc_flow *flow,
 		  struct mlx5_flow_attr *attr,
-		  bool is_post_act_attr,
 		  struct netlink_ext_ack *extack)
 {
 	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

@@ -1922,27 +1902,21 @@ post_process_attr(struct mlx5e_tc_flow *flow,
 	}

 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		if (vf_tun || is_post_act_attr) {
-			err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
-			if (err)
-				goto err_out;
-		} else {
-			err = mlx5e_attach_mod_hdr(flow->priv, flow, attr->parse_attr);
-			if (err)
-				goto err_out;
-		}
+		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr);
+		if (err)
+			goto err_out;
 	}

 	if (attr->branch_true &&
 	    attr->branch_true->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_true);
+		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_true);
 		if (err)
 			goto err_out;
 	}

 	if (attr->branch_false &&
 	    attr->branch_false->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
-		err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr->branch_false);
+		err = mlx5e_tc_attach_mod_hdr(flow->priv, flow, attr->branch_false);
 		if (err)
 			goto err_out;
 	}
@@ -2056,7 +2030,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
 		esw_attr->int_port = int_port;
 	}

-	err = post_process_attr(flow, attr, false, extack);
+	err = post_process_attr(flow, attr, extack);
 	if (err)
 		goto err_out;

@@ -2141,10 +2115,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
-		if (vf_tun && attr->modify_hdr)
-			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
-		else
-			mlx5e_detach_mod_hdr(priv, flow);
+		mlx5e_tc_detach_mod_hdr(priv, flow, attr);
 	}

 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
@@ -2624,13 +2595,13 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
 		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
 		if (err)
 			return err;
-	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
+	} else if (tunnel) {
 		struct mlx5_flow_spec *tmp_spec;

 		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
 		if (!tmp_spec) {
-			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
-			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
+			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for tunnel tmp spec");
+			netdev_warn(priv->netdev, "Failed to allocate memory for tunnel tmp spec");
 			return -ENOMEM;
 		}
 		memcpy(tmp_spec, spec, sizeof(*tmp_spec));
@@ -3963,7 +3934,7 @@ alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
 	if (err)
 		goto out_free;

-	err = post_process_attr(flow, attr, true, extack);
+	err = post_process_attr(flow, attr, extack);
 	if (err)
 		goto out_free;

@@ -4530,8 +4501,7 @@ mlx5_free_flow_attr(struct mlx5e_tc_flow *flow, struct mlx5_flow_attr *attr)
 	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
 		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
-		if (attr->modify_hdr)
-			mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
+		mlx5e_tc_detach_mod_hdr(flow->priv, flow, attr);
 	}
 }

@@ -4653,9 +4623,6 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
 	if (err)
 		goto err_free;

-	/* always set IP version for indirect table handling */
-	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
-
 	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
 	if (err)
 		goto err_free;
@@ -71,6 +71,8 @@ struct mlx5_flow_attr {
 	u32 action;
 	struct mlx5_fc *counter;
 	struct mlx5_modify_hdr *modify_hdr;
+	struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */
+	struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */
 	struct mlx5_ct_attr ct_attr;
 	struct mlx5e_sample_attr sample_attr;
 	struct mlx5e_meter_attr meter_attr;

@@ -82,7 +84,6 @@ struct mlx5_flow_attr {
 	struct mlx5_flow_table *dest_ft;
 	u8 inner_match_level;
 	u8 outer_match_level;
-	u8 ip_version;
 	u8 tun_ip_version;
 	int tunnel_id; /* mapped tunnel id */
 	u32 flags;

@@ -134,7 +135,6 @@ struct mlx5_rx_tun_attr {
 		__be32 v4;
 		struct in6_addr v6;
 	} dst_ip; /* Valid if decap_vport is not zero */
-	u32 vni;
 };

 #define MLX5E_TC_TABLE_CHAIN_TAG_BITS 16

@@ -285,7 +285,11 @@ int mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
 					 enum mlx5e_tc_attr_to_reg type,
 					 u32 data);

-int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
-			      struct mlx5e_tc_flow *flow,
-			      struct mlx5_flow_attr *attr);
+int mlx5e_tc_attach_mod_hdr(struct mlx5e_priv *priv,
+			    struct mlx5e_tc_flow *flow,
+			    struct mlx5_flow_attr *attr);
+void mlx5e_tc_detach_mod_hdr(struct mlx5e_priv *priv,
+			     struct mlx5e_tc_flow *flow,
+			     struct mlx5_flow_attr *attr);
@@ -16,18 +16,12 @@
 #include "lib/fs_chains.h"
 #include "en/mod_hdr.h"

-#define MLX5_ESW_INDIR_TABLE_SIZE 128
-#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
+#define MLX5_ESW_INDIR_TABLE_SIZE 2
+#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
 #define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)

 struct mlx5_esw_indir_table_rule {
-	struct list_head list;
 	struct mlx5_flow_handle *handle;
-	union {
-		__be32 v4;
-		struct in6_addr v6;
-	} dst_ip;
-	u32 vni;
 	struct mlx5_modify_hdr *mh;
 	refcount_t refcnt;
 };
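
With at most one recirculation rule per table, two flow-table slots are enough: the recirc rule takes slot 0 and the forward rule slot 1. A trivial compile-time check of the arithmetic behind the new defines (standalone, mirroring the diff):

#define MLX5_ESW_INDIR_TABLE_SIZE 2
#define MLX5_ESW_INDIR_TABLE_RECIRC_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 2)
#define MLX5_ESW_INDIR_TABLE_FWD_IDX (MLX5_ESW_INDIR_TABLE_SIZE - 1)

_Static_assert(MLX5_ESW_INDIR_TABLE_RECIRC_IDX == 0, "recirc rule in slot 0");
_Static_assert(MLX5_ESW_INDIR_TABLE_FWD_IDX == 1, "fwd rule in slot 1");

int main(void) { return 0; }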
@@ -38,12 +32,10 @@ struct mlx5_esw_indir_table_entry {
 	struct mlx5_flow_group *recirc_grp;
 	struct mlx5_flow_group *fwd_grp;
 	struct mlx5_flow_handle *fwd_rule;
-	struct list_head recirc_rules;
-	int recirc_cnt;
+	struct mlx5_esw_indir_table_rule *recirc_rule;
 	int fwd_ref;

 	u16 vport;
-	u8 ip_version;
 };

 struct mlx5_esw_indir_table {

@@ -89,7 +81,6 @@ mlx5_esw_indir_table_needed(struct mlx5_eswitch *esw,
 	return esw_attr->in_rep->vport == MLX5_VPORT_UPLINK &&
 		vf_sf_vport &&
 		esw->dev == dest_mdev &&
-		attr->ip_version &&
 		attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE;
 }

@@ -101,27 +92,8 @@ mlx5_esw_indir_table_decap_vport(struct mlx5_flow_attr *attr)
 	return esw_attr->rx_tun_attr ? esw_attr->rx_tun_attr->decap_vport : 0;
 }

-static struct mlx5_esw_indir_table_rule *
-mlx5_esw_indir_table_rule_lookup(struct mlx5_esw_indir_table_entry *e,
-				 struct mlx5_esw_flow_attr *attr)
-{
-	struct mlx5_esw_indir_table_rule *rule;
-
-	list_for_each_entry(rule, &e->recirc_rules, list)
-		if (rule->vni == attr->rx_tun_attr->vni &&
-		    !memcmp(&rule->dst_ip, &attr->rx_tun_attr->dst_ip,
-			    sizeof(attr->rx_tun_attr->dst_ip)))
-			goto found;
-	return NULL;
-
-found:
-	refcount_inc(&rule->refcnt);
-	return rule;
-}
-
 static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 					 struct mlx5_flow_attr *attr,
-					 struct mlx5_flow_spec *spec,
 					 struct mlx5_esw_indir_table_entry *e)
 {
 	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
@@ -130,73 +102,18 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_esw_indir_table_rule *rule;
 	struct mlx5_flow_act flow_act = {};
-	struct mlx5_flow_spec *rule_spec;
 	struct mlx5_flow_handle *handle;
 	int err = 0;
 	u32 data;

-	rule = mlx5_esw_indir_table_rule_lookup(e, esw_attr);
-	if (rule)
+	if (e->recirc_rule) {
+		refcount_inc(&e->recirc_rule->refcnt);
 		return 0;
-
-	if (e->recirc_cnt == MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX)
-		return -EINVAL;
-
-	rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
-	if (!rule_spec)
-		return -ENOMEM;
-
-	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
-	if (!rule) {
-		err = -ENOMEM;
-		goto out;
 	}

-	rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
-					   MLX5_MATCH_MISC_PARAMETERS |
-					   MLX5_MATCH_MISC_PARAMETERS_2;
-	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version)) {
-		MLX5_SET(fte_match_param, rule_spec->match_criteria,
-			 outer_headers.ip_version, 0xf);
-		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version,
-			 attr->ip_version);
-	} else if (attr->ip_version) {
-		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
-				 outer_headers.ethertype);
-		MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ethertype,
-			 (attr->ip_version == 4 ? ETH_P_IP : ETH_P_IPV6));
-	} else {
-		err = -EOPNOTSUPP;
-		goto err_ethertype;
-	}
-
-	if (attr->ip_version == 4) {
-		MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-		MLX5_SET(fte_match_param, rule_spec->match_value,
-			 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
-			 ntohl(esw_attr->rx_tun_attr->dst_ip.v4));
-	} else if (attr->ip_version == 6) {
-		int len = sizeof(struct in6_addr);
-
-		memset(MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff, len);
-		memcpy(MLX5_ADDR_OF(fte_match_param, rule_spec->match_value,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       &esw_attr->rx_tun_attr->dst_ip.v6, len);
-	}
-
-	MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
-			 misc_parameters.vxlan_vni);
-	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters.vxlan_vni,
-		 MLX5_GET(fte_match_param, spec->match_value, misc_parameters.vxlan_vni));
-
-	MLX5_SET(fte_match_param, rule_spec->match_criteria,
-		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
-	MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
-		 mlx5_eswitch_get_vport_metadata_for_match(esw_attr->in_mdev->priv.eswitch,
-							   MLX5_VPORT_UPLINK));
+	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+	if (!rule)
+		return -ENOMEM;

 	/* Modify flow source to recirculate packet */
 	data = mlx5_eswitch_get_vport_metadata_for_set(esw, esw_attr->rx_tun_attr->decap_vport);
@@ -219,13 +136,14 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 	flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND;
+	flow_act.fg = e->recirc_grp;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
 	dest.ft = mlx5_chains_get_table(chains, 0, 1, 0);
 	if (IS_ERR(dest.ft)) {
 		err = PTR_ERR(dest.ft);
 		goto err_table;
 	}
-	handle = mlx5_add_flow_rules(e->ft, rule_spec, &flow_act, &dest, 1);
+	handle = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(handle)) {
 		err = PTR_ERR(handle);
 		goto err_handle;

@@ -233,14 +151,10 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 	mlx5e_mod_hdr_dealloc(&mod_acts);

 	rule->handle = handle;
-	rule->vni = esw_attr->rx_tun_attr->vni;
 	rule->mh = flow_act.modify_hdr;
-	memcpy(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
-	       sizeof(esw_attr->rx_tun_attr->dst_ip));
 	refcount_set(&rule->refcnt, 1);
-	list_add(&rule->list, &e->recirc_rules);
-	e->recirc_cnt++;
-	goto out;
+	e->recirc_rule = rule;
+	return 0;

 err_handle:
 	mlx5_chains_put_table(chains, 0, 1, 0);

@@ -250,89 +164,44 @@ static int mlx5_esw_indir_table_rule_get(struct mlx5_eswitch *esw,
 err_mod_hdr_regc1:
 	mlx5e_mod_hdr_dealloc(&mod_acts);
 err_mod_hdr_regc0:
-err_ethertype:
 	kfree(rule);
-out:
-	kvfree(rule_spec);
 	return err;
 }

 static void mlx5_esw_indir_table_rule_put(struct mlx5_eswitch *esw,
-					  struct mlx5_flow_attr *attr,
 					  struct mlx5_esw_indir_table_entry *e)
 {
-	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
+	struct mlx5_esw_indir_table_rule *rule = e->recirc_rule;
 	struct mlx5_fs_chains *chains = esw_chains(esw);
-	struct mlx5_esw_indir_table_rule *rule;

-	list_for_each_entry(rule, &e->recirc_rules, list)
-		if (rule->vni == esw_attr->rx_tun_attr->vni &&
-		    !memcmp(&rule->dst_ip, &esw_attr->rx_tun_attr->dst_ip,
-			    sizeof(esw_attr->rx_tun_attr->dst_ip)))
-			goto found;
-	return;
+	if (!rule)
+		return;

-found:
 	if (!refcount_dec_and_test(&rule->refcnt))
 		return;

 	mlx5_del_flow_rules(rule->handle);
 	mlx5_chains_put_table(chains, 0, 1, 0);
 	mlx5_modify_header_dealloc(esw->dev, rule->mh);
-	list_del(&rule->list);
 	kfree(rule);
-	e->recirc_cnt--;
+	e->recirc_rule = NULL;
 }

-static int mlx5_create_indir_recirc_group(struct mlx5_eswitch *esw,
-					  struct mlx5_flow_attr *attr,
-					  struct mlx5_flow_spec *spec,
-					  struct mlx5_esw_indir_table_entry *e)
+static int mlx5_create_indir_recirc_group(struct mlx5_esw_indir_table_entry *e)
 {
 	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
-	u32 *in, *match;
+	u32 *in;

 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;

-	MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS |
-		 MLX5_MATCH_MISC_PARAMETERS | MLX5_MATCH_MISC_PARAMETERS_2);
-	match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
-
-	if (MLX5_CAP_FLOWTABLE_NIC_RX(esw->dev, ft_field_support.outer_ip_version))
-		MLX5_SET(fte_match_param, match, outer_headers.ip_version, 0xf);
-	else
-		MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ethertype);
-
-	if (attr->ip_version == 4) {
-		MLX5_SET_TO_ONES(fte_match_param, match,
-				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
-	} else if (attr->ip_version == 6) {
-		memset(MLX5_ADDR_OF(fte_match_param, match,
-				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
-		       0xff, sizeof(struct in6_addr));
-	} else {
-		err = -EOPNOTSUPP;
-		goto out;
-	}
-
-	MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters.vxlan_vni);
-	MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
-		 mlx5_eswitch_get_vport_metadata_mask());
 	MLX5_SET(create_flow_group_in, in, start_flow_index, 0);
-	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX_MAX);
+	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_RECIRC_IDX);
 	e->recirc_grp = mlx5_create_flow_group(e->ft, in);
-	if (IS_ERR(e->recirc_grp)) {
+	if (IS_ERR(e->recirc_grp))
 		err = PTR_ERR(e->recirc_grp);
-		goto out;
-	}

-	INIT_LIST_HEAD(&e->recirc_rules);
-	e->recirc_cnt = 0;
-
-out:
 	kvfree(in);
 	return err;
 }
@@ -343,19 +212,12 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	int err = 0, inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
 	struct mlx5_flow_destination dest = {};
 	struct mlx5_flow_act flow_act = {};
-	struct mlx5_flow_spec *spec;
 	u32 *in;

 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in)
 		return -ENOMEM;

-	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		kvfree(in);
-		return -ENOMEM;
-	}
-
 	/* Hold one entry */
 	MLX5_SET(create_flow_group_in, in, start_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);
 	MLX5_SET(create_flow_group_in, in, end_flow_index, MLX5_ESW_INDIR_TABLE_FWD_IDX);

@@ -366,25 +228,25 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw,
 	}

 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+	flow_act.fg = e->fwd_grp;
 	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
 	dest.vport.num = e->vport;
 	dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
 	dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
-	e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1);
+	e->fwd_rule = mlx5_add_flow_rules(e->ft, NULL, &flow_act, &dest, 1);
 	if (IS_ERR(e->fwd_rule)) {
 		mlx5_destroy_flow_group(e->fwd_grp);
 		err = PTR_ERR(e->fwd_rule);
 	}

 err_out:
-	kvfree(spec);
 	kvfree(in);
 	return err;
 }

 static struct mlx5_esw_indir_table_entry *
 mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
-				  struct mlx5_flow_spec *spec, u16 vport, bool decap)
+				  u16 vport, bool decap)
 {
 	struct mlx5_flow_table_attr ft_attr = {};
 	struct mlx5_flow_namespace *root_ns;
@@ -412,15 +274,14 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
 	}
 	e->ft = ft;
 	e->vport = vport;
-	e->ip_version = attr->ip_version;
 	e->fwd_ref = !decap;

-	err = mlx5_create_indir_recirc_group(esw, attr, spec, e);
+	err = mlx5_create_indir_recirc_group(e);
 	if (err)
 		goto recirc_grp_err;

 	if (decap) {
-		err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+		err = mlx5_esw_indir_table_rule_get(esw, attr, e);
 		if (err)
 			goto recirc_rule_err;
 	}

@@ -430,13 +291,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
 		goto fwd_grp_err;

 	hash_add(esw->fdb_table.offloads.indir->table, &e->hlist,
-		 vport << 16 | attr->ip_version);
+		 vport << 16);

 	return e;

 fwd_grp_err:
 	if (decap)
-		mlx5_esw_indir_table_rule_put(esw, attr, e);
+		mlx5_esw_indir_table_rule_put(esw, e);
 recirc_rule_err:
 	mlx5_destroy_flow_group(e->recirc_grp);
 recirc_grp_err:

@@ -447,13 +308,13 @@ mlx5_esw_indir_table_entry_create(struct mlx5_eswitch *esw, struct mlx5_flow_att
 }

 static struct mlx5_esw_indir_table_entry *
-mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_version)
+mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport)
 {
 	struct mlx5_esw_indir_table_entry *e;
-	u32 key = vport << 16 | ip_version;
+	u32 key = vport << 16;

 	hash_for_each_possible(esw->fdb_table.offloads.indir->table, e, hlist, key)
-		if (e->vport == vport && e->ip_version == ip_version)
+		if (e->vport == vport)
 			return e;

 	return NULL;
@@ -461,24 +322,23 @@ mlx5_esw_indir_table_entry_lookup(struct mlx5_eswitch *esw, u16 vport, u8 ip_ver
 struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 						 struct mlx5_flow_attr *attr,
-						 struct mlx5_flow_spec *spec,
 						 u16 vport, bool decap)
 {
 	struct mlx5_esw_indir_table_entry *e;
 	int err;

 	mutex_lock(&esw->fdb_table.offloads.indir->lock);
-	e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
 	if (e) {
 		if (!decap) {
 			e->fwd_ref++;
 		} else {
-			err = mlx5_esw_indir_table_rule_get(esw, attr, spec, e);
+			err = mlx5_esw_indir_table_rule_get(esw, attr, e);
 			if (err)
 				goto out_err;
 		}
 	} else {
-		e = mlx5_esw_indir_table_entry_create(esw, attr, spec, vport, decap);
+		e = mlx5_esw_indir_table_entry_create(esw, attr, vport, decap);
 		if (IS_ERR(e)) {
 			err = PTR_ERR(e);
 			esw_warn(esw->dev, "Failed to create indirection table, err %d.\n", err);

@@ -494,22 +354,21 @@ struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 }

 void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
-			      struct mlx5_flow_attr *attr,
 			      u16 vport, bool decap)
 {
 	struct mlx5_esw_indir_table_entry *e;

 	mutex_lock(&esw->fdb_table.offloads.indir->lock);
-	e = mlx5_esw_indir_table_entry_lookup(esw, vport, attr->ip_version);
+	e = mlx5_esw_indir_table_entry_lookup(esw, vport);
 	if (!e)
 		goto out;

 	if (!decap)
 		e->fwd_ref--;
 	else
-		mlx5_esw_indir_table_rule_put(esw, attr, e);
+		mlx5_esw_indir_table_rule_put(esw, e);

-	if (e->fwd_ref || e->recirc_cnt)
+	if (e->fwd_ref || e->recirc_rule)
 		goto out;

 	hash_del(&e->hlist);
@@ -13,10 +13,8 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir);

 struct mlx5_flow_table *mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 						 struct mlx5_flow_attr *attr,
-						 struct mlx5_flow_spec *spec,
 						 u16 vport, bool decap);
 void mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
-			      struct mlx5_flow_attr *attr,
 			      u16 vport, bool decap);

 bool

@@ -44,7 +42,6 @@ mlx5_esw_indir_table_destroy(struct mlx5_esw_indir_table *indir)
 static inline struct mlx5_flow_table *
 mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 			 struct mlx5_flow_attr *attr,
-			 struct mlx5_flow_spec *spec,
 			 u16 vport, bool decap)
 {
 	return ERR_PTR(-EOPNOTSUPP);

@@ -52,7 +49,6 @@ mlx5_esw_indir_table_get(struct mlx5_eswitch *esw,
 static inline void
 mlx5_esw_indir_table_put(struct mlx5_eswitch *esw,
-			 struct mlx5_flow_attr *attr,
 			 u16 vport, bool decap)
 {
 }
@@ -1250,7 +1250,7 @@ static int mlx5_esw_acls_ns_init(struct mlx5_eswitch *esw)
 		if (err)
 			return err;
 	} else {
-		esw_warn(dev, "engress ACL is not supported by FW\n");
+		esw_warn(dev, "egress ACL is not supported by FW\n");
 	}

 	if (MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support)) {

@@ -1406,9 +1406,7 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw, bool clear_vf)
 	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
 	if (clear_vf)
 		mlx5_eswitch_clear_vf_vports_info(esw);
-	/* If disabling sriov in switchdev mode, free meta rules here
-	 * because it depends on num_vfs.
-	 */
+
 	if (esw->mode == MLX5_ESWITCH_OFFLOADS) {
 		struct devlink *devlink = priv_to_devlink(esw->dev);
@@ -179,15 +179,14 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
 static int
 esw_setup_decap_indir(struct mlx5_eswitch *esw,
-		      struct mlx5_flow_attr *attr,
-		      struct mlx5_flow_spec *spec)
+		      struct mlx5_flow_attr *attr)
 {
 	struct mlx5_flow_table *ft;

 	if (!(attr->flags & MLX5_ATTR_FLAG_SRC_REWRITE))
 		return -EOPNOTSUPP;

-	ft = mlx5_esw_indir_table_get(esw, attr, spec,
+	ft = mlx5_esw_indir_table_get(esw, attr,
 				      mlx5_esw_indir_table_decap_vport(attr), true);
 	return PTR_ERR_OR_ZERO(ft);
 }

@@ -197,7 +196,7 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
 			struct mlx5_flow_attr *attr)
 {
 	if (mlx5_esw_indir_table_decap_vport(attr))
-		mlx5_esw_indir_table_put(esw, attr,
+		mlx5_esw_indir_table_put(esw,
 					 mlx5_esw_indir_table_decap_vport(attr),
 					 true);
 }

@@ -235,7 +234,6 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 		  struct mlx5_flow_act *flow_act,
 		  struct mlx5_eswitch *esw,
 		  struct mlx5_flow_attr *attr,
-		  struct mlx5_flow_spec *spec,
 		  int i)
 {
 	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

@@ -243,7 +241,7 @@ esw_setup_ft_dest(struct mlx5_flow_destination *dest,
 	dest[i].ft = attr->dest_ft;

 	if (mlx5_esw_indir_table_decap_vport(attr))
-		return esw_setup_decap_indir(esw, attr, spec);
+		return esw_setup_decap_indir(esw, attr);
 	return 0;
 }

@@ -298,7 +296,7 @@ static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_
 		mlx5_chains_put_table(chains, 0, 1, 0);
 	else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
 					     esw_attr->dests[i].mdev))
-		mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
+		mlx5_esw_indir_table_put(esw, esw_attr->dests[i].rep->vport,
 					 false);
 }

@@ -384,7 +382,6 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 		      struct mlx5_flow_act *flow_act,
 		      struct mlx5_eswitch *esw,
 		      struct mlx5_flow_attr *attr,
-		      struct mlx5_flow_spec *spec,
 		      bool ignore_flow_lvl,
 		      int *i)
 {

@@ -399,7 +396,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
 		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

-		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
+		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr,
 						       esw_attr->dests[j].rep->vport, false);
 		if (IS_ERR(dest[*i].ft)) {
 			err = PTR_ERR(dest[*i].ft);

@@ -408,7 +405,7 @@ esw_setup_indir_table(struct mlx5_flow_destination *dest,
 		}

 		if (mlx5_esw_indir_table_decap_vport(attr)) {
-			err = esw_setup_decap_indir(esw, attr, spec);
+			err = esw_setup_decap_indir(esw, attr);
 			if (err)
 				goto err_indir_tbl_get;
 		}

@@ -511,14 +508,14 @@ esw_setup_dests(struct mlx5_flow_destination *dest,
 		err = esw_setup_mtu_dest(dest, &attr->meter_attr, *i);
 		(*i)++;
 	} else if (esw_is_indir_table(esw, attr)) {
-		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
+		err = esw_setup_indir_table(dest, flow_act, esw, attr, true, i);
 	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
 		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
 	} else {
 		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);

 		if (attr->dest_ft) {
-			err = esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
+			err = esw_setup_ft_dest(dest, flow_act, esw, attr, *i);
 			(*i)++;
 		} else if (attr->dest_chain) {
 			err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,

@@ -727,7 +724,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	for (i = 0; i < esw_attr->split_count; i++) {
 		if (esw_is_indir_table(esw, attr))
-			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
+			err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i);
 		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
 			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
 							       &i);

@@ -3575,9 +3572,9 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
 	if (IS_ERR(esw))
 		return PTR_ERR(esw);

-	down_write(&esw->mode_lock);
+	down_read(&esw->mode_lock);
 	err = esw_mode_to_devlink(esw->mode, mode);
-	up_write(&esw->mode_lock);
+	up_read(&esw->mode_lock);
 	return err;
 }

@@ -3675,9 +3672,9 @@ int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
 	if (IS_ERR(esw))
 		return PTR_ERR(esw);

-	down_write(&esw->mode_lock);
+	down_read(&esw->mode_lock);
 	err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
-	up_write(&esw->mode_lock);
+	up_read(&esw->mode_lock);
 	return err;
 }

@@ -3749,9 +3746,9 @@ int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
 	if (IS_ERR(esw))
 		return PTR_ERR(esw);

-	down_write(&esw->mode_lock);
+	down_read(&esw->mode_lock);
 	*encap = esw->offloads.encap;
-	up_write(&esw->mode_lock);
+	up_read(&esw->mode_lock);
 	return 0;
 }
@@ -69,6 +69,13 @@ enum {
 	MLX5_MTPPS_FS_OUT_PULSE_DURATION_NS = BIT(0xa),
 };

+enum {
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN = S16_MIN,
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX = S16_MAX,
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN = -200000,
+	MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX = 200000,
+};
+
 static bool mlx5_real_time_mode(struct mlx5_core_dev *mdev)
 {
 	return (mlx5_is_real_time_rq(mdev) || mlx5_is_real_time_sq(mdev));

@@ -86,6 +93,22 @@ static bool mlx5_modify_mtutc_allowed(struct mlx5_core_dev *mdev)
 	return MLX5_CAP_MCAM_FEATURE(mdev, ptpcyc2realtime_modify);
 }

+static bool mlx5_is_mtutc_time_adj_cap(struct mlx5_core_dev *mdev, s64 delta)
+{
+	s64 min = MLX5_MTUTC_OPERATION_ADJUST_TIME_MIN;
+	s64 max = MLX5_MTUTC_OPERATION_ADJUST_TIME_MAX;
+
+	if (MLX5_CAP_MCAM_FEATURE(mdev, mtutc_time_adjustment_extended_range)) {
+		min = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MIN;
+		max = MLX5_MTUTC_OPERATION_ADJUST_TIME_EXTENDED_MAX;
+	}
+
+	if (delta < min || delta > max)
+		return false;
+
+	return true;
+}
+
 static int mlx5_set_mtutc(struct mlx5_core_dev *dev, u32 *mtutc, u32 size)
 {
 	u32 out[MLX5_ST_SZ_DW(mtutc_reg)] = {};

@@ -288,8 +311,8 @@ static int mlx5_ptp_adjtime_real_time(struct mlx5_core_dev *mdev, s64 delta)
 	if (!mlx5_modify_mtutc_allowed(mdev))
 		return 0;

-	/* HW time adjustment range is s16. If out of range, settime instead */
-	if (delta < S16_MIN || delta > S16_MAX) {
+	/* HW time adjustment range is checked. If out of range, settime instead */
+	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta)) {
 		struct timespec64 ts;
 		s64 ns;

@@ -326,6 +349,19 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
 	return 0;
 }

+static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
+{
+	struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
+	struct mlx5_core_dev *mdev;
+
+	mdev = container_of(clock, struct mlx5_core_dev, clock);
+
+	if (!mlx5_is_mtutc_time_adj_cap(mdev, delta))
+		return -ERANGE;
+
+	return mlx5_ptp_adjtime(ptp, delta);
+}
+
 static int mlx5_ptp_adjfreq_real_time(struct mlx5_core_dev *mdev, s32 freq)
 {
 	u32 in[MLX5_ST_SZ_DW(mtutc_reg)] = {};

@@ -688,6 +724,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
 	.n_pins		= 0,
 	.pps		= 0,
 	.adjfine	= mlx5_ptp_adjfine,
+	.adjphase	= mlx5_ptp_adjphase,
 	.adjtime	= mlx5_ptp_adjtime,
 	.gettimex64	= mlx5_ptp_gettimex,
 	.settime64	= mlx5_ptp_settime,
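
For context on how the new .adjphase callback is reached: the PTP core routes clock_adjtime() calls carrying ADJ_OFFSET to the driver's adjphase handler, with ADJ_NANO selecting nanosecond units. A hedged userspace sketch; the /dev/ptp0 path and the 150000 ns value are assumptions for illustration, picked to sit inside the extended +/-200000 ns window but outside the baseline s16 range:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/timex.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t)(fd) << 3) | CLOCKFD)

int main(void)
{
	struct timex tx = { .modes = ADJ_OFFSET | ADJ_NANO, .offset = 150000 };
	int fd = open("/dev/ptp0", O_RDWR); /* assumed PHC device path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (clock_adjtime(FD_TO_CLOCKID(fd), &tx) < 0)
		perror("clock_adjtime"); /* -ERANGE if the device can't do it */
	close(fd);
	return 0;
}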
@@ -9941,7 +9941,9 @@ struct mlx5_ifc_pcam_reg_bits {
 };

 struct mlx5_ifc_mcam_enhanced_features_bits {
-	u8         reserved_at_0[0x5d];
+	u8         reserved_at_0[0x51];
+	u8         mtutc_time_adjustment_extended_range[0x1];
+	u8         reserved_at_52[0xb];
 	u8         mcia_32dwords[0x1];
 	u8         out_pulse_duration_ns[0x1];
 	u8         npps_period[0x1];
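
The reserved-field split keeps the register layout intact: the widths on either side of the new capability bit add back up to the original 0x5d-bit run, so no later field shifts. A trivial compile-time check of that arithmetic:

_Static_assert(0x51 + 0x1 + 0xb == 0x5d,
	       "new cap bit carved out of reserved_at_0 without moving later fields");

int main(void) { return 0; }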