Commit c8b441d2 authored by David S. Miller

Merge tag 'mlx5-updates-2022-02-16' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2022-02-16

Misc updates for mlx5:
1) Alex Liu adds support for using xdp->data_meta.

2) Aya Levin adds PTP counters and port timestamp mode for representors
and switchdev mode.

3) Tariq Toukan: simple Striding RQ improvements.

4) Roi Dayan (7): Create multiple attr instances per flow

Some TC actions, for example CT and sample, use post actions in their
implementation.

Create a new flow attr instance after each multi-table action and create
a post action rule for it as a generic parsing step, so multi-table
actions like CT and sample no longer need to do this themselves.

When a flow has multiple attr instances, the first attr is offloaded
normally and linked to the next attr (the post action rule) by setting an
id on reg_c for matching. The post action rule (the rule created from the
second attr instance) matches that id on reg_c and performs the rest of
the actions.

For example, a rule with actions CT,goto will be created with 2 attr
instances, as follows: attr1(CT)->attr2(goto)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 22b67d17 b070e703
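
To illustrate the attr chaining described above, here is a minimal,
self-contained C sketch (hypothetical types and names, not the driver's
actual structures) of how attr instances are linked through an id written
to reg_c:

#include <stdio.h>

/* Hypothetical, simplified model of a multi-attr flow: every attr
 * instance except the last writes an id to reg_c and forwards to the
 * post action table, where the next instance's rule matches that id and
 * carries out the remaining actions.
 */
struct attr {
	const char *actions;   /* actions handled by this instance */
	unsigned int match_id; /* reg_c id this rule matches, 0 for the first rule */
	struct attr *next;     /* next instance, offloaded as a post action rule */
};

static void offload_chain(const struct attr *a)
{
	for (; a; a = a->next) {
		if (a->match_id)
			printf("post act rule: match reg_c == %u; ", a->match_id);
		else
			printf("first rule: original match; ");
		printf("do %s", a->actions);
		if (a->next)
			printf("; set reg_c = %u; goto post act table",
			       a->next->match_id);
		printf("\n");
	}
}

int main(void)
{
	/* attr1(CT) -> attr2(goto), the example from the message above */
	struct attr attr2 = { .actions = "goto chain", .match_id = 1, .next = NULL };
	struct attr attr1 = { .actions = "CT", .match_id = 0, .next = &attr2 };

	offload_chain(&attr1);
	return 0;
}

In the real driver the ids are allocated from an xarray (see the
xa_erase(&post_act->ids, handle->id) calls in post_act.c below) rather
than hard-coded as in this sketch.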
......@@ -714,6 +714,7 @@ struct mlx5e_rq {
u8 umr_in_progress;
u8 umr_last_bulk;
u8 umr_completed;
u8 min_wqe_bulk;
struct mlx5e_shampo_hd *shampo;
} mpwqe;
};
......
......@@ -178,6 +178,12 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
}
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz)
{
#define UMR_WQE_BULK (2)
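	/* Minimum number of missing WQEs required before posting a UMR bulk;
	 * clamped so tiny WQs (where wq_sz / 2 - 1 < UMR_WQE_BULK) can still
	 * replenish.
	 */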
return min_t(unsigned int, UMR_WQE_BULK, wq_sz / 2 - 1);
}
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk)
......@@ -359,12 +365,13 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
{
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
* - Slow PCI heuristic.
* - CQE compression is ON, and stride_index mini_cqe layout is not supported.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*
* No XSK params: checking the availability of striding RQ in general.
*/
if (!slow_pci_heuristic(mdev) &&
if ((!MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ||
MLX5_CAP_GEN(mdev, mini_cqe_resp_stride_index)) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ||
!mlx5e_rx_is_linear_skb(params, NULL)))
......
......@@ -129,6 +129,7 @@ u8 mlx5e_mpwqe_get_log_stride_size(struct mlx5_core_dev *mdev,
u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
u8 mlx5e_mpwqe_get_min_wqe_bulk(unsigned int wq_sz);
u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
struct mlx5e_xsk_param *xsk);
......
......@@ -263,14 +263,14 @@ int mlx5e_rep_tc_init(struct mlx5e_rep_priv *rpriv)
INIT_LIST_HEAD(&uplink_priv->unready_flows);
/* init shared tc flow table */
err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
err = mlx5e_tc_esw_init(uplink_priv);
return err;
}
void mlx5e_rep_tc_cleanup(struct mlx5e_rep_priv *rpriv)
{
/* delete shared tc flow table */
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
mlx5e_tc_esw_cleanup(&rpriv->uplink_priv);
mutex_destroy(&rpriv->uplink_priv.unready_flows_lock);
}
......
......@@ -442,7 +442,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
goto inner_tir;
err = mlx5e_tir_modify(tir, builder);
if (err) {
mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
......@@ -457,7 +457,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
continue;
err = mlx5e_tir_modify(tir, builder);
if (err) {
mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_rss_warn(rss->mdev, "Failed to update packet merge state of inner indirect TIR %#x for traffic type %d: err = %d\n",
mlx5e_tir_get_tirn(tir), tt, err);
if (!final_err)
final_err = err;
......
......@@ -19,8 +19,7 @@ tc_act_parse_accept(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->flags |= MLX5_ATTR_FLAG_ACCEPT;
return 0;
......
......@@ -2,6 +2,7 @@
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "act.h"
#include "en/tc/post_act.h"
#include "en/tc_priv.h"
#include "mlx5_core.h"
......@@ -101,3 +102,75 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
parse_state->num_actions = flow_action->num_entries;
parse_state->extack = extack;
}
void
mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
struct mlx5e_tc_flow_action *flow_action_reorder)
{
struct flow_action_entry *act;
int i, j = 0;
flow_action_for_each(i, act, flow_action) {
/* Add CT action to be first. */
if (act->id == FLOW_ACTION_CT)
flow_action_reorder->entries[j++] = act;
}
flow_action_for_each(i, act, flow_action) {
if (act->id == FLOW_ACTION_CT)
continue;
flow_action_reorder->entries[j++] = act;
}
}
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type)
{
struct flow_action_entry *act;
struct mlx5e_tc_act *tc_act;
struct mlx5e_priv *priv;
int err = 0, i;
priv = parse_state->flow->priv;
flow_action_for_each(i, act, flow_action) {
tc_act = mlx5e_tc_act_get(act->id, ns_type);
if (!tc_act || !tc_act->post_parse ||
!tc_act->can_offload(parse_state, act, i, attr))
continue;
err = tc_act->post_parse(parse_state, priv, attr);
if (err)
goto out;
}
out:
return err;
}
int
mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct mlx5_flow_attr *next_attr)
{
struct mlx5_core_dev *mdev = flow->priv->mdev;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
int err;
mod_acts = &attr->parse_attr->mod_hdr_acts;
/* Set handle on current post act rule to next post act rule. */
err = mlx5e_tc_post_act_set_handle(mdev, next_attr->post_act_handle, mod_acts);
if (err) {
mlx5_core_warn(mdev, "Failed setting post action handle");
return err;
}
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
return 0;
}
......@@ -16,6 +16,7 @@ struct mlx5e_tc_act_parse_state {
unsigned int num_actions;
struct mlx5e_tc_flow *flow;
struct netlink_ext_ack *extack;
u32 actions;
bool ct;
bool encap;
bool decap;
......@@ -41,6 +42,15 @@ struct mlx5e_tc_act {
int (*post_parse)(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr);
bool (*is_multi_table_act)(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr);
};
struct mlx5e_tc_flow_action {
unsigned int num_entries;
struct flow_action_entry **entries;
};
extern struct mlx5e_tc_act mlx5e_tc_act_drop;
......@@ -73,4 +83,19 @@ mlx5e_tc_act_init_parse_state(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct netlink_ext_ack *extack);
void
mlx5e_tc_act_reorder_flow_actions(struct flow_action *flow_action,
struct mlx5e_tc_flow_action *flow_action_reorder);
int
mlx5e_tc_act_post_parse(struct mlx5e_tc_act_parse_state *parse_state,
struct flow_action *flow_action,
struct mlx5_flow_attr *attr,
enum mlx5_flow_namespace_type ns_type);
int
mlx5e_tc_act_set_next_post_act(struct mlx5e_tc_flow *flow,
struct mlx5_flow_attr *attr,
struct mlx5_flow_attr *next_attr);
#endif /* __MLX5_EN_TC_ACT_H__ */
......@@ -14,14 +14,8 @@ tc_act_can_offload_ct(struct mlx5e_tc_act_parse_state *parse_state,
bool clear_action = act->ct.action & TCA_CT_ACT_CLEAR;
struct netlink_ext_ack *extack = parse_state->extack;
if (flow_flag_test(parse_state->flow, SAMPLE)) {
NL_SET_ERR_MSG_MOD(extack,
"Sample action with connection tracking is not supported");
return false;
}
if (parse_state->ct && !clear_action) {
NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supoported");
NL_SET_ERR_MSG_MOD(extack, "Multiple CT actions are not supported");
return false;
}
......@@ -56,8 +50,20 @@ tc_act_parse_ct(struct mlx5e_tc_act_parse_state *parse_state,
return 0;
}
static bool
tc_act_is_multi_table_act_ct(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr)
{
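	/* ct clear is implemented with header rewrites on the rule itself, so
	 * it does not span multiple tables; all other CT actions do.
	 */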
if (act->ct.action & TCA_CT_ACT_CLEAR)
return false;
return true;
}
struct mlx5e_tc_act mlx5e_tc_act_ct = {
.can_offload = tc_act_can_offload_ct,
.parse_action = tc_act_parse_ct,
.is_multi_table_act = tc_act_is_multi_table_act_ct,
};
......@@ -19,8 +19,7 @@ tc_act_parse_drop(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
return 0;
}
......
......@@ -76,8 +76,7 @@ tc_act_parse_goto(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->dest_chain = act->chain_index;
return 0;
......
......@@ -296,8 +296,7 @@ tc_act_parse_mirred(struct mlx5e_tc_act_parse_state *parse_state,
if (err)
return err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
......
......@@ -40,8 +40,7 @@ tc_act_parse_mirred_nic(struct mlx5e_tc_act_parse_state *parse_state,
{
attr->parse_attr->mirred_ifindex[0] = act->dev->ifindex;
flow_flag_set(parse_state->flow, HAIRPIN);
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
return 0;
}
......
......@@ -58,8 +58,7 @@ tc_act_parse_redirect_ingress(struct mlx5e_tc_act_parse_state *parse_state,
struct net_device *out_dev = act->dev;
int err;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
......
......@@ -4,6 +4,7 @@
#include <net/psample.h>
#include "act.h"
#include "en/tc_priv.h"
#include "en/tc/act/sample.h"
static bool
tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
......@@ -12,10 +13,12 @@ tc_act_can_offload_sample(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5_flow_attr *attr)
{
struct netlink_ext_ack *extack = parse_state->extack;
bool ct_nat;
if (flow_flag_test(parse_state->flow, CT)) {
NL_SET_ERR_MSG_MOD(extack,
"Sample action with connection tracking is not supported");
ct_nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
if (flow_flag_test(parse_state->flow, CT) && ct_nat) {
NL_SET_ERR_MSG_MOD(extack, "Sample action with CT NAT is not supported");
return false;
}
......@@ -42,7 +45,27 @@ tc_act_parse_sample(struct mlx5e_tc_act_parse_state *parse_state,
return 0;
}
bool
mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
struct mlx5_flow_attr *attr)
{
if (MLX5_CAP_GEN(mdev, reg_c_preserve) ||
attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
return true;
return false;
}
static bool
tc_act_is_multi_table_act_sample(struct mlx5e_priv *priv,
const struct flow_action_entry *act,
struct mlx5_flow_attr *attr)
{
return mlx5e_tc_act_sample_is_multi_table(priv->mdev, attr);
}
struct mlx5e_tc_act mlx5e_tc_act_sample = {
.can_offload = tc_act_can_offload_sample,
.parse_action = tc_act_parse_sample,
.is_multi_table_act = tc_act_is_multi_table_act_sample,
};
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
#ifndef __MLX5_EN_TC_ACT_SAMPLE_H__
#define __MLX5_EN_TC_ACT_SAMPLE_H__
#include <net/flow_offload.h>
#include "en/tc_priv.h"
bool
mlx5e_tc_act_sample_is_multi_table(struct mlx5_core_dev *mdev,
struct mlx5_flow_attr *attr);
#endif /* __MLX5_EN_TC_ACT_SAMPLE_H__ */
......@@ -26,8 +26,7 @@ tc_act_parse_trap(struct mlx5e_tc_act_parse_state *parse_state,
struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr)
{
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
return 0;
......
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#include "en/tc_priv.h"
#include "en_tc.h"
#include "post_act.h"
#include "mlx5_core.h"
......@@ -75,21 +76,47 @@ mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act)
kfree(post_act);
}
int
mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5e_post_act_handle *handle)
{
struct mlx5_flow_spec *spec;
int err;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
/* Post action rule matches on fte_id and executes original rule's tc rule action */
mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, handle->id, MLX5_POST_ACTION_MASK);
handle->rule = mlx5e_tc_rule_offload(post_act->priv, spec, handle->attr);
if (IS_ERR(handle->rule)) {
err = PTR_ERR(handle->rule);
netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
goto err_rule;
}
kvfree(spec);
return 0;
err_rule:
kvfree(spec);
return err;
}
struct mlx5e_post_act_handle *
mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr)
{
u32 attr_sz = ns_to_attr_sz(post_act->ns_type);
struct mlx5e_post_act_handle *handle = NULL;
struct mlx5_flow_attr *post_attr = NULL;
struct mlx5_flow_spec *spec = NULL;
struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *post_attr;
int err;
handle = kzalloc(sizeof(*handle), GFP_KERNEL);
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
post_attr = mlx5_alloc_flow_attr(post_act->ns_type);
if (!handle || !spec || !post_attr) {
if (!handle || !post_attr) {
kfree(post_attr);
kvfree(spec);
kfree(handle);
return ERR_PTR(-ENOMEM);
}
......@@ -100,8 +127,7 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
post_attr->ft = post_act->ft;
post_attr->inner_match_level = MLX5_MATCH_NONE;
post_attr->outer_match_level = MLX5_MATCH_NONE;
post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP);
post_attr->flags &= ~MLX5_ATTR_FLAG_SAMPLE;
post_attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_DECAP;
handle->ns_type = post_act->ns_type;
/* Splits were handled before post action */
......@@ -113,36 +139,29 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
if (err)
goto err_xarray;
/* Post action rule matches on fte_id and executes original rule's
* tc rule action
*/
mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG,
handle->id, MLX5_POST_ACTION_MASK);
handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr);
if (IS_ERR(handle->rule)) {
err = PTR_ERR(handle->rule);
netdev_warn(post_act->priv->netdev, "Failed to add post action rule");
goto err_rule;
}
handle->attr = post_attr;
kvfree(spec);
return handle;
err_rule:
xa_erase(&post_act->ids, handle->id);
err_xarray:
kfree(post_attr);
kvfree(spec);
kfree(handle);
return ERR_PTR(err);
}
void
mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
struct mlx5e_post_act_handle *handle)
{
mlx5e_tc_rule_unoffload(post_act->priv, handle->rule, handle->attr);
handle->rule = NULL;
}
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle)
{
mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr);
if (!IS_ERR_OR_NULL(handle->rule))
mlx5e_tc_post_act_unoffload(post_act, handle);
xa_erase(&post_act->ids, handle->id);
kfree(handle->attr);
kfree(handle);
......
......@@ -24,6 +24,14 @@ mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *at
void
mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle);
int
mlx5e_tc_post_act_offload(struct mlx5e_post_act *post_act,
struct mlx5e_post_act_handle *handle);
void
mlx5e_tc_post_act_unoffload(struct mlx5e_post_act *post_act,
struct mlx5e_post_act_handle *handle);
struct mlx5_flow_table *
mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act);
......
......@@ -5,6 +5,7 @@
#include <net/psample.h>
#include "en/mapping.h"
#include "en/tc/post_act.h"
#include "en/tc/act/sample.h"
#include "en/mod_hdr.h"
#include "sample.h"
#include "eswitch.h"
......@@ -46,14 +47,12 @@ struct mlx5e_sample_flow {
struct mlx5_flow_handle *pre_rule;
struct mlx5_flow_attr *post_attr;
struct mlx5_flow_handle *post_rule;
struct mlx5e_post_act_handle *post_act_handle;
};
struct mlx5e_sample_restore {
struct hlist_node hlist;
struct mlx5_modify_hdr *modify_hdr;
struct mlx5_flow_handle *rule;
struct mlx5e_post_act_handle *post_act_handle;
u32 obj_id;
int count;
};
......@@ -231,69 +230,46 @@ sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler)
*/
static struct mlx5_modify_hdr *
sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id,
struct mlx5e_post_act_handle *handle)
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct mlx5e_tc_mod_hdr_acts mod_acts = {};
struct mlx5_modify_hdr *modify_hdr;
int err;
err = mlx5e_tc_match_to_reg_set(mdev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
err = mlx5e_tc_match_to_reg_set(mdev, mod_acts, MLX5_FLOW_NAMESPACE_FDB,
CHAIN_TO_REG, obj_id);
if (err)
goto err_set_regc0;
if (handle) {
err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts);
if (err)
goto err_post_act;
}
modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB,
mod_acts.num_actions,
mod_acts.actions);
mod_acts->num_actions,
mod_acts->actions);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
}
mlx5e_mod_hdr_dealloc(&mod_acts);
mlx5e_mod_hdr_dealloc(mod_acts);
return modify_hdr;
err_modify_hdr:
err_post_act:
mlx5e_mod_hdr_dealloc(&mod_acts);
mlx5e_mod_hdr_dealloc(mod_acts);
err_set_regc0:
return ERR_PTR(err);
}
static u32
restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle)
{
return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0);
}
static bool
restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id,
struct mlx5e_post_act_handle *post_act_handle)
{
return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle;
}
static struct mlx5e_sample_restore *
sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
struct mlx5e_post_act_handle *post_act_handle)
struct mlx5e_tc_mod_hdr_acts *mod_acts)
{
struct mlx5_eswitch *esw = tc_psample->esw;
struct mlx5_core_dev *mdev = esw->dev;
struct mlx5e_sample_restore *restore;
struct mlx5_modify_hdr *modify_hdr;
u32 hash_key;
int err;
mutex_lock(&tc_psample->restore_lock);
hash_key = restore_hash(obj_id, post_act_handle);
hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key)
if (restore_equal(restore, obj_id, post_act_handle))
hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, obj_id)
if (restore->obj_id == obj_id)
goto add_ref;
restore = kzalloc(sizeof(*restore), GFP_KERNEL);
......@@ -302,9 +278,8 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_alloc;
}
restore->obj_id = obj_id;
restore->post_act_handle = post_act_handle;
modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle);
modify_hdr = sample_modify_hdr_get(mdev, obj_id, mod_acts);
if (IS_ERR(modify_hdr)) {
err = PTR_ERR(modify_hdr);
goto err_modify_hdr;
......@@ -317,7 +292,7 @@ sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id,
goto err_restore;
}
hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key);
hash_add(tc_psample->restore_hashtbl, &restore->hlist, obj_id);
add_ref:
restore->count++;
mutex_unlock(&tc_psample->restore_lock);
......@@ -494,10 +469,10 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr)
{
struct mlx5e_post_act_handle *post_act_handle = NULL;
struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
struct mlx5_esw_flow_attr *pre_esw_attr;
struct mlx5_mapped_obj restore_obj = {};
struct mlx5e_tc_mod_hdr_acts *mod_acts;
struct mlx5e_sample_flow *sample_flow;
struct mlx5e_sample_attr *sample_attr;
struct mlx5_flow_attr *pre_attr;
......@@ -522,18 +497,11 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
* original flow table.
*/
esw = tc_psample->esw;
if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) ||
attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) {
if (mlx5e_tc_act_sample_is_multi_table(esw->dev, attr)) {
struct mlx5_flow_table *ft;
ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act);
default_tbl_id = ft->id;
post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr);
if (IS_ERR(post_act_handle)) {
err = PTR_ERR(post_act_handle);
goto err_post_act;
}
sample_flow->post_act_handle = post_act_handle;
} else {
err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id);
if (err)
......@@ -560,7 +528,8 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
sample_attr->restore_obj_id = obj_id;
/* Create sample restore context. */
sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle);
mod_acts = &attr->parse_attr->mod_hdr_acts;
sample_flow->restore = sample_restore_get(tc_psample, obj_id, mod_acts);
if (IS_ERR(sample_flow->restore)) {
err = PTR_ERR(sample_flow->restore);
goto err_sample_restore;
......@@ -586,6 +555,7 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
pre_attr->outer_match_level = attr->outer_match_level;
pre_attr->chain = attr->chain;
pre_attr->prio = attr->prio;
pre_attr->ft = attr->ft;
pre_attr->sample_attr = *sample_attr;
pre_esw_attr = pre_attr->esw_attr;
pre_esw_attr->in_mdev = esw_attr->in_mdev;
......@@ -611,9 +581,6 @@ mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample,
if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
err_post_rule:
if (post_act_handle)
mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle);
err_post_act:
kfree(sample_flow);
return ERR_PTR(err);
}
......@@ -639,9 +606,7 @@ mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample,
sample_restore_put(tc_psample, sample_flow->restore);
mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr.restore_obj_id);
sampler_put(tc_psample, sample_flow->sampler);
if (sample_flow->post_act_handle)
mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle);
else
if (sample_flow->post_rule)
del_post_rule(esw, sample_flow, attr);
kfree(sample_flow->pre_attr);
......
......@@ -18,6 +18,7 @@
#include "lib/fs_chains.h"
#include "en/tc_ct.h"
#include "en/tc_priv.h"
#include "en/mod_hdr.h"
#include "en/mapping.h"
#include "en/tc/post_act.h"
......@@ -68,7 +69,6 @@ struct mlx5_tc_ct_priv {
struct mlx5_ct_flow {
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_flow_handle *pre_ct_rule;
struct mlx5e_post_act_handle *post_act_handle;
struct mlx5_ct_ft *ft;
u32 chain_mapping;
};
......@@ -1756,7 +1756,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
/* We translate the tc filter with CT action to the following HW model:
*
* +---------------------+
* + ft prio (tc chain) +
* + ft prio (tc chain) +
* + original match +
* +---------------------+
* | set chain miss mapping
......@@ -1766,7 +1766,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* v
* +---------------------+
* + pre_ct/pre_ct_nat + if matches +-------------------------+
* + zone+nat match +---------------->+ post_act (see below) +
* + zone+nat match +---------------->+ post_act (see below) +
* +---------------------+ set zone +-------------------------+
* | set zone
* v
......@@ -1781,7 +1781,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft)
* | do nat (if needed)
* v
* +--------------+
* + post_act + original filter actions
* + post_act + original filter actions
* + fte_id match +------------------------>
* +--------------+
*/
......@@ -1792,9 +1792,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
{
bool nat = attr->ct_attr.ct_action & TCA_CT_ACT_NAT;
struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev);
struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {};
struct mlx5e_tc_mod_hdr_acts *pre_mod_acts;
u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type);
struct mlx5e_post_act_handle *handle;
struct mlx5_flow_attr *pre_ct_attr;
struct mlx5_modify_hdr *mod_hdr;
struct mlx5_ct_flow *ct_flow;
......@@ -1817,14 +1816,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->ft = ft;
handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
ct_dbg("Failed to allocate post action handle");
goto err_post_act_handle;
}
ct_flow->post_act_handle = handle;
/* Base flow attributes of both rules on original rule attribute */
ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type);
if (!ct_flow->pre_ct_attr) {
......@@ -1834,6 +1825,7 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
pre_ct_attr = ct_flow->pre_ct_attr;
memcpy(pre_ct_attr, attr, attr_sz);
pre_mod_acts = &pre_ct_attr->parse_attr->mod_hdr_acts;
/* Modify the original rule's action to fwd and modify, leave decap */
pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP;
......@@ -1852,25 +1844,19 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
ct_flow->chain_mapping = chain_mapping;
err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type,
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts, ct_priv->ns_type,
CHAIN_TO_REG, chain_mapping);
if (err) {
ct_dbg("Failed to set chain register mapping");
goto err_mapping;
}
err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, &pre_mod_acts);
if (err) {
ct_dbg("Failed to set post action handle");
goto err_mapping;
}
/* If original flow is decap, we do it before going into ct table
* so add a rewrite for the tunnel match_id.
*/
if ((pre_ct_attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
attr->chain == 0) {
err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts,
err = mlx5e_tc_match_to_reg_set(priv->mdev, pre_mod_acts,
ct_priv->ns_type,
TUNNEL_TO_REG,
attr->tunnel_id);
......@@ -1881,8 +1867,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
mod_hdr = mlx5_modify_header_alloc(priv->mdev, ct_priv->ns_type,
pre_mod_acts.num_actions,
pre_mod_acts.actions);
pre_mod_acts->num_actions,
pre_mod_acts->actions);
if (IS_ERR(mod_hdr)) {
err = PTR_ERR(mod_hdr);
ct_dbg("Failed to create pre ct mod hdr");
......@@ -1902,20 +1888,18 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv,
}
attr->ct_attr.ct_flow = ct_flow;
mlx5e_mod_hdr_dealloc(&pre_mod_acts);
mlx5e_mod_hdr_dealloc(pre_mod_acts);
return ct_flow->pre_ct_rule;
err_insert_orig:
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
err_mapping:
mlx5e_mod_hdr_dealloc(&pre_mod_acts);
mlx5e_mod_hdr_dealloc(pre_mod_acts);
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
err_get_chain:
kfree(ct_flow->pre_ct_attr);
err_alloc_pre:
mlx5e_tc_post_act_del(ct_priv->post_act, handle);
err_post_act_handle:
mlx5_tc_ct_del_ft_cb(ct_priv, ft);
err_ft:
kfree(ct_flow);
......@@ -1952,11 +1936,8 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv,
mlx5_tc_rule_delete(priv, ct_flow->pre_ct_rule, pre_ct_attr);
mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr);
if (ct_flow->post_act_handle) {
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
}
mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping);
mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft);
kfree(ct_flow->pre_ct_attr);
kfree(ct_flow);
......
......@@ -109,6 +109,7 @@ struct mlx5e_tc_flow {
struct completion init_done;
struct completion del_hw_done;
struct mlx5_flow_attr *attr;
struct list_head attrs;
};
struct mlx5_flow_handle *
......@@ -129,6 +130,12 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_flow_attr *attr);
struct mlx5_flow_attr *
mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow);
void mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow);
int mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow);
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow);
bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow);
......
......@@ -173,19 +173,29 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW))
continue;
attr = flow->attr;
esw_attr = attr->esw_attr;
spec = &attr->parse_attr->spec;
spec = &flow->attr->parse_attr->spec;
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
esw_attr->dests[flow->tmp_entry_index].pkt_reformat = e->pkt_reformat;
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
/* Do not offload flows with unresolved neighbors */
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
continue;
err = mlx5e_tc_offload_flow_post_acts(flow);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
err);
continue;
}
/* update from slow path rule to encap rule */
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
......@@ -214,12 +224,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
list_for_each_entry(flow, flow_list, tmp_list) {
if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW))
continue;
attr = flow->attr;
esw_attr = attr->esw_attr;
spec = &attr->parse_attr->spec;
spec = &flow->attr->parse_attr->spec;
/* update from encap rule to slow path rule */
rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec);
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
/* mark the flow's encap dest as non-valid */
esw_attr->dests[flow->tmp_entry_index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;
......@@ -230,7 +241,8 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
continue;
}
mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->attr);
mlx5e_tc_unoffload_flow_post_acts(flow);
flow->rule[0] = rule;
/* was unset when fast path rule removed */
flow_flag_set(flow, OFFLOADED);
......@@ -495,6 +507,9 @@ void mlx5e_detach_encap(struct mlx5e_priv *priv,
struct mlx5e_encap_entry *e = flow->encaps[out_index].e;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
if (!mlx5e_is_eswitch_flow(flow))
return;
if (attr->esw_attr->dests[out_index].flags &
MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
mlx5e_detach_encap_route(priv, flow, out_index);
......@@ -1360,17 +1375,19 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
list_for_each_entry(flow, encap_flows, tmp_list) {
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr = flow->attr;
struct mlx5_esw_flow_attr *esw_attr;
struct mlx5_flow_handle *rule;
struct mlx5_flow_attr *attr;
struct mlx5_flow_spec *spec;
if (flow_flag_test(flow, FAILED))
continue;
spec = &flow->attr->parse_attr->spec;
attr = mlx5e_tc_get_encap_attr(flow);
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
spec = &parse_attr->spec;
err = mlx5e_update_vf_tunnel(esw, esw_attr, &parse_attr->mod_hdr_acts,
e->out_dev, e->route_dev_ifindex,
......@@ -1392,9 +1409,18 @@ static void mlx5e_reoffload_encap(struct mlx5e_priv *priv,
esw_attr->dests[flow->tmp_entry_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
if (!mlx5e_tc_flow_all_encaps_valid(esw_attr))
goto offload_to_slow_path;
err = mlx5e_tc_offload_flow_post_acts(flow);
if (err) {
mlx5_core_warn(priv->mdev, "Failed to update flow post acts, %d\n",
err);
goto offload_to_slow_path;
}
/* update from slow path rule to encap rule */
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, attr);
rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, flow->attr);
if (IS_ERR(rule)) {
mlx5e_tc_unoffload_flow_post_acts(flow);
err = PTR_ERR(rule);
mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
err);
......
......@@ -595,6 +595,7 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
rq->mpwqe.log_stride_sz = mlx5e_mpwqe_get_log_stride_size(mdev, params, xsk);
rq->mpwqe.num_strides =
BIT(mlx5e_mpwqe_get_log_num_strides(mdev, params, xsk));
rq->mpwqe.min_wqe_bulk = mlx5e_mpwqe_get_min_wqe_bulk(wq_sz);
rq->buff.frame0_sz = (1 << rq->mpwqe.log_stride_sz);
......
......@@ -55,6 +55,7 @@
#include "diag/en_rep_tracepoint.h"
#include "en_accel/ipsec.h"
#include "en/tc/int_port.h"
#include "en/ptp.h"
#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
......@@ -401,13 +402,18 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
int n, tc, nch, num_sqs = 0;
struct mlx5e_channel *c;
int n, tc, num_sqs = 0;
int err = -ENOMEM;
bool ptp_sq;
u32 *sqs;
sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params),
sizeof(*sqs), GFP_KERNEL);
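	/* The PTP channel's SQs are not part of priv->channels.c[], so account
	 * for them separately when port TX timestamping is enabled.
	 */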
ptp_sq = !!(priv->channels.ptp &&
MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS));
nch = priv->channels.num + ptp_sq;
sqs = kcalloc(nch * mlx5e_get_dcb_num_tc(&priv->channels.params), sizeof(*sqs),
GFP_KERNEL);
if (!sqs)
goto out;
......@@ -416,6 +422,12 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
for (tc = 0; tc < c->num_tc; tc++)
sqs[num_sqs++] = c->sq[tc].sqn;
}
if (ptp_sq) {
struct mlx5e_ptp *ptp_ch = priv->channels.ptp;
for (tc = 0; tc < ptp_ch->num_tc; tc++)
sqs[num_sqs++] = ptp_ch->ptpsq[tc].txqsq.sqn;
}
err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
kfree(sqs);
......@@ -930,15 +942,21 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return err;
}
err = mlx5e_tc_ht_init(&rpriv->tc_ht);
if (err)
goto err_ht_init;
if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
err = mlx5e_init_uplink_rep_tx(rpriv);
if (err)
goto destroy_tises;
goto err_init_tx;
}
return 0;
destroy_tises:
err_init_tx:
mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
err_ht_init:
mlx5e_destroy_tises(priv);
return err;
}
......@@ -958,6 +976,8 @@ static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
if (rpriv->rep->vport == MLX5_VPORT_UPLINK)
mlx5e_cleanup_uplink_rep_tx(rpriv);
mlx5e_tc_ht_cleanup(&rpriv->tc_ht);
}
static void mlx5e_rep_enable(struct mlx5e_priv *priv)
......@@ -1094,6 +1114,7 @@ static mlx5e_stats_grp_t mlx5e_ul_rep_stats_grps[] = {
&MLX5E_STATS_GRP(ipsec_sw),
&MLX5E_STATS_GRP(ipsec_hw),
#endif
&MLX5E_STATS_GRP(ptp),
};
static unsigned int mlx5e_ul_rep_stats_grps_num(struct mlx5e_priv *priv)
......
......@@ -64,11 +64,6 @@ struct mlx5e_tc_tun_encap;
struct mlx5e_post_act;
struct mlx5_rep_uplink_priv {
/* Filters DB - instantiated by the uplink representor and shared by
* the uplink's VFs
*/
struct rhashtable tc_ht;
/* indirect block callbacks are invoked on bind/unbind events
* on registered higher level devices (e.g. tunnel devices)
*
......@@ -113,6 +108,7 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
struct rhashtable tc_ht;
};
static inline
......
......@@ -960,8 +960,7 @@ INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
rq->stats->congst_umr++;
#define UMR_WQE_BULK (2)
if (likely(missing < UMR_WQE_BULK))
if (likely(missing < rq->mpwqe.min_wqe_bulk))
return false;
if (rq->page_pool)
......@@ -1489,7 +1488,7 @@ static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
static inline
struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
u32 frag_size, u16 headroom,
u32 cqe_bcnt)
u32 cqe_bcnt, u32 metasize)
{
struct sk_buff *skb = build_skb(va, frag_size);
......@@ -1501,6 +1500,9 @@ struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
skb_reserve(skb, headroom);
skb_put(skb, cqe_bcnt);
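	/* Record the length of any metadata the XDP program placed in front
	 * of the packet so the stack can access it.
	 */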
if (metasize)
skb_metadata_set(skb, metasize);
return skb;
}
......@@ -1508,7 +1510,7 @@ static void mlx5e_fill_xdp_buff(struct mlx5e_rq *rq, void *va, u16 headroom,
u32 len, struct xdp_buff *xdp)
{
xdp_init_buff(xdp, rq->buff.frame0_sz, &rq->xdp_rxq);
xdp_prepare_buff(xdp, va, headroom, len, false);
xdp_prepare_buff(xdp, va, headroom, len, true);
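	/* meta_valid is now true: the headroom ahead of data may be used by
	 * XDP programs for xdp->data_meta.
	 */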
}
static struct sk_buff *
......@@ -1521,6 +1523,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
u32 metasize;
va = page_address(di->page) + wi->offset;
data = va + rx_headroom;
......@@ -1537,7 +1540,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
rx_headroom = xdp.data - xdp.data_hard_start;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt);
metasize = xdp.data - xdp.data_meta;
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
if (unlikely(!skb))
return NULL;
......@@ -1836,6 +1840,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
struct sk_buff *skb;
void *va, *data;
u32 frag_size;
u32 metasize;
/* Check packet size. Note LRO doesn't use linear SKB */
if (unlikely(cqe_bcnt > rq->hw_mtu)) {
......@@ -1861,7 +1866,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
rx_headroom = xdp.data - xdp.data_hard_start;
frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt32);
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32);
metasize = xdp.data - xdp.data_meta;
skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt32, metasize);
if (unlikely(!skb))
return NULL;
......@@ -1892,7 +1898,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE);
prefetchw(hdr);
prefetch(data);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size);
skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
if (unlikely(!skb))
return NULL;
......
......@@ -2348,7 +2348,7 @@ MLX5E_DEFINE_STATS_GRP(channels, 0);
MLX5E_DEFINE_STATS_GRP(per_port_buff_congest, 0);
MLX5E_DEFINE_STATS_GRP(eth_ext, 0);
static MLX5E_DEFINE_STATS_GRP(tls, 0);
static MLX5E_DEFINE_STATS_GRP(ptp, 0);
MLX5E_DEFINE_STATS_GRP(ptp, 0);
static MLX5E_DEFINE_STATS_GRP(qos, 0);
/* The stats groups order is opposite to the update_stats() order calls */
......
......@@ -459,5 +459,6 @@ extern MLX5E_DECLARE_STATS_GRP(channels);
extern MLX5E_DECLARE_STATS_GRP(per_port_buff_congest);
extern MLX5E_DECLARE_STATS_GRP(ipsec_hw);
extern MLX5E_DECLARE_STATS_GRP(ipsec_sw);
extern MLX5E_DECLARE_STATS_GRP(ptp);
#endif /* __MLX5_EN_STATS_H__ */
......@@ -53,7 +53,6 @@
ESW_FLOW_ATTR_SZ :\
NIC_FLOW_ATTR_SZ)
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags);
struct mlx5e_tc_update_priv {
......@@ -84,6 +83,8 @@ struct mlx5_flow_attr {
u8 tun_ip_version;
int tunnel_id; /* mapped tunnel id */
u32 flags;
struct list_head list;
struct mlx5e_post_act_handle *post_act_handle;
union {
struct mlx5_esw_flow_attr esw_attr[0];
struct mlx5_nic_flow_attr nic_attr[0];
......@@ -167,8 +168,11 @@ enum {
#define MLX5_TC_FLAG(flag) BIT(MLX5E_TC_FLAG_##flag##_BIT)
int mlx5e_tc_esw_init(struct rhashtable *tc_ht);
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht);
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv);
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv);
int mlx5e_tc_ht_init(struct rhashtable *tc_ht);
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht);
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
struct flow_cls_offload *f, unsigned long flags);
......@@ -304,6 +308,8 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
#else /* CONFIG_MLX5_CLS_ACT */
static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {}
static inline int mlx5e_tc_ht_init(struct rhashtable *tc_ht) { return 0; }
static inline void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht) {}
static inline int
mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{ return -EOPNOTSUPP; }
......