Commit 0cf3f4c3 authored by David S. Miller

Merge tag 'mlx5-updates-2017-08-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2017-08-24

This series includes updates to the mlx5 core driver.

From Gal and Saeed, three cleanup patches.
From Matan, low-level flow steering improvements and optimizations:
 - Use more efficient data structures for flow steering object handling.
 - Add tracepoints to flow steering operations (a call-site sketch follows
   the commit metadata below).
 - Overall, these patches improve the flow steering rule insertion rate by a
   factor of seven at large scale (~50K rules or more).

====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 256fbe11 4c03e69a
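For context on the tracepoint patches in the diffs below: TRACE_EVENT() generates a trace_<event>() helper for every event defined in fs_tracepoint.h, and the flow steering core calls those helpers when the corresponding operation happens. The call sites added by this series live in fs_core.c and are not part of the hunks shown here; the following is only a hedged sketch with hypothetical wrapper names:

#include "diag/fs_tracepoint.h"

/* Hypothetical wrappers; the real hooks sit inside the existing flow
 * steering add/delete paths in fs_core.c.
 */
static void example_trace_fg_add(struct mlx5_flow_group *fg)
{
	trace_mlx5_fs_add_fg(fg);		/* a flow group was created */
}

static void example_trace_fte_set(struct fs_fte *fte, bool new_fte)
{
	trace_mlx5_fs_set_fte(fte, new_fte);	/* an entry was written or updated */
}

static void example_trace_rule_add(struct mlx5_flow_rule *rule)
{
	trace_mlx5_fs_add_rule(rule);		/* a destination was attached */
}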
@@ -4,7 +4,8 @@ subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
-fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o
+fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ diag/fs_tracepoint.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
@@ -25,3 +26,5 @@ mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
CFLAGS_tracepoint.o := -I$(src)
/*
* Copyright (c) 2017, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#define CREATE_TRACE_POINTS
#include "fs_tracepoint.h"
#include <linux/stringify.h>
#define DECLARE_MASK_VAL(type, name) struct {type m; type v; } name
#define MASK_VAL(type, spec, name, mask, val, fld) \
DECLARE_MASK_VAL(type, name) = \
{.m = MLX5_GET(spec, mask, fld),\
.v = MLX5_GET(spec, val, fld)}
#define MASK_VAL_BE(type, spec, name, mask, val, fld) \
DECLARE_MASK_VAL(type, name) = \
{.m = MLX5_GET_BE(type, spec, mask, fld),\
.v = MLX5_GET_BE(type, spec, val, fld)}
#define GET_MASKED_VAL(name) (name.m & name.v)
#define GET_MASK_VAL(name, type, mask, val, fld) \
(name.m = MLX5_GET(type, mask, fld), \
name.v = MLX5_GET(type, val, fld), \
name.m & name.v)
#define PRINT_MASKED_VAL(name, p, format) { \
if (name.m) \
trace_seq_printf(p, __stringify(name) "=" format " ", name.v); \
}
#define PRINT_MASKED_VALP(name, cast, p, format) { \
if (name.m) \
trace_seq_printf(p, __stringify(name) "=" format " ", \
(cast)&name.v);\
}
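The macros above implement a simple mask/value convention: each matched field carries its mask (m) and value (v), and a field is printed only when its mask is non-zero, i.e. only when the rule actually matches on it. A standalone illustration of that pattern in plain C (printf instead of trace_seq_printf, invented field values):

#include <stdio.h>
#include <stdint.h>

struct masked_u16 { uint16_t m; uint16_t v; };

static void print_masked(const char *name, struct masked_u16 f)
{
	if (f.m)				/* skip fields the rule does not match on */
		printf("%s=%04x ", name, (unsigned int)f.v);
}

int main(void)
{
	struct masked_u16 ethertype = { .m = 0xffff, .v = 0x0800 };
	struct masked_u16 first_vid = { .m = 0, .v = 0 };

	print_masked("ethertype", ethertype);	/* printed: ethertype=0800 */
	print_masked("first_vid", first_vid);	/* skipped: mask is zero */
	printf("\n");
	return 0;
}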
static void print_lyr_2_4_hdrs(struct trace_seq *p,
const u32 *mask, const u32 *value)
{
#define MASK_VAL_L2(type, name, fld) \
MASK_VAL(type, fte_match_set_lyr_2_4, name, mask, value, fld)
DECLARE_MASK_VAL(u64, smac) = {
.m = MLX5_GET(fte_match_set_lyr_2_4, mask, smac_47_16) << 16 |
MLX5_GET(fte_match_set_lyr_2_4, mask, smac_15_0),
.v = MLX5_GET(fte_match_set_lyr_2_4, value, smac_47_16) << 16 |
MLX5_GET(fte_match_set_lyr_2_4, value, smac_15_0)};
DECLARE_MASK_VAL(u64, dmac) = {
.m = MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_47_16) << 16 |
MLX5_GET(fte_match_set_lyr_2_4, mask, dmac_15_0),
.v = MLX5_GET(fte_match_set_lyr_2_4, value, dmac_47_16) << 16 |
MLX5_GET(fte_match_set_lyr_2_4, value, dmac_15_0)};
MASK_VAL_L2(u16, ethertype, ethertype);
PRINT_MASKED_VALP(smac, u8 *, p, "%pM");
PRINT_MASKED_VALP(dmac, u8 *, p, "%pM");
PRINT_MASKED_VAL(ethertype, p, "%04x");
if (ethertype.m == 0xffff) {
if (ethertype.v == ETH_P_IP) {
#define MASK_VAL_L2_BE(type, name, fld) \
MASK_VAL_BE(type, fte_match_set_lyr_2_4, name, mask, value, fld)
MASK_VAL_L2_BE(u32, src_ipv4,
src_ipv4_src_ipv6.ipv4_layout.ipv4);
MASK_VAL_L2_BE(u32, dst_ipv4,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
PRINT_MASKED_VALP(src_ipv4, typeof(&src_ipv4.v), p,
"%pI4");
PRINT_MASKED_VALP(dst_ipv4, typeof(&dst_ipv4.v), p,
"%pI4");
} else if (ethertype.v == ETH_P_IPV6) {
static const struct in6_addr full_ones = {
.in6_u.u6_addr32 = {htonl(0xffffffff),
htonl(0xffffffff),
htonl(0xffffffff),
htonl(0xffffffff)},
};
DECLARE_MASK_VAL(struct in6_addr, src_ipv6);
DECLARE_MASK_VAL(struct in6_addr, dst_ipv6);
memcpy(src_ipv6.m.in6_u.u6_addr8,
MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
sizeof(src_ipv6.m));
memcpy(dst_ipv6.m.in6_u.u6_addr8,
MLX5_ADDR_OF(fte_match_set_lyr_2_4, mask,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
sizeof(dst_ipv6.m));
memcpy(src_ipv6.v.in6_u.u6_addr8,
MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
src_ipv4_src_ipv6.ipv6_layout.ipv6),
sizeof(src_ipv6.v));
memcpy(dst_ipv6.v.in6_u.u6_addr8,
MLX5_ADDR_OF(fte_match_set_lyr_2_4, value,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
sizeof(dst_ipv6.v));
if (!memcmp(&src_ipv6.m, &full_ones, sizeof(full_ones)))
trace_seq_printf(p, "src_ipv6=%pI6 ",
&src_ipv6.v);
if (!memcmp(&dst_ipv6.m, &full_ones, sizeof(full_ones)))
trace_seq_printf(p, "dst_ipv6=%pI6 ",
&dst_ipv6.v);
}
}
#define PRINT_MASKED_VAL_L2(type, name, fld, p, format) {\
MASK_VAL_L2(type, name, fld); \
PRINT_MASKED_VAL(name, p, format); \
}
PRINT_MASKED_VAL_L2(u8, ip_protocol, ip_protocol, p, "%02x");
PRINT_MASKED_VAL_L2(u16, tcp_flags, tcp_flags, p, "%x");
PRINT_MASKED_VAL_L2(u16, tcp_sport, tcp_sport, p, "%u");
PRINT_MASKED_VAL_L2(u16, tcp_dport, tcp_dport, p, "%u");
PRINT_MASKED_VAL_L2(u16, udp_sport, udp_sport, p, "%u");
PRINT_MASKED_VAL_L2(u16, udp_dport, udp_dport, p, "%u");
PRINT_MASKED_VAL_L2(u16, first_vid, first_vid, p, "%04x");
PRINT_MASKED_VAL_L2(u8, first_prio, first_prio, p, "%x");
PRINT_MASKED_VAL_L2(u8, first_cfi, first_cfi, p, "%d");
PRINT_MASKED_VAL_L2(u8, ip_dscp, ip_dscp, p, "%02x");
PRINT_MASKED_VAL_L2(u8, ip_ecn, ip_ecn, p, "%x");
PRINT_MASKED_VAL_L2(u8, cvlan_tag, cvlan_tag, p, "%d");
PRINT_MASKED_VAL_L2(u8, svlan_tag, svlan_tag, p, "%d");
PRINT_MASKED_VAL_L2(u8, frag, frag, p, "%d");
}
static void print_misc_parameters_hdrs(struct trace_seq *p,
const u32 *mask, const u32 *value)
{
#define MASK_VAL_MISC(type, name, fld) \
MASK_VAL(type, fte_match_set_misc, name, mask, value, fld)
#define PRINT_MASKED_VAL_MISC(type, name, fld, p, format) {\
MASK_VAL_MISC(type, name, fld); \
PRINT_MASKED_VAL(name, p, format); \
}
DECLARE_MASK_VAL(u64, gre_key) = {
.m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 |
MLX5_GET(fte_match_set_misc, mask, gre_key_l),
.v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 |
MLX5_GET(fte_match_set_misc, value, gre_key_l)};
PRINT_MASKED_VAL(gre_key, p, "%llu");
PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u");
PRINT_MASKED_VAL_MISC(u16, source_port, source_port, p, "%u");
PRINT_MASKED_VAL_MISC(u8, outer_second_prio, outer_second_prio,
p, "%u");
PRINT_MASKED_VAL_MISC(u8, outer_second_cfi, outer_second_cfi, p, "%u");
PRINT_MASKED_VAL_MISC(u16, outer_second_vid, outer_second_vid, p, "%u");
PRINT_MASKED_VAL_MISC(u8, inner_second_prio, inner_second_prio,
p, "%u");
PRINT_MASKED_VAL_MISC(u8, inner_second_cfi, inner_second_cfi, p, "%u");
PRINT_MASKED_VAL_MISC(u16, inner_second_vid, inner_second_vid, p, "%u");
PRINT_MASKED_VAL_MISC(u8, outer_second_cvlan_tag,
outer_second_cvlan_tag, p, "%u");
PRINT_MASKED_VAL_MISC(u8, inner_second_cvlan_tag,
inner_second_cvlan_tag, p, "%u");
PRINT_MASKED_VAL_MISC(u8, outer_second_svlan_tag,
outer_second_svlan_tag, p, "%u");
PRINT_MASKED_VAL_MISC(u8, inner_second_svlan_tag,
inner_second_svlan_tag, p, "%u");
PRINT_MASKED_VAL_MISC(u8, gre_protocol, gre_protocol, p, "%u");
PRINT_MASKED_VAL_MISC(u32, vxlan_vni, vxlan_vni, p, "%u");
PRINT_MASKED_VAL_MISC(u32, outer_ipv6_flow_label, outer_ipv6_flow_label,
p, "%x");
PRINT_MASKED_VAL_MISC(u32, inner_ipv6_flow_label, inner_ipv6_flow_label,
p, "%x");
}
const char *parse_fs_hdrs(struct trace_seq *p,
u8 match_criteria_enable,
const u32 *mask_outer,
const u32 *mask_misc,
const u32 *mask_inner,
const u32 *value_outer,
const u32 *value_misc,
const u32 *value_inner)
{
const char *ret = trace_seq_buffer_ptr(p);
if (match_criteria_enable &
1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_OUTER_HEADERS) {
trace_seq_printf(p, "[outer] ");
print_lyr_2_4_hdrs(p, mask_outer, value_outer);
}
if (match_criteria_enable &
1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS) {
trace_seq_printf(p, "[misc] ");
print_misc_parameters_hdrs(p, mask_misc, value_misc);
}
if (match_criteria_enable &
1 << MLX5_CREATE_FLOW_GROUP_IN_MATCH_CRITERIA_ENABLE_INNER_HEADERS) {
trace_seq_printf(p, "[inner] ");
print_lyr_2_4_hdrs(p, mask_inner, value_inner);
}
trace_seq_putc(p, 0);
return ret;
}
const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id)
{
const char *ret = trace_seq_buffer_ptr(p);
switch (dst->type) {
case MLX5_FLOW_DESTINATION_TYPE_VPORT:
trace_seq_printf(p, "vport=%u\n", dst->vport_num);
break;
case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE:
trace_seq_printf(p, "ft=%p\n", dst->ft);
break;
case MLX5_FLOW_DESTINATION_TYPE_TIR:
trace_seq_printf(p, "tir=%u\n", dst->tir_num);
break;
case MLX5_FLOW_DESTINATION_TYPE_COUNTER:
trace_seq_printf(p, "counter_id=%u\n", counter_id);
break;
}
trace_seq_putc(p, 0);
return ret;
}
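Both parse helpers above follow the same trace_seq convention: capture the current write position, append the formatted fields plus a terminating NUL, and return the captured pointer so TP_printk() can pull the whole fragment in through a single %s. A standalone analog of that convention (plain C, a static buffer standing in for struct trace_seq):

#include <stdio.h>

static char seq_buf[256];
static size_t seq_len;

static const char *append_dst(unsigned int vport)
{
	const char *ret = seq_buf + seq_len;	/* like trace_seq_buffer_ptr(p) */

	seq_len += snprintf(seq_buf + seq_len, sizeof(seq_buf) - seq_len,
			    "vport=%u", vport);
	seq_len++;				/* keep the NUL, like trace_seq_putc(p, 0) */
	return ret;
}

int main(void)
{
	printf("[dst] %s\n", append_dst(7));	/* prints "[dst] vport=7" */
	return 0;
}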
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_fg);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fg);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_set_fte);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_fte);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_add_rule);
EXPORT_TRACEPOINT_SYMBOL(mlx5_fs_del_rule);
/*
* Copyright (c) 2017, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#if !defined(_MLX5_FS_TP_) || defined(TRACE_HEADER_MULTI_READ)
#define _MLX5_FS_TP_
#include <linux/tracepoint.h>
#include <linux/trace_seq.h>
#include "../fs_core.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mlx5
#define __parse_fs_hdrs(match_criteria_enable, mouter, mmisc, minner, vouter, \
vinner, vmisc) \
parse_fs_hdrs(p, match_criteria_enable, mouter, mmisc, minner, vouter,\
vinner, vmisc)
const char *parse_fs_hdrs(struct trace_seq *p,
u8 match_criteria_enable,
const u32 *mask_outer,
const u32 *mask_misc,
const u32 *mask_inner,
const u32 *value_outer,
const u32 *value_misc,
const u32 *value_inner);
#define __parse_fs_dst(dst, counter_id) \
parse_fs_dst(p, (const struct mlx5_flow_destination *)dst, counter_id)
const char *parse_fs_dst(struct trace_seq *p,
const struct mlx5_flow_destination *dst,
u32 counter_id);
TRACE_EVENT(mlx5_fs_add_fg,
TP_PROTO(const struct mlx5_flow_group *fg),
TP_ARGS(fg),
TP_STRUCT__entry(
__field(const struct mlx5_flow_group *, fg)
__field(const struct mlx5_flow_table *, ft)
__field(u32, start_index)
__field(u32, end_index)
__field(u32, id)
__field(u8, mask_enable)
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
),
TP_fast_assign(
__entry->fg = fg;
fs_get_obj(__entry->ft, fg->node.parent);
__entry->start_index = fg->start_index;
__entry->end_index = fg->start_index + fg->max_ftes;
__entry->id = fg->id;
__entry->mask_enable = fg->mask.match_criteria_enable;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&fg->mask.match_criteria,
outer_headers),
sizeof(__entry->mask_outer));
memcpy(__entry->mask_inner,
MLX5_ADDR_OF(fte_match_param,
&fg->mask.match_criteria,
inner_headers),
sizeof(__entry->mask_inner));
memcpy(__entry->mask_misc,
MLX5_ADDR_OF(fte_match_param,
&fg->mask.match_criteria,
misc_parameters),
sizeof(__entry->mask_misc));
),
TP_printk("fg=%p ft=%p id=%u start=%u end=%u bit_mask=%02x %s\n",
__entry->fg, __entry->ft, __entry->id,
__entry->start_index, __entry->end_index,
__entry->mask_enable,
__parse_fs_hdrs(__entry->mask_enable,
__entry->mask_outer,
__entry->mask_misc,
__entry->mask_inner,
__entry->mask_outer,
__entry->mask_misc,
__entry->mask_inner))
);
TRACE_EVENT(mlx5_fs_del_fg,
TP_PROTO(const struct mlx5_flow_group *fg),
TP_ARGS(fg),
TP_STRUCT__entry(
__field(const struct mlx5_flow_group *, fg)
__field(u32, id)
),
TP_fast_assign(
__entry->fg = fg;
__entry->id = fg->id;
),
TP_printk("fg=%p id=%u\n",
__entry->fg, __entry->id)
);
#define ACTION_FLAGS \
{MLX5_FLOW_CONTEXT_ACTION_ALLOW, "ALLOW"},\
{MLX5_FLOW_CONTEXT_ACTION_DROP, "DROP"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_DEST, "FWD"},\
{MLX5_FLOW_CONTEXT_ACTION_COUNT, "CNT"},\
{MLX5_FLOW_CONTEXT_ACTION_ENCAP, "ENCAP"},\
{MLX5_FLOW_CONTEXT_ACTION_DECAP, "DECAP"},\
{MLX5_FLOW_CONTEXT_ACTION_MOD_HDR, "MOD_HDR"},\
{MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO, "NEXT_PRIO"}
TRACE_EVENT(mlx5_fs_set_fte,
TP_PROTO(const struct fs_fte *fte, bool new_fte),
TP_ARGS(fte, new_fte),
TP_STRUCT__entry(
__field(const struct fs_fte *, fte)
__field(const struct mlx5_flow_group *, fg)
__field(u32, group_index)
__field(u32, index)
__field(u32, action)
__field(u32, flow_tag)
__field(u8, mask_enable)
__field(bool, new_fte)
__array(u32, mask_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, mask_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
__array(u32, value_outer, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, value_inner, MLX5_ST_SZ_DW(fte_match_set_lyr_2_4))
__array(u32, value_misc, MLX5_ST_SZ_DW(fte_match_set_misc))
),
TP_fast_assign(
__entry->fte = fte;
__entry->new_fte = new_fte;
fs_get_obj(__entry->fg, fte->node.parent);
__entry->group_index = __entry->fg->id;
__entry->index = fte->index;
__entry->action = fte->action;
__entry->mask_enable = __entry->fg->mask.match_criteria_enable;
__entry->flow_tag = fte->flow_tag;
memcpy(__entry->mask_outer,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
outer_headers),
sizeof(__entry->mask_outer));
memcpy(__entry->mask_inner,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
inner_headers),
sizeof(__entry->mask_inner));
memcpy(__entry->mask_misc,
MLX5_ADDR_OF(fte_match_param,
&__entry->fg->mask.match_criteria,
misc_parameters),
sizeof(__entry->mask_misc));
memcpy(__entry->value_outer,
MLX5_ADDR_OF(fte_match_param,
&fte->val,
outer_headers),
sizeof(__entry->value_outer));
memcpy(__entry->value_inner,
MLX5_ADDR_OF(fte_match_param,
&fte->val,
inner_headers),
sizeof(__entry->value_inner));
memcpy(__entry->value_misc,
MLX5_ADDR_OF(fte_match_param,
&fte->val,
misc_parameters),
sizeof(__entry->value_misc));
),
TP_printk("op=%s fte=%p fg=%p index=%u group_index=%u action=<%s> flow_tag=%x %s\n",
__entry->new_fte ? "add" : "set",
__entry->fte, __entry->fg, __entry->index,
__entry->group_index, __print_flags(__entry->action, "|",
ACTION_FLAGS),
__entry->flow_tag,
__parse_fs_hdrs(__entry->mask_enable,
__entry->mask_outer,
__entry->mask_misc,
__entry->mask_inner,
__entry->value_outer,
__entry->value_misc,
__entry->value_inner))
);
TRACE_EVENT(mlx5_fs_del_fte,
TP_PROTO(const struct fs_fte *fte),
TP_ARGS(fte),
TP_STRUCT__entry(
__field(const struct fs_fte *, fte)
__field(u32, index)
),
TP_fast_assign(
__entry->fte = fte;
__entry->index = fte->index;
),
TP_printk("fte=%p index=%u\n",
__entry->fte, __entry->index)
);
TRACE_EVENT(mlx5_fs_add_rule,
TP_PROTO(const struct mlx5_flow_rule *rule),
TP_ARGS(rule),
TP_STRUCT__entry(
__field(const struct mlx5_flow_rule *, rule)
__field(const struct fs_fte *, fte)
__field(u32, sw_action)
__field(u32, index)
__field(u32, counter_id)
__array(u8, destination, sizeof(struct mlx5_flow_destination))
),
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
__entry->index = __entry->fte->dests_size - 1;
__entry->sw_action = rule->sw_action;
memcpy(__entry->destination,
&rule->dest_attr,
sizeof(__entry->destination));
if (rule->dest_attr.type & MLX5_FLOW_DESTINATION_TYPE_COUNTER &&
rule->dest_attr.counter)
__entry->counter_id =
rule->dest_attr.counter->id;
),
TP_printk("rule=%p fte=%p index=%u sw_action=<%s> [dst] %s\n",
__entry->rule, __entry->fte, __entry->index,
__print_flags(__entry->sw_action, "|", ACTION_FLAGS),
__parse_fs_dst(__entry->destination, __entry->counter_id))
);
TRACE_EVENT(mlx5_fs_del_rule,
TP_PROTO(const struct mlx5_flow_rule *rule),
TP_ARGS(rule),
TP_STRUCT__entry(
__field(const struct mlx5_flow_rule *, rule)
__field(const struct fs_fte *, fte)
),
TP_fast_assign(
__entry->rule = rule;
fs_get_obj(__entry->fte, rule->node.parent);
),
TP_printk("rule=%p fte=%p\n",
__entry->rule, __entry->fte)
);
#endif
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH ./diag
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE fs_tracepoint
#include <trace/define_trace.h>
@@ -254,8 +254,8 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
pcie_perf_stats_desc64[i].format);
for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pcie_perf_stall_stats_desc[i].format);
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pcie_perf_stall_stats_desc[i].format);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
......
@@ -188,8 +188,8 @@ static enum mlx5_dev_event port_subtype_event(u8 subtype)
static void eq_update_ci(struct mlx5_eq *eq, int arm)
{
__be32 __iomem *addr = eq->doorbell + (arm ? 0 : 2);
u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24);
__raw_writel((__force u32)cpu_to_be32(val), addr);
/* We still want ordering, just not swabbing, so add a barrier */
mb();
......
@@ -263,7 +263,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->modify_id);
in_match_value = MLX5_ADDR_OF(flow_context, in_flow_context,
match_value);
-memcpy(in_match_value, &fte->val, MLX5_ST_SZ_BYTES(fte_match_param));
+memcpy(in_match_value, &fte->val, sizeof(fte->val));
in_dests = MLX5_ADDR_OF(flow_context, in_flow_context, destination);
if (fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
......
@@ -34,6 +34,7 @@
#define _MLX5_FS_CORE_
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
enum fs_node_type {
FS_TYPE_NAMESPACE,
@@ -118,6 +119,8 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
struct ida fte_allocator;
struct rhltable fgs_hash;
};
struct mlx5_fc_cache {
@@ -143,10 +146,22 @@ struct mlx5_fc {
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600
/* Calculate the fte_match_param length without the reserved length.
 * Make sure the reserved field is the last.
 */
#define MLX5_ST_SZ_DW_MATCH_PARAM \
((MLX5_BYTE_OFF(fte_match_param, MLX5_FTE_MATCH_PARAM_RESERVED) / sizeof(u32)) + \
BUILD_BUG_ON_ZERO(MLX5_ST_SZ_BYTES(fte_match_param) != \
MLX5_FLD_SZ_BYTES(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED) +\
MLX5_BYTE_OFF(fte_match_param, \
MLX5_FTE_MATCH_PARAM_RESERVED)))
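MLX5_ST_SZ_DW_MATCH_PARAM above trims the trailing reserved area off the match parameter size and uses BUILD_BUG_ON_ZERO() so the build fails if the reserved field ever stops being the last member. A standalone sketch of that idiom with a locally defined check and an invented example struct (not the kernel's <linux/build_bug.h> definition):

#include <stdio.h>
#include <stddef.h>

/* Evaluates to 0 when the condition is false; breaks the build when true. */
#define CHECK_ZERO(e)	(sizeof(char[1 - 2 * !!(e)]) - 1)

struct match_example {
	unsigned int used[4];
	unsigned int reserved[2];	/* must stay the last member */
};

/* Usable size in 32-bit words, plus a compile-time proof that nothing
 * follows "reserved" (mirrors the shape of MLX5_ST_SZ_DW_MATCH_PARAM).
 */
#define MATCH_EXAMPLE_DW						\
	(offsetof(struct match_example, reserved) / sizeof(unsigned int) + \
	 CHECK_ZERO(sizeof(struct match_example) !=			\
		    offsetof(struct match_example, reserved) +		\
		    sizeof(((struct match_example *)0)->reserved)))

int main(void)
{
	printf("usable dwords: %zu\n", (size_t)MATCH_EXAMPLE_DW);	/* prints 4 */
	return 0;
}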
/* Type of children is mlx5_flow_rule */
struct fs_fte {
struct fs_node node;
-u32 val[MLX5_ST_SZ_DW(fte_match_param)];
+u32 val[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 dests_size;
u32 flow_tag;
u32 index;
@@ -155,6 +170,7 @@ struct fs_fte {
u32 modify_id;
enum fs_fte_status status;
struct mlx5_fc *counter;
struct rhash_head hash;
};
/* Type of children is mlx5_flow_table/namespace */
@@ -174,7 +190,7 @@ struct mlx5_flow_namespace {
struct mlx5_flow_group_mask {
u8 match_criteria_enable;
-u32 match_criteria[MLX5_ST_SZ_DW(fte_match_param)];
+u32 match_criteria[MLX5_ST_SZ_DW_MATCH_PARAM];
};
/* Type of children is fs_fte */
@@ -183,8 +199,9 @@
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
u32 num_ftes;
u32 id;
struct rhashtable ftes_hash;
struct rhlist_head hash;
};
struct mlx5_flow_root_namespace {
......
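The ftes_hash, fgs_hash, and fte_allocator members added above replace linear list walks with hash lookups (FTEs keyed by their match value, flow groups bucketed by their mask) and IDA-based index allocation, which is where the insertion-rate improvement cited in the cover letter comes from. The exact rhashtable parameters used by fs_core.c are not part of these hunks; the following is only a minimal sketch under that assumption, with hypothetical names:

#include <linux/rhashtable.h>
#include "fs_core.h"	/* struct fs_fte, struct mlx5_flow_group */

/* Hypothetical parameters: key the per-group FTE table on the match value,
 * linking entries through the new fs_fte.hash member.
 */
static const struct rhashtable_params example_ftes_hash_params = {
	.key_len	= sizeof(((struct fs_fte *)0)->val),
	.key_offset	= offsetof(struct fs_fte, val),
	.head_offset	= offsetof(struct fs_fte, hash),
	.automatic_shrinking = true,
};

static int example_init_ftes_hash(struct mlx5_flow_group *fg)
{
	return rhashtable_init(&fg->ftes_hash, &example_ftes_hash_params);
}

static struct fs_fte *example_lookup_fte(struct mlx5_flow_group *fg,
					 const u32 *match_value)
{
	return rhashtable_lookup_fast(&fg->ftes_hash, match_value,
				      example_ftes_hash_params);
}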
@@ -48,7 +48,7 @@
/* helper macros */
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
-#define __mlx5_bit_off(typ, fld) ((unsigned)(unsigned long)(&(__mlx5_nullp(typ)->fld)))
+#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
......
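The __mlx5_bit_off() change above swaps the hand-rolled "address of a field in a NULL pointer" idiom for the standard offsetof() macro: both yield the same offset, but offsetof() is the well-defined form and a proper integer constant expression, which the array sizes and BUILD_BUG_ON_ZERO() check introduced in fs_core.h require. A standalone comparison with an invented example struct:

#include <stdio.h>
#include <stddef.h>

struct example_bits {
	unsigned char a[0x20];
	unsigned char b[0x10];
};

/* The old idiom, shown here only for comparison (formally undefined behavior,
 * though compilers fold it to the expected constant).
 */
#define MANUAL_OFF(typ, fld) \
	((unsigned int)(unsigned long)(&(((struct typ *)0)->fld)))

int main(void)
{
	printf("manual=%u offsetof=%zu\n",
	       MANUAL_OFF(example_bits, b),
	       offsetof(struct example_bits, b));	/* both print 32 */
	return 0;
}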
@@ -890,8 +890,6 @@ static inline void *mlx5_buf_offset(struct mlx5_buf *buf, int offset)
return buf->direct.buf + offset;
}
extern struct workqueue_struct *mlx5_core_wq;
#define STRUCT_FIELD(header, field) \
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
......