Commit 5cc921a3 authored by Jakub Kicinski

Merge branch 'mlx5-updates-2021-01-26'

Saeed Mahameed says:

====================
mlx5 devlink traps support

Add support for devlink trap reporting [1] in mlx5; mlx5 will also
report/trap packets filtered due to a destination MAC steering miss.

The first patch in the series defines the new DMAC trap type in devlink
for this purpose.

The other patches in the series are mlx5-only and gradually add trap support.
Supported traps:

DMAC: Drops due to the destination MAC not being configured in the MAC table
VLAN: Drops due to the VLAN not being configured in the VLAN table

Design note:
The devlink instance is managed by the low-level mlx5 core layer. mlx5 core
serves as an abstraction layer for trap reporting, since multiple mlx5
interfaces may want to report traps on the same device.
====================

Link: https://lore.kernel.org/r/20210126232419.175836-1-saeedm@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 63368a74 eb3862a0
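For orientation, a minimal sketch of the reporting flow the design note
describes, assuming the helpers introduced by the patches below; the wrapper
function here is hypothetical and not part of the series:

/* Sketch only: report a packet dropped by the DMAC filter. The core layer
 * looks the trap up, checks that its action is "trap", and forwards the
 * packet to devlink_trap_report().
 */
static void example_report_dmac_drop(struct mlx5_core_dev *mdev,
				     struct sk_buff *skb,
				     struct devlink_port *dl_port)
{
	mlx5_devlink_trap_report(mdev, DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
				 skb, dl_port);
}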
......@@ -480,6 +480,11 @@ be added to the following table:
- ``drop``
- Traps packets that the device decided to drop in case they hit a
blackhole nexthop
* - ``dmac_filter``
- ``drop``
- Traps incoming packets that the device decided to drop because
the destination MAC is not configured in the MAC table and
the interface is not in promiscuous mode
Driver-specific Packet Traps
============================
......
......@@ -27,7 +27,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_selftest.o en/port.o en/monitor_stats.o en/health.o \
en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \
en/xsk/setup.o en/xsk/rx.o en/xsk/tx.o en/devlink.o en/ptp.o \
en/qos.o
en/qos.o en/trap.o
#
# Netdev extra
......
......@@ -168,6 +168,91 @@ static int mlx5_devlink_reload_up(struct devlink *devlink, enum devlink_reload_a
return 0;
}
static struct mlx5_devlink_trap *mlx5_find_trap_by_id(struct mlx5_core_dev *dev, int trap_id)
{
struct mlx5_devlink_trap *dl_trap;
list_for_each_entry(dl_trap, &dev->priv.traps, list)
if (dl_trap->trap.id == trap_id)
return dl_trap;
return NULL;
}
static int mlx5_devlink_trap_init(struct devlink *devlink, const struct devlink_trap *trap,
void *trap_ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_devlink_trap *dl_trap;
dl_trap = kzalloc(sizeof(*dl_trap), GFP_KERNEL);
if (!dl_trap)
return -ENOMEM;
dl_trap->trap.id = trap->id;
dl_trap->trap.action = DEVLINK_TRAP_ACTION_DROP;
dl_trap->item = trap_ctx;
if (mlx5_find_trap_by_id(dev, trap->id)) {
kfree(dl_trap);
mlx5_core_err(dev, "Devlink trap: Trap 0x%x already found", trap->id);
return -EEXIST;
}
list_add_tail(&dl_trap->list, &dev->priv.traps);
return 0;
}
static void mlx5_devlink_trap_fini(struct devlink *devlink, const struct devlink_trap *trap,
void *trap_ctx)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
struct mlx5_devlink_trap *dl_trap;
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Missing trap id 0x%x", trap->id);
return;
}
list_del(&dl_trap->list);
kfree(dl_trap);
}
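/* Devlink callback for changing a trap's action. The new action is
 * propagated to interface drivers over the blocking notifier chain and
 * rolled back if a consumer fails to apply it.
 */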
static int mlx5_devlink_trap_action_set(struct devlink *devlink,
const struct devlink_trap *trap,
enum devlink_trap_action action,
struct netlink_ext_ack *extack)
{
struct mlx5_core_dev *dev = devlink_priv(devlink);
enum devlink_trap_action action_orig;
struct mlx5_devlink_trap *dl_trap;
int err = 0;
dl_trap = mlx5_find_trap_by_id(dev, trap->id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Set action on invalid trap id 0x%x", trap->id);
err = -EINVAL;
goto out;
}
if (action != DEVLINK_TRAP_ACTION_DROP && action != DEVLINK_TRAP_ACTION_TRAP) {
err = -EOPNOTSUPP;
goto out;
}
if (action == dl_trap->trap.action)
goto out;
action_orig = dl_trap->trap.action;
dl_trap->trap.action = action;
err = mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_TYPE_TRAP,
&dl_trap->trap);
if (err)
dl_trap->trap.action = action_orig;
out:
return err;
}
static const struct devlink_ops mlx5_devlink_ops = {
#ifdef CONFIG_MLX5_ESWITCH
.eswitch_mode_set = mlx5_devlink_eswitch_mode_set,
......@@ -186,8 +271,59 @@ static const struct devlink_ops mlx5_devlink_ops = {
.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
.reload_down = mlx5_devlink_reload_down,
.reload_up = mlx5_devlink_reload_up,
.trap_init = mlx5_devlink_trap_init,
.trap_fini = mlx5_devlink_trap_fini,
.trap_action_set = mlx5_devlink_trap_action_set,
};
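/* Entry point for the RX datapath: forwards a trapped packet to devlink,
 * but only while the trap's configured action is "trap".
 */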
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct devlink_port *dl_port)
{
struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_devlink_trap *dl_trap;
dl_trap = mlx5_find_trap_by_id(dev, trap_id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Report on invalid trap id 0x%x", trap_id);
return;
}
if (dl_trap->trap.action != DEVLINK_TRAP_ACTION_TRAP) {
mlx5_core_dbg(dev, "Devlink trap: Trap id %d has action %d", trap_id,
dl_trap->trap.action);
return;
}
devlink_trap_report(devlink, skb, dl_trap->item, dl_port, NULL);
}
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev)
{
struct mlx5_devlink_trap *dl_trap;
int count = 0;
list_for_each_entry(dl_trap, &dev->priv.traps, list)
if (dl_trap->trap.action == DEVLINK_TRAP_ACTION_TRAP)
count++;
return count;
}
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
enum devlink_trap_action *action)
{
struct mlx5_devlink_trap *dl_trap;
dl_trap = mlx5_find_trap_by_id(dev, trap_id);
if (!dl_trap) {
mlx5_core_err(dev, "Devlink trap: Get action on invalid trap id 0x%x",
trap_id);
return -EINVAL;
}
*action = dl_trap->trap.action;
return 0;
}
struct devlink *mlx5_devlink_alloc(void)
{
return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev));
......@@ -358,6 +494,49 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
#endif
}
#define MLX5_TRAP_DROP(_id, _group_id) \
DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT)
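/* Both supported traps are generic L2 drops; the ingress port is the only
 * metadata reported with each packet.
 */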
static const struct devlink_trap mlx5_traps_arr[] = {
MLX5_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
MLX5_TRAP_DROP(DMAC_FILTER, L2_DROPS),
};
static const struct devlink_trap_group mlx5_trap_groups_arr[] = {
DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
};
static int mlx5_devlink_traps_register(struct devlink *devlink)
{
struct mlx5_core_dev *core_dev = devlink_priv(devlink);
int err;
err = devlink_trap_groups_register(devlink, mlx5_trap_groups_arr,
ARRAY_SIZE(mlx5_trap_groups_arr));
if (err)
return err;
err = devlink_traps_register(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr),
&core_dev->priv);
if (err)
goto err_trap_group;
return 0;
err_trap_group:
devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
ARRAY_SIZE(mlx5_trap_groups_arr));
return err;
}
static void mlx5_devlink_traps_unregister(struct devlink *devlink)
{
devlink_traps_unregister(devlink, mlx5_traps_arr, ARRAY_SIZE(mlx5_traps_arr));
devlink_trap_groups_unregister(devlink, mlx5_trap_groups_arr,
ARRAY_SIZE(mlx5_trap_groups_arr));
}
int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
{
int err;
......@@ -372,8 +551,16 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
goto params_reg_err;
mlx5_devlink_set_params_init_values(devlink);
devlink_params_publish(devlink);
err = mlx5_devlink_traps_register(devlink);
if (err)
goto traps_reg_err;
return 0;
traps_reg_err:
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
params_reg_err:
devlink_unregister(devlink);
return err;
......@@ -381,6 +568,7 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
void mlx5_devlink_unregister(struct devlink *devlink)
{
mlx5_devlink_traps_unregister(devlink);
devlink_params_unregister(devlink, mlx5_devlink_params,
ARRAY_SIZE(mlx5_devlink_params));
devlink_unregister(devlink);
......
......@@ -12,6 +12,24 @@ enum mlx5_devlink_param_id {
MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
};
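/* Trap id/action pair handed to interface drivers via the blocking
 * notifier chain (MLX5_DRIVER_EVENT_TYPE_TRAP).
 */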
struct mlx5_trap_ctx {
int id;
int action;
};
struct mlx5_devlink_trap {
struct mlx5_trap_ctx trap;
void *item;
struct list_head list;
};
struct mlx5_core_dev;
void mlx5_devlink_trap_report(struct mlx5_core_dev *dev, int trap_id, struct sk_buff *skb,
struct devlink_port *dl_port);
int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev);
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id,
enum devlink_trap_action *action);
struct devlink *mlx5_devlink_alloc(void);
void mlx5_devlink_free(struct devlink *devlink);
int mlx5_devlink_register(struct devlink *devlink, struct device *dev);
......
......@@ -564,6 +564,7 @@ typedef bool (*mlx5e_fp_post_rx_wqes)(struct mlx5e_rq *rq);
typedef void (*mlx5e_fp_dealloc_wqe)(struct mlx5e_rq*, u16);
int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk);
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params);
enum mlx5e_rq_flag {
MLX5E_RQ_FLAG_XDP_XMIT,
......@@ -805,6 +806,8 @@ struct mlx5e_htb {
u16 defcls;
};
struct mlx5e_trap;
struct mlx5e_priv {
/* priv data path fields - start */
/* +1 for port ptp ts */
......@@ -844,8 +847,10 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_trap *en_trap;
struct mlx5e_stats stats;
struct mlx5e_channel_stats channel_stats[MLX5E_MAX_NUM_CHANNELS];
struct mlx5e_channel_stats trap_stats;
struct mlx5e_port_ptp_stats port_ptp_stats;
u16 max_nch;
u8 max_opened_tc;
......@@ -854,6 +859,7 @@ struct mlx5e_priv {
u16 q_counter;
u16 drop_rq_q_counter;
struct notifier_block events_nb;
struct notifier_block blocking_events_nb;
int num_tc_x_num_ch;
struct udp_tunnel_nic_info nic_info;
......@@ -961,6 +967,8 @@ int mlx5e_open_rq(struct mlx5e_channel *c, struct mlx5e_params *params,
int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time);
void mlx5e_deactivate_rq(struct mlx5e_rq *rq);
void mlx5e_close_rq(struct mlx5e_rq *rq);
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param);
void mlx5e_destroy_rq(struct mlx5e_rq *rq);
struct mlx5e_sq_param;
int mlx5e_open_icosq(struct mlx5e_channel *c, struct mlx5e_params *params,
......@@ -1072,6 +1080,8 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
struct mlx5e_rq *drop_rq);
void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
void mlx5e_free_di_list(struct mlx5e_rq *rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
......
......@@ -44,6 +44,11 @@ struct mlx5e_l2_rule {
#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
struct mlx5e_promisc_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *rule;
};
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
......@@ -53,6 +58,7 @@ struct mlx5e_vlan_table {
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
struct mlx5_flow_handle *trap_rule;
bool cvlan_filter_disabled;
};
......@@ -62,7 +68,7 @@ struct mlx5e_l2_table {
struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
struct mlx5e_l2_rule broadcast;
struct mlx5e_l2_rule allmulti;
struct mlx5e_l2_rule promisc;
struct mlx5_flow_handle *trap_rule;
bool broadcast_enabled;
bool allmulti_enabled;
bool promisc_enabled;
......@@ -126,7 +132,8 @@ struct mlx5e_ttc_table {
/* NIC prio FTS */
enum {
MLX5E_VLAN_FT_LEVEL = 0,
MLX5E_PROMISC_FT_LEVEL,
MLX5E_VLAN_FT_LEVEL,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
......@@ -241,6 +248,7 @@ struct mlx5e_flow_steering {
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table tc;
struct mlx5e_promisc_table promisc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
......@@ -288,6 +296,10 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt);
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv);
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num);
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv);
#endif /* __MLX5E_FLOW_STEER_H__ */
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies */
#include <net/page_pool.h>
#include "en/txrx.h"
#include "en/params.h"
#include "en/trap.h"
static int mlx5e_trap_napi_poll(struct napi_struct *napi, int budget)
{
struct mlx5e_trap *trap_ctx = container_of(napi, struct mlx5e_trap, napi);
struct mlx5e_ch_stats *ch_stats = trap_ctx->stats;
struct mlx5e_rq *rq = &trap_ctx->rq;
bool busy = false;
int work_done = 0;
ch_stats->poll++;
work_done = mlx5e_poll_rx_cq(&rq->cq, budget);
busy |= work_done == budget;
busy |= rq->post_wqes(rq);
if (busy)
return budget;
if (unlikely(!napi_complete_done(napi, work_done)))
return work_done;
mlx5e_cq_arm(&rq->cq);
return work_done;
}
static int mlx5e_alloc_trap_rq(struct mlx5e_priv *priv, struct mlx5e_rq_param *rqp,
struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
struct mlx5e_ch_stats *ch_stats,
struct mlx5e_rq *rq)
{
void *rqc_wq = MLX5_ADDR_OF(rqc, rqp->rqc, wq);
struct mlx5_core_dev *mdev = priv->mdev;
struct page_pool_params pp_params = {};
int node = dev_to_node(mdev->device);
u32 pool_size;
int wq_sz;
int err;
int i;
rqp->wq.db_numa_node = node;
rq->wq_type = params->rq_wq_type;
rq->pdev = mdev->device;
rq->netdev = priv->netdev;
rq->mdev = mdev;
rq->priv = priv;
rq->stats = stats;
rq->clock = &mdev->clock;
rq->tstamp = &priv->tstamp;
rq->hw_mtu = MLX5E_SW2HW_MTU(params, params->sw_mtu);
xdp_rxq_info_unused(&rq->xdp_rxq);
rq->buff.map_dir = DMA_FROM_DEVICE;
rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, NULL);
pool_size = 1 << params->log_rq_mtu_frames;
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq, &rq->wq_ctrl);
if (err)
return err;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
wq_sz = mlx5_wq_cyc_get_size(&rq->wqe.wq);
rq->wqe.info = rqp->frags_info;
rq->buff.frame0_sz = rq->wqe.info.arr[0].frag_stride;
rq->wqe.frags = kvzalloc_node(array_size(sizeof(*rq->wqe.frags),
(wq_sz << rq->wqe.info.log_num_frags)),
GFP_KERNEL, node);
if (!rq->wqe.frags) {
err = -ENOMEM;
goto err_wq_cyc_destroy;
}
err = mlx5e_init_di_list(rq, wq_sz, node);
if (err)
goto err_free_frags;
rq->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
mlx5e_rq_set_trap_handlers(rq, params);
/* Create a page_pool and register it with rxq */
pp_params.order = 0;
pp_params.flags = 0; /* no internal DMA mapping in page_pool */
pp_params.pool_size = pool_size;
pp_params.nid = node;
pp_params.dev = mdev->device;
pp_params.dma_dir = rq->buff.map_dir;
/* page_pool can be used even when there is no rq->xdp_prog;
* given that page_pool does not handle DMA mapping, there is
* no state to clear. And page_pool gracefully handles
* elevated refcnts.
*/
rq->page_pool = page_pool_create(&pp_params);
if (IS_ERR(rq->page_pool)) {
err = PTR_ERR(rq->page_pool);
rq->page_pool = NULL;
goto err_free_di_list;
}
for (i = 0; i < wq_sz; i++) {
struct mlx5e_rx_wqe_cyc *wqe =
mlx5_wq_cyc_get_wqe(&rq->wqe.wq, i);
int f;
for (f = 0; f < rq->wqe.info.num_frags; f++) {
u32 frag_size = rq->wqe.info.arr[f].frag_size |
MLX5_HW_START_PADDING;
wqe->data[f].byte_count = cpu_to_be32(frag_size);
wqe->data[f].lkey = rq->mkey_be;
}
/* check if num_frags is not a pow of two */
if (rq->wqe.info.num_frags < (1 << rq->wqe.info.log_num_frags)) {
wqe->data[f].byte_count = 0;
wqe->data[f].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
wqe->data[f].addr = 0;
}
}
return 0;
err_free_di_list:
mlx5e_free_di_list(rq);
err_free_frags:
kvfree(rq->wqe.frags);
err_wq_cyc_destroy:
mlx5_wq_destroy(&rq->wq_ctrl);
return err;
}
static void mlx5e_free_trap_rq(struct mlx5e_rq *rq)
{
page_pool_destroy(rq->page_pool);
mlx5e_free_di_list(rq);
kvfree(rq->wqe.frags);
mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_open_trap_rq(struct mlx5e_priv *priv, struct napi_struct *napi,
struct mlx5e_rq_stats *stats, struct mlx5e_params *params,
struct mlx5e_rq_param *rq_param,
struct mlx5e_ch_stats *ch_stats,
struct mlx5e_rq *rq)
{
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_create_cq_param ccp = {};
struct dim_cq_moder trap_moder = {};
struct mlx5e_cq *cq = &rq->cq;
int err;
ccp.node = dev_to_node(mdev->device);
ccp.ch_stats = ch_stats;
ccp.napi = napi;
ccp.ix = 0;
err = mlx5e_open_cq(priv, trap_moder, &rq_param->cqp, &ccp, cq);
if (err)
return err;
err = mlx5e_alloc_trap_rq(priv, rq_param, stats, params, ch_stats, rq);
if (err)
goto err_destroy_cq;
err = mlx5e_create_rq(rq, rq_param);
if (err)
goto err_free_rq;
err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
if (err)
goto err_destroy_rq;
return 0;
err_destroy_rq:
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
err_free_rq:
mlx5e_free_trap_rq(rq);
err_destroy_cq:
mlx5e_close_cq(cq);
return err;
}
static void mlx5e_close_trap_rq(struct mlx5e_rq *rq)
{
mlx5e_destroy_rq(rq);
mlx5e_free_rx_descs(rq);
mlx5e_free_trap_rq(rq);
mlx5e_close_cq(&rq->cq);
}
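/* Direct (non-hashed) TIR dispatching straight to the trap RQ; trap
 * steering rules use its tirn as their destination.
 */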
static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir,
u32 rqn)
{
void *tirc;
int inlen;
u32 *in;
int err;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.td.tdn);
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rqn);
err = mlx5e_create_tir(mdev, tir, in);
kvfree(in);
return err;
}
static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir)
{
mlx5e_destroy_tir(mdev, tir);
}
static void mlx5e_activate_trap_rq(struct mlx5e_rq *rq)
{
set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
}
static void mlx5e_deactivate_trap_rq(struct mlx5e_rq *rq)
{
clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
}
static void mlx5e_build_trap_params(struct mlx5e_priv *priv, struct mlx5e_trap *t)
{
struct mlx5e_params *params = &t->params;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
mlx5e_init_rq_type_params(priv->mdev, params);
params->sw_mtu = priv->netdev->max_mtu;
mlx5e_build_rq_param(priv, params, NULL, &t->rq_param);
}
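/* The trap channel is a single RQ/CQ/NAPI instance, allocated on the node
 * of the device's first completion vector.
 */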
static struct mlx5e_trap *mlx5e_open_trap(struct mlx5e_priv *priv)
{
int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, 0));
struct net_device *netdev = priv->netdev;
struct mlx5e_trap *t;
int err;
t = kvzalloc_node(sizeof(*t), GFP_KERNEL, cpu_to_node(cpu));
if (!t)
return ERR_PTR(-ENOMEM);
mlx5e_build_trap_params(priv, t);
t->priv = priv;
t->mdev = priv->mdev;
t->tstamp = &priv->tstamp;
t->pdev = mlx5_core_dma_dev(priv->mdev);
t->netdev = priv->netdev;
t->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
t->stats = &priv->trap_stats.ch;
netif_napi_add(netdev, &t->napi, mlx5e_trap_napi_poll, 64);
err = mlx5e_open_trap_rq(priv, &t->napi,
&priv->trap_stats.rq,
&t->params, &t->rq_param,
&priv->trap_stats.ch,
&t->rq);
if (unlikely(err))
goto err_napi_del;
err = mlx5e_create_trap_direct_rq_tir(t->mdev, &t->tir, t->rq.rqn);
if (err)
goto err_close_trap_rq;
return t;
err_close_trap_rq:
mlx5e_close_trap_rq(&t->rq);
err_napi_del:
netif_napi_del(&t->napi);
kvfree(t);
return ERR_PTR(err);
}
void mlx5e_close_trap(struct mlx5e_trap *trap)
{
mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir);
mlx5e_close_trap_rq(&trap->rq);
netif_napi_del(&trap->napi);
kvfree(trap);
}
static void mlx5e_activate_trap(struct mlx5e_trap *trap)
{
napi_enable(&trap->napi);
mlx5e_activate_trap_rq(&trap->rq);
napi_schedule(&trap->napi);
}
void mlx5e_deactivate_trap(struct mlx5e_priv *priv)
{
struct mlx5e_trap *trap = priv->en_trap;
mlx5e_deactivate_trap_rq(&trap->rq);
napi_disable(&trap->napi);
}
static struct mlx5e_trap *mlx5e_add_trap_queue(struct mlx5e_priv *priv)
{
struct mlx5e_trap *trap;
trap = mlx5e_open_trap(priv);
if (IS_ERR(trap))
goto out;
mlx5e_activate_trap(trap);
out:
return trap;
}
static void mlx5e_del_trap_queue(struct mlx5e_priv *priv)
{
mlx5e_deactivate_trap(priv);
mlx5e_close_trap(priv->en_trap);
priv->en_trap = NULL;
}
static int mlx5e_trap_get_tirn(struct mlx5e_trap *en_trap)
{
return en_trap->tir.tirn;
}
static int mlx5e_handle_action_trap(struct mlx5e_priv *priv, int trap_id)
{
bool open_queue = !priv->en_trap;
struct mlx5e_trap *trap;
int err;
if (open_queue) {
trap = mlx5e_add_trap_queue(priv);
if (IS_ERR(trap))
return PTR_ERR(trap);
priv->en_trap = trap;
}
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
err = mlx5e_add_vlan_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
err = mlx5e_add_mac_trap(priv, trap_id, mlx5e_trap_get_tirn(priv->en_trap));
if (err)
goto err_out;
break;
default:
netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
err = -EINVAL;
goto err_out;
}
return 0;
err_out:
if (open_queue)
mlx5e_del_trap_queue(priv);
return err;
}
static int mlx5e_handle_action_drop(struct mlx5e_priv *priv, int trap_id)
{
switch (trap_id) {
case DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER:
mlx5e_remove_vlan_trap(priv);
break;
case DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER:
mlx5e_remove_mac_trap(priv);
break;
default:
netdev_warn(priv->netdev, "%s: Unknown trap id %d\n", __func__, trap_id);
return -EINVAL;
}
if (priv->en_trap && !mlx5_devlink_trap_get_num_active(priv->mdev))
mlx5e_del_trap_queue(priv);
return 0;
}
int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx)
{
int err = 0;
/* Traps are unarmed when interface is down, no need to update
* them. The configuration is saved in the core driver,
* queried and applied upon interface up operation in
* mlx5e_open_locked().
*/
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
switch (trap_ctx->action) {
case DEVLINK_TRAP_ACTION_TRAP:
err = mlx5e_handle_action_trap(priv, trap_ctx->id);
break;
case DEVLINK_TRAP_ACTION_DROP:
err = mlx5e_handle_action_drop(priv, trap_ctx->id);
break;
default:
netdev_warn(priv->netdev, "%s: Unsupported action %d\n", __func__,
trap_ctx->action);
err = -EINVAL;
}
return err;
}
static int mlx5e_apply_trap(struct mlx5e_priv *priv, int trap_id, bool enable)
{
enum devlink_trap_action action;
int err;
err = mlx5_devlink_traps_get_action(priv->mdev, trap_id, &action);
if (err)
return err;
if (action == DEVLINK_TRAP_ACTION_TRAP)
err = enable ? mlx5e_handle_action_trap(priv, trap_id) :
mlx5e_handle_action_drop(priv, trap_id);
return err;
}
static const int mlx5e_traps_arr[] = {
DEVLINK_TRAP_GENERIC_ID_INGRESS_VLAN_FILTER,
DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
};
int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable)
{
int err;
int i;
for (i = 0; i < ARRAY_SIZE(mlx5e_traps_arr); i++) {
err = mlx5e_apply_trap(priv, mlx5e_traps_arr[i], enable);
if (err)
return err;
}
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2020, Mellanox Technologies */
#ifndef __MLX5E_TRAP_H__
#define __MLX5E_TRAP_H__
#include "../en.h"
#include "../devlink.h"
struct mlx5e_trap {
/* data path */
struct mlx5e_rq rq;
struct mlx5e_tir tir;
struct napi_struct napi;
struct device *pdev;
struct net_device *netdev;
__be32 mkey_be;
/* data path - accessed per napi poll */
struct mlx5e_ch_stats *stats;
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
struct hwtstamp_config *tstamp;
DECLARE_BITMAP(state, MLX5E_CHANNEL_NUM_STATES);
struct mlx5e_params params;
struct mlx5e_rq_param rq_param;
};
void mlx5e_close_trap(struct mlx5e_trap *trap);
void mlx5e_deactivate_trap(struct mlx5e_priv *priv);
int mlx5e_handle_trap_event(struct mlx5e_priv *priv, struct mlx5_trap_ctx *trap_ctx);
int mlx5e_apply_traps(struct mlx5e_priv *priv, bool enable);
#endif
......@@ -46,7 +46,6 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
enum {
MLX5E_FULLMATCH = 0,
MLX5E_ALLMULTI = 1,
MLX5E_PROMISC = 2,
};
enum {
......@@ -306,6 +305,79 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
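/* Catch-all rule (empty match spec) placed in the table's trap group: any
 * packet reaching it is tagged with the trap id and steered to the trap TIR.
 */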
static struct mlx5_flow_handle *
mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num)
{
struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_handle *rule;
struct mlx5_flow_spec *spec;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return ERR_PTR(-ENOMEM);
spec->flow_context.flags |= FLOW_CONTEXT_HAS_TAG;
spec->flow_context.flow_tag = trap_id;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tir_num;
rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
kvfree(spec);
return rule;
}
int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->fs.vlan.trap_rule = NULL;
netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n",
__func__, err);
return err;
}
priv->fs.vlan.trap_rule = rule;
return 0;
}
void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv)
{
if (priv->fs.vlan.trap_rule) {
mlx5_del_flow_rules(priv->fs.vlan.trap_rule);
priv->fs.vlan.trap_rule = NULL;
}
}
int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num)
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
struct mlx5_flow_handle *rule;
int err;
rule = mlx5e_add_trap_rule(ft, trap_id, tir_num);
if (IS_ERR(rule)) {
err = PTR_ERR(rule);
priv->fs.l2.trap_rule = NULL;
netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n",
__func__, err);
return err;
}
priv->fs.l2.trap_rule = rule;
return 0;
}
void mlx5e_remove_mac_trap(struct mlx5e_priv *priv)
{
if (priv->fs.l2.trap_rule) {
mlx5_del_flow_rules(priv->fs.l2.trap_rule);
priv->fs.l2.trap_rule = NULL;
}
}
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.cvlan_filter_disabled)
......@@ -419,6 +491,8 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state)));
mlx5e_remove_vlan_trap(priv);
/* must be called after DESTROY bit is set and
* set_rx_mode is called and flushed
*/
......@@ -596,6 +670,83 @@ static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
mlx5e_apply_netdev_addr(priv);
}
#define MLX5E_PROMISC_GROUP0_SIZE BIT(0)
#define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE
static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv)
{
struct mlx5_flow_table *ft = priv->fs.promisc.ft.t;
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
dest.ft = priv->fs.ttc.ft.t;
rule_p = &priv->fs.promisc.rule;
*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
if (IS_ERR(*rule_p)) {
err = PTR_ERR(*rule_p);
*rule_p = NULL;
netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__);
}
kvfree(spec);
return err;
}
static int mlx5e_create_promisc_table(struct mlx5e_priv *priv)
{
struct mlx5e_flow_table *ft = &priv->fs.promisc.ft;
struct mlx5_flow_table_attr ft_attr = {};
int err;
ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE;
ft_attr.autogroup.max_num_groups = 1;
ft_attr.level = MLX5E_PROMISC_FT_LEVEL;
ft_attr.prio = MLX5E_NIC_PRIO;
ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr);
if (IS_ERR(ft->t)) {
err = PTR_ERR(ft->t);
netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err);
return err;
}
err = mlx5e_add_promisc_rule(priv);
if (err)
goto err_destroy_promisc_table;
return 0;
err_destroy_promisc_table:
mlx5_destroy_flow_table(ft->t);
ft->t = NULL;
return err;
}
static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv)
{
if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule"))
return;
mlx5_del_flow_rules(priv->fs.promisc.rule);
priv->fs.promisc.rule = NULL;
}
static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv)
{
if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table"))
return;
mlx5e_del_promisc_rule(priv);
mlx5_destroy_flow_table(priv->fs.promisc.ft.t);
priv->fs.promisc.ft.t = NULL;
}
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
......@@ -615,14 +766,15 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
int err;
if (enable_promisc) {
if (!priv->channels.params.vlan_strip_disable)
err = mlx5e_create_promisc_table(priv);
if (err)
enable_promisc = false;
if (!priv->channels.params.vlan_strip_disable && !err)
netdev_warn_once(ndev,
"S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
......@@ -635,11 +787,8 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
if (disable_promisc)
mlx5e_destroy_promisc_table(priv);
ea->promisc_enabled = promisc_enabled;
ea->allmulti_enabled = allmulti_enabled;
......@@ -1306,9 +1455,6 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
mc_dmac[0] = 0x01;
mv_dmac[0] = 0x01;
break;
case MLX5E_PROMISC:
break;
}
ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
......@@ -1325,12 +1471,12 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
}
#define MLX5E_NUM_L2_GROUPS 3
#define MLX5E_L2_GROUP1_SIZE BIT(0)
#define MLX5E_L2_GROUP2_SIZE BIT(15)
#define MLX5E_L2_GROUP3_SIZE BIT(0)
#define MLX5E_L2_GROUP1_SIZE BIT(15)
#define MLX5E_L2_GROUP2_SIZE BIT(0)
#define MLX5E_L2_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_L2_TABLE_SIZE (MLX5E_L2_GROUP1_SIZE +\
MLX5E_L2_GROUP2_SIZE +\
MLX5E_L2_GROUP3_SIZE)
MLX5E_L2_GROUP_TRAP_SIZE)
static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
......@@ -1353,7 +1499,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
outer_headers.dmac_47_16);
/* Flow Group for promiscuous */
/* Flow Group for full match */
eth_broadcast_addr(mc_dmac);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
......@@ -1362,9 +1510,9 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
/* Flow Group for full match */
eth_broadcast_addr(mc_dmac);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
/* Flow Group for allmulti */
eth_zero_addr(mc_dmac);
mc_dmac[0] = 0x01;
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
......@@ -1373,11 +1521,10 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
goto err_destroy_groups;
ft->num_groups++;
/* Flow Group for allmulti */
eth_zero_addr(mc_dmac);
mc_dmac[0] = 0x01;
/* Flow Group for l2 traps */
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_L2_GROUP3_SIZE;
ix += MLX5E_L2_GROUP_TRAP_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
......@@ -1435,15 +1582,17 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
return err;
}
#define MLX5E_NUM_VLAN_GROUPS 4
#define MLX5E_NUM_VLAN_GROUPS 5
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
#define MLX5E_VLAN_GROUP_TRAP_SIZE BIT(0) /* must be last */
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
MLX5E_VLAN_GROUP2_SIZE +\
MLX5E_VLAN_GROUP3_SIZE)
MLX5E_VLAN_GROUP3_SIZE +\
MLX5E_VLAN_GROUP_TRAP_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
......@@ -1498,6 +1647,15 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
memset(in, 0, inlen);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP_TRAP_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
if (IS_ERR(ft->g[ft->num_groups]))
goto err_destroy_groups;
ft->num_groups++;
return 0;
err_destroy_groups:
......
......@@ -66,6 +66,7 @@
#include "lib/mlx5.h"
#include "en/ptp.h"
#include "qos.h"
#include "en/trap.h"
bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
......@@ -212,6 +213,33 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
mlx5_notifier_unregister(priv->mdev, &priv->events_nb);
}
static int blocking_event(struct notifier_block *nb, unsigned long event, void *data)
{
struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, blocking_events_nb);
int err;
switch (event) {
case MLX5_DRIVER_EVENT_TYPE_TRAP:
err = mlx5e_handle_trap_event(priv, data);
break;
default:
netdev_warn(priv->netdev, "Sync event: Unknouwn event %ld\n", event);
err = -EINVAL;
}
return err;
}
static void mlx5e_enable_blocking_events(struct mlx5e_priv *priv)
{
priv->blocking_events_nb.notifier_call = blocking_event;
mlx5_blocking_notifier_register(priv->mdev, &priv->blocking_events_nb);
}
static void mlx5e_disable_blocking_events(struct mlx5e_priv *priv)
{
mlx5_blocking_notifier_unregister(priv->mdev, &priv->blocking_events_nb);
}
static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5e_icosq *sq,
struct mlx5e_umr_wqe *wqe)
......@@ -343,13 +371,11 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
prev->last_in_page = true;
}
static int mlx5e_init_di_list(struct mlx5e_rq *rq,
int wq_sz, int cpu)
int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
{
int len = wq_sz << rq->wqe.info.log_num_frags;
rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)),
GFP_KERNEL, cpu_to_node(cpu));
rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
if (!rq->wqe.di)
return -ENOMEM;
......@@ -358,7 +384,7 @@ static int mlx5e_init_di_list(struct mlx5e_rq *rq,
return 0;
}
static void mlx5e_free_di_list(struct mlx5e_rq *rq)
void mlx5e_free_di_list(struct mlx5e_rq *rq)
{
kvfree(rq->wqe.di);
}
......@@ -500,7 +526,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
err = mlx5e_init_di_list(rq, wq_sz, c->cpu);
err = mlx5e_init_di_list(rq, wq_sz, cpu_to_node(c->cpu));
if (err)
goto err_rq_frags;
......@@ -651,8 +677,7 @@ static void mlx5e_free_rq(struct mlx5e_rq *rq)
mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_create_rq(struct mlx5e_rq *rq,
struct mlx5e_rq_param *param)
int mlx5e_create_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
struct mlx5_core_dev *mdev = rq->mdev;
......@@ -775,7 +800,7 @@ static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
......@@ -3222,6 +3247,7 @@ int mlx5e_open_locked(struct net_device *netdev)
priv->profile->update_rx(priv);
mlx5e_activate_priv_channels(priv);
mlx5e_apply_traps(priv, true);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
......@@ -3257,6 +3283,7 @@ int mlx5e_close_locked(struct net_device *netdev)
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
mlx5e_apply_traps(priv, false);
clear_bit(MLX5E_STATE_OPENED, &priv->state);
netif_carrier_off(priv->netdev);
......@@ -5344,6 +5371,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5_lag_add(mdev, netdev);
mlx5e_enable_async_events(priv);
mlx5e_enable_blocking_events(priv);
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_init(priv);
......@@ -5381,6 +5409,12 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
if (mlx5e_monitor_counter_supported(priv))
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_blocking_events(priv);
if (priv->en_trap) {
mlx5e_deactivate_trap(priv);
mlx5e_close_trap(priv->en_trap);
priv->en_trap = NULL;
}
mlx5e_disable_async_events(priv);
mlx5_lag_remove(mdev);
mlx5_vxlan_reset_to_default(mdev->vxlan);
......
......@@ -52,6 +52,7 @@
#include "en/xsk/rx.h"
#include "en/health.h"
#include "en/params.h"
#include "devlink.h"
static struct sk_buff *
mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
......@@ -1815,3 +1816,48 @@ int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool
return 0;
}
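/* RX completion handler for the trap RQ: rebuild the skb, recover the trap
 * id from the CQE flow tag, report the packet to devlink, then free it.
 */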
static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
{
struct mlx5e_priv *priv = netdev_priv(rq->netdev);
struct mlx5_wq_cyc *wq = &rq->wqe.wq;
struct mlx5e_wqe_frag_info *wi;
struct sk_buff *skb;
u32 cqe_bcnt;
u16 trap_id;
u16 ci;
trap_id = get_cqe_flow_tag(cqe);
ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
wi = get_frag(rq, ci);
cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
rq->stats->wqe_err++;
goto free_wqe;
}
skb = mlx5e_skb_from_cqe_nonlinear(rq, cqe, wi, cqe_bcnt);
if (!skb)
goto free_wqe;
mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
skb_push(skb, ETH_HLEN);
mlx5_devlink_trap_report(rq->mdev, trap_id, skb, &priv->dl_port);
dev_kfree_skb_any(skb);
free_wqe:
mlx5e_free_rx_wqe(rq, wi, false);
mlx5_wq_cyc_pop(wq);
}
void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
{
rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(params, NULL) ?
mlx5e_skb_from_cqe_linear :
mlx5e_skb_from_cqe_nonlinear;
rq->post_wqes = mlx5e_post_rx_wqes;
rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
}
......@@ -23,7 +23,7 @@ static int temp_warn(struct notifier_block *, unsigned long, void *);
static int port_module(struct notifier_block *, unsigned long, void *);
static int pcie_core(struct notifier_block *, unsigned long, void *);
/* handler which forwards the event to events->nh, driver notifiers */
/* handler which forwards the event to events->fw_nh, driver notifiers */
static int forward_event(struct notifier_block *, unsigned long, void *);
static struct mlx5_nb events_nbs_ref[] = {
......@@ -55,12 +55,14 @@ struct mlx5_events {
struct mlx5_core_dev *dev;
struct workqueue_struct *wq;
struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)];
/* driver notifier chain */
struct atomic_notifier_head nh;
/* driver notifier chain for fw events */
struct atomic_notifier_head fw_nh;
/* port module events stats */
struct mlx5_pme_stats pme_stats;
/*pcie_core*/
struct work_struct pcie_core_work;
/* driver notifier chain for sw events */
struct blocking_notifier_head sw_nh;
};
static const char *eqe_type_str(u8 type)
......@@ -331,7 +333,7 @@ static int forward_event(struct notifier_block *nb, unsigned long event, void *d
mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n",
eqe_type_str(eqe->type), eqe->sub_type);
atomic_notifier_call_chain(&events->nh, event, data);
atomic_notifier_call_chain(&events->fw_nh, event, data);
return NOTIFY_OK;
}
......@@ -342,7 +344,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
if (!events)
return -ENOMEM;
ATOMIC_INIT_NOTIFIER_HEAD(&events->nh);
ATOMIC_INIT_NOTIFIER_HEAD(&events->fw_nh);
events->dev = dev;
dev->priv.events = events;
events->wq = create_singlethread_workqueue("mlx5_events");
......@@ -351,6 +353,7 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
return -ENOMEM;
}
INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
BLOCKING_INIT_NOTIFIER_HEAD(&events->sw_nh);
return 0;
}
......@@ -383,11 +386,14 @@ void mlx5_events_stop(struct mlx5_core_dev *dev)
flush_workqueue(events->wq);
}
/* This API is used only for processing and forwarding firmware
* events to mlx5 consumers.
*/
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
struct mlx5_events *events = dev->priv.events;
return atomic_notifier_chain_register(&events->nh, nb);
return atomic_notifier_chain_register(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_register);
......@@ -395,11 +401,36 @@ int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *n
{
struct mlx5_events *events = dev->priv.events;
return atomic_notifier_chain_unregister(&events->nh, nb);
return atomic_notifier_chain_unregister(&events->fw_nh, nb);
}
EXPORT_SYMBOL(mlx5_notifier_unregister);
int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data)
{
return atomic_notifier_call_chain(&events->nh, event, data);
return atomic_notifier_call_chain(&events->fw_nh, event, data);
}
/* This API is used only for processing and forwarding driver-specific
* events to mlx5 consumers.
*/
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
struct mlx5_events *events = dev->priv.events;
return blocking_notifier_chain_register(&events->sw_nh, nb);
}
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb)
{
struct mlx5_events *events = dev->priv.events;
return blocking_notifier_chain_unregister(&events->sw_nh, nb);
}
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
void *data)
{
struct mlx5_events *events = dev->priv.events;
return blocking_notifier_call_chain(&events->sw_nh, event, data);
}
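For illustration, a minimal sketch of a hypothetical consumer of the new
blocking chain (mlx5e registers an equivalent handler in en_main.c above);
the handler and notifier_block names are assumptions, not part of the patch:

/* Hypothetical consumer of the blocking (sleepable) driver-event chain.
 * An error return from the handler is propagated back and makes
 * mlx5_devlink_trap_action_set() roll the trap action back.
 */
static int example_sw_event(struct notifier_block *nb, unsigned long event,
			    void *data)
{
	struct mlx5_trap_ctx *trap_ctx = data;

	if (event != MLX5_DRIVER_EVENT_TYPE_TRAP)
		return NOTIFY_DONE;

	/* apply trap_ctx->id / trap_ctx->action here */
	return 0;
}

static struct notifier_block example_nb = { .notifier_call = example_sw_event };

/* mlx5_blocking_notifier_register(dev, &example_nb) at init time,
 * mlx5_blocking_notifier_unregister(dev, &example_nb) at teardown.
 */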
......@@ -105,8 +105,8 @@
#define ETHTOOL_PRIO_NUM_LEVELS 1
#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 6
/* Promiscuous, Vlan, mac, ttc, inner ttc, {aRFS/accel and esp/esp_err} */
#define KERNEL_NIC_PRIO_NUM_LEVELS 7
#define KERNEL_NIC_NUM_PRIOS 1
/* One more level for tc */
#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)
......
......@@ -1305,6 +1305,8 @@ static int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
priv->dbg_root = debugfs_create_dir(dev_name(dev->device),
mlx5_debugfs_root);
INIT_LIST_HEAD(&priv->traps);
err = mlx5_health_init(dev);
if (err)
goto err_health_init;
......
......@@ -359,6 +359,10 @@ enum mlx5_event {
MLX5_EVENT_TYPE_MAX = 0x100,
};
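/* Software (driver-generated) events, delivered on the blocking notifier
 * chain rather than the atomic FW event chain.
 */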
enum mlx5_driver_event {
MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
};
enum {
MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
......@@ -899,6 +903,11 @@ static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
return (u64)lo | ((u64)hi << 32);
}
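/* The flow tag set by a steering rule is carried in the low 12 bits of
 * sop_drop_qpn; trap rules set it to the trap id.
 */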
static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
{
return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE (9)
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE (6)
......
......@@ -564,6 +564,7 @@ struct mlx5_priv {
int host_pf_pages;
struct mlx5_core_health health;
struct list_head traps;
/* start: qp staff */
struct dentry *qp_debugfs;
......@@ -1072,11 +1073,26 @@ enum {
MAX_MR_CACHE_ENTRIES
};
/* Async-atomic event notifier used by mlx5 core to forward FW
* events received from the event queue to mlx5 consumers.
* Optimises event queue dispatching.
*/
int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
/* Async-atomic event notifier used for forwarding
* events from the event queue to the mlx5 events dispatcher,
* eswitch, clock and others.
*/
int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb);
/* Blocking event notifier used to forward SW events, used for slow path */
int mlx5_blocking_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb);
int mlx5_blocking_notifier_call_chain(struct mlx5_core_dev *dev, unsigned int event,
void *data);
int mlx5_core_query_vendor_id(struct mlx5_core_dev *mdev, u32 *vendor_id);
int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev);
......
......@@ -838,6 +838,7 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_GTP_PARSING,
DEVLINK_TRAP_GENERIC_ID_ESP_PARSING,
DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_NEXTHOP,
DEVLINK_TRAP_GENERIC_ID_DMAC_FILTER,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
......@@ -1063,6 +1064,8 @@ enum devlink_trap_group_generic_id {
"esp_parsing"
#define DEVLINK_TRAP_GENERIC_NAME_BLACKHOLE_NEXTHOP \
"blackhole_nexthop"
#define DEVLINK_TRAP_GENERIC_NAME_DMAC_FILTER \
"dest_mac_filter"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
......
......@@ -9512,6 +9512,7 @@ static const struct devlink_trap devlink_trap_generic[] = {
DEVLINK_TRAP(GTP_PARSING, DROP),
DEVLINK_TRAP(ESP_PARSING, DROP),
DEVLINK_TRAP(BLACKHOLE_NEXTHOP, DROP),
DEVLINK_TRAP(DMAC_FILTER, DROP),
};
#define DEVLINK_TRAP_GROUP(_id) \
......