Commit c371e7b4 authored by David S. Miller's avatar David S. Miller

Merge tag 'mlx5e-updates-2018-08-10' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-08-10

This series provides the following updates to mlx5e netdevice driver.

1) First 4 patches extends the support for ethtool rxnfc flow steering
   - Added ipv6 support
   - l4 proto ip field for both ip6 and ip4

2) Next 4 patches, reorganizing flow steering structures and declaration into
one header file, and add two Kconfig flags to allow disabling/enabling mlx5
netdevice rx flow steering at compile time:
CONFIG_MLX5_EN_ARFS for en_arfs.c
CONFIG_MLX5_EN_RXNFC for en_fs_ehtool.c

3) More kconfig flags dependencies
- vxlan.c depends on CONFIG_VXLAN
- clock.c depends on CONFIG_PTP_1588_CLOCK

4) Reorganize the Makefile
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 42c625a4 cf916ffb
...@@ -7,6 +7,7 @@ config MLX5_CORE ...@@ -7,6 +7,7 @@ config MLX5_CORE
depends on MAY_USE_DEVLINK depends on MAY_USE_DEVLINK
depends on PCI depends on PCI
imply PTP_1588_CLOCK imply PTP_1588_CLOCK
imply VXLAN
default n default n
---help--- ---help---
Core driver for low level functionality of the ConnectX-4 and Core driver for low level functionality of the ConnectX-4 and
...@@ -35,6 +36,24 @@ config MLX5_CORE_EN ...@@ -35,6 +36,24 @@ config MLX5_CORE_EN
---help--- ---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC. Ethernet support in Mellanox Technologies ConnectX-4 NIC.
config MLX5_EN_ARFS
bool "Mellanox MLX5 ethernet accelerated receive flow steering (ARFS) support"
depends on MLX5_CORE_EN && RFS_ACCEL
default y
---help---
Mellanox MLX5 ethernet hardware-accelerated receive flow steering support,
Enables ethernet netdevice arfs support and ntuple filtering.
config MLX5_EN_RXNFC
bool "Mellanox MLX5 ethernet rx nfc flow steering support"
depends on MLX5_CORE_EN
default y
---help---
Mellanox MLX5 ethernet rx nfc flow steering support
Enables ethtool receive network flow classification, which allows user defined
flow rules to direct traffic into arbitrary rx queue via ethtool set/get_rxnfc
API.
config MLX5_MPFS config MLX5_MPFS
bool "Mellanox Technologies MLX5 MPFS support" bool "Mellanox Technologies MLX5 MPFS support"
depends on MLX5_CORE_EN depends on MLX5_CORE_EN
......
# SPDX-License-Identifier: GPL-2.0 # SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o #
# Makefile for Mellanox 5th generation network adapters
# (ConnectX series) core & netdev driver
#
subdir-ccflags-y += -I$(src) subdir-ccflags-y += -I$(src)
obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
#
# mlx5 core basic
#
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
diag/fs_tracepoint.o diag/fw_tracer.o diag/fs_tracepoint.o diag/fw_tracer.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o #
# Netdev basic
mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \ #
fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \
en_arfs.o en_fs_ethtool.o en_selftest.o en/port.o lib/vxlan.o en_selftest.o en/port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o #
# Netdev extra
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.o #
mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o
mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o
#
# Core extra
#
mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o
mlx5_core-$(CONFIG_PTP_1588_CLOCK) += lib/clock.o
#
# Ipoib netdev
#
mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o en/port_buffer.o #
# Accelerations & FPGA
#
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o accel/tls.o
mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o fpga/tls.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \ mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o en_accel/ipsec_stats.o
mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o mlx5_core-$(CONFIG_MLX5_EN_TLS) += en_accel/tls.o en_accel/tls_rxtx.o en_accel/tls_stats.o
CFLAGS_tracepoint.o := -I$(src) CFLAGS_tracepoint.o := -I$(src)
...@@ -52,6 +52,7 @@ ...@@ -52,6 +52,7 @@
#include "wq.h" #include "wq.h"
#include "mlx5_core.h" #include "mlx5_core.h"
#include "en_stats.h" #include "en_stats.h"
#include "en/fs.h"
struct page_pool; struct page_pool;
...@@ -626,152 +627,12 @@ struct mlx5e_channel_stats { ...@@ -626,152 +627,12 @@ struct mlx5e_channel_stats {
struct mlx5e_xdpsq_stats xdpsq; struct mlx5e_xdpsq_stats xdpsq;
} ____cacheline_aligned_in_smp; } ____cacheline_aligned_in_smp;
enum mlx5e_traffic_types {
MLX5E_TT_IPV4_TCP,
MLX5E_TT_IPV6_TCP,
MLX5E_TT_IPV4_UDP,
MLX5E_TT_IPV6_UDP,
MLX5E_TT_IPV4_IPSEC_AH,
MLX5E_TT_IPV6_IPSEC_AH,
MLX5E_TT_IPV4_IPSEC_ESP,
MLX5E_TT_IPV6_IPSEC_ESP,
MLX5E_TT_IPV4,
MLX5E_TT_IPV6,
MLX5E_TT_ANY,
MLX5E_NUM_TT,
MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
enum mlx5e_tunnel_types {
MLX5E_TT_IPV4_GRE,
MLX5E_TT_IPV6_GRE,
MLX5E_NUM_TUNNEL_TT,
};
enum { enum {
MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_ASYNC_EVENTS_ENABLED,
MLX5E_STATE_OPENED, MLX5E_STATE_OPENED,
MLX5E_STATE_DESTROYING, MLX5E_STATE_DESTROYING,
}; };
struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
struct mlx5_flow_handle *rule;
};
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
};
#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
struct mlx5e_tc_table {
struct mlx5_flow_table *t;
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
DECLARE_BITMAP(active_svlans, VLAN_N_VID);
struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
bool cvlan_filter_disabled;
};
struct mlx5e_l2_table {
struct mlx5e_flow_table ft;
struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
struct mlx5e_l2_rule broadcast;
struct mlx5e_l2_rule allmulti;
struct mlx5e_l2_rule promisc;
bool broadcast_enabled;
bool allmulti_enabled;
bool promisc_enabled;
};
/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
};
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
ARFS_IPV4_UDP,
ARFS_IPV6_UDP,
ARFS_NUM_TYPES,
};
struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};
/* NIC prio FTS */
enum {
MLX5E_VLAN_FT_LEVEL = 0,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
MLX5E_ARFS_FT_LEVEL
};
enum {
MLX5E_TC_FT_LEVEL = 0,
MLX5E_TC_TTC_FT_LEVEL,
};
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
};
#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4
struct mlx5e_ethtool_steering {
struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
struct list_head rules;
int tot_num_rules;
};
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
struct mlx5e_ethtool_steering ethtool;
struct mlx5e_tc_table tc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
struct mlx5e_ttc_table inner_ttc;
struct mlx5e_arfs_tables arfs;
};
struct mlx5e_rqt { struct mlx5e_rqt {
u32 rqtn; u32 rqtn;
bool enabled; bool enabled;
...@@ -905,23 +766,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, ...@@ -905,23 +766,10 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
void mlx5e_update_stats(struct mlx5e_priv *priv); void mlx5e_update_stats(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
void mlx5e_init_l2_addr(struct mlx5e_priv *priv); void mlx5e_init_l2_addr(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
int mlx5e_self_test_num(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv);
void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest,
u64 *buf); u64 *buf);
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
int location);
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs);
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs);
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
int location);
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work); void mlx5e_set_rx_mode_work(struct work_struct *work);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr); int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
...@@ -932,8 +780,6 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto, ...@@ -932,8 +780,6 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid); u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid); u16 vid);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_timestamp_init(struct mlx5e_priv *priv); void mlx5e_timestamp_init(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param { struct mlx5e_redirect_rqt_param {
...@@ -1050,32 +896,6 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv); ...@@ -1050,32 +896,6 @@ void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv); void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif #endif
#ifndef CONFIG_RFS_ACCEL
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv)
{
return 0;
}
static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
return -EOPNOTSUPP;
}
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv)
{
return -EOPNOTSUPP;
}
#else
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#endif
int mlx5e_create_tir(struct mlx5_core_dev *mdev, int mlx5e_create_tir(struct mlx5_core_dev *mdev,
struct mlx5e_tir *tir, u32 *in, int inlen); struct mlx5e_tir *tir, u32 *in, int inlen);
void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, void mlx5e_destroy_tir(struct mlx5_core_dev *mdev,
...@@ -1096,27 +916,6 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv); ...@@ -1096,27 +916,6 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv); void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
struct ttc_params {
struct mlx5_flow_table_attr ft_attr;
u32 any_tt_tirn;
u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_ttc_table *inner_ttc;
};
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn); u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
......
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/* Copyright (c) 2018 Mellanox Technologies. */
#ifndef __MLX5E_FLOW_STEER_H__
#define __MLX5E_FLOW_STEER_H__
enum {
MLX5E_TC_FT_LEVEL = 0,
MLX5E_TC_TTC_FT_LEVEL,
};
struct mlx5e_tc_table {
struct mlx5_flow_table *t;
struct rhashtable ht;
DECLARE_HASHTABLE(mod_hdr_tbl, 8);
DECLARE_HASHTABLE(hairpin_tbl, 8);
};
struct mlx5e_flow_table {
int num_groups;
struct mlx5_flow_table *t;
struct mlx5_flow_group **g;
};
struct mlx5e_l2_rule {
u8 addr[ETH_ALEN + 2];
struct mlx5_flow_handle *rule;
};
#define MLX5E_L2_ADDR_HASH_SIZE BIT(BITS_PER_BYTE)
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
DECLARE_BITMAP(active_svlans, VLAN_N_VID);
struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
bool cvlan_filter_disabled;
};
struct mlx5e_l2_table {
struct mlx5e_flow_table ft;
struct hlist_head netdev_uc[MLX5E_L2_ADDR_HASH_SIZE];
struct hlist_head netdev_mc[MLX5E_L2_ADDR_HASH_SIZE];
struct mlx5e_l2_rule broadcast;
struct mlx5e_l2_rule allmulti;
struct mlx5e_l2_rule promisc;
bool broadcast_enabled;
bool allmulti_enabled;
bool promisc_enabled;
};
enum mlx5e_traffic_types {
MLX5E_TT_IPV4_TCP,
MLX5E_TT_IPV6_TCP,
MLX5E_TT_IPV4_UDP,
MLX5E_TT_IPV6_UDP,
MLX5E_TT_IPV4_IPSEC_AH,
MLX5E_TT_IPV6_IPSEC_AH,
MLX5E_TT_IPV4_IPSEC_ESP,
MLX5E_TT_IPV6_IPSEC_ESP,
MLX5E_TT_IPV4,
MLX5E_TT_IPV6,
MLX5E_TT_ANY,
MLX5E_NUM_TT,
MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY,
};
enum mlx5e_tunnel_types {
MLX5E_TT_IPV4_GRE,
MLX5E_TT_IPV6_GRE,
MLX5E_NUM_TUNNEL_TT,
};
/* L3/L4 traffic type classifier */
struct mlx5e_ttc_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *rules[MLX5E_NUM_TT];
struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT];
};
/* NIC prio FTS */
enum {
MLX5E_VLAN_FT_LEVEL = 0,
MLX5E_L2_FT_LEVEL,
MLX5E_TTC_FT_LEVEL,
MLX5E_INNER_TTC_FT_LEVEL,
#ifdef CONFIG_MLX5_EN_ARFS
MLX5E_ARFS_FT_LEVEL
#endif
};
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_table {
struct mlx5_flow_table *ft;
int num_rules;
};
#define ETHTOOL_NUM_L3_L4_FTS 7
#define ETHTOOL_NUM_L2_FTS 4
struct mlx5e_ethtool_steering {
struct mlx5e_ethtool_table l3_l4_ft[ETHTOOL_NUM_L3_L4_FTS];
struct mlx5e_ethtool_table l2_ft[ETHTOOL_NUM_L2_FTS];
struct list_head rules;
int tot_num_rules;
};
void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd);
int mlx5e_get_rxnfc(struct net_device *dev,
struct ethtool_rxnfc *info, u32 *rule_locs);
#else
static inline void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { }
static inline void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) { }
#endif /* CONFIG_MLX5_EN_RXNFC */
#ifdef CONFIG_MLX5_EN_ARFS
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)
struct arfs_table {
struct mlx5e_flow_table ft;
struct mlx5_flow_handle *default_rule;
struct hlist_head rules_hash[ARFS_HASH_SIZE];
};
enum arfs_type {
ARFS_IPV4_TCP,
ARFS_IPV6_TCP,
ARFS_IPV4_UDP,
ARFS_IPV6_UDP,
ARFS_NUM_TYPES,
};
struct mlx5e_arfs_tables {
struct arfs_table arfs_tables[ARFS_NUM_TYPES];
/* Protect aRFS rules list */
spinlock_t arfs_lock;
struct list_head rules;
int last_filter_id;
struct workqueue_struct *wq;
};
int mlx5e_arfs_create_tables(struct mlx5e_priv *priv);
void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv);
int mlx5e_arfs_enable(struct mlx5e_priv *priv);
int mlx5e_arfs_disable(struct mlx5e_priv *priv);
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
#else
static inline int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) { return 0; }
static inline void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) {}
static inline int mlx5e_arfs_enable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSUPP; }
#endif
struct mlx5e_flow_steering {
struct mlx5_flow_namespace *ns;
#ifdef CONFIG_MLX5_EN_RXNFC
struct mlx5e_ethtool_steering ethtool;
#endif
struct mlx5e_tc_table tc;
struct mlx5e_vlan_table vlan;
struct mlx5e_l2_table l2;
struct mlx5e_ttc_table ttc;
struct mlx5e_ttc_table inner_ttc;
#ifdef CONFIG_MLX5_EN_ARFS
struct mlx5e_arfs_tables arfs;
#endif
};
struct ttc_params {
struct mlx5_flow_table_attr ft_attr;
u32 any_tt_tirn;
u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_ttc_table *inner_ttc;
};
void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
struct mlx5e_ttc_table *ttc);
void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft);
void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
#endif /* __MLX5E_FLOW_STEER_H__ */
...@@ -30,8 +30,6 @@ ...@@ -30,8 +30,6 @@
* SOFTWARE. * SOFTWARE.
*/ */
#ifdef CONFIG_RFS_ACCEL
#include <linux/hash.h> #include <linux/hash.h>
#include <linux/mlx5/fs.h> #include <linux/mlx5/fs.h>
#include <linux/ip.h> #include <linux/ip.h>
...@@ -738,4 +736,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, ...@@ -738,4 +736,4 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&arfs->arfs_lock); spin_unlock_bh(&arfs->arfs_lock);
return arfs_rule->filter_id; return arfs_rule->filter_id;
} }
#endif
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include "en.h" #include "en.h"
#include "en/port.h" #include "en/port.h"
#include "lib/clock.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv, void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo) struct ethtool_drvinfo *drvinfo)
...@@ -969,33 +970,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, ...@@ -969,33 +970,6 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir,
return 0; return 0;
} }
static int mlx5e_get_rxnfc(struct net_device *netdev,
struct ethtool_rxnfc *info, u32 *rule_locs)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
int err = 0;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = priv->channels.params.num_channels;
break;
case ETHTOOL_GRXCLSRLCNT:
info->rule_cnt = priv->fs.ethtool.tot_num_rules;
break;
case ETHTOOL_GRXCLSRULE:
err = mlx5e_ethtool_get_flow(priv, info, info->fs.location);
break;
case ETHTOOL_GRXCLSRLALL:
err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
#define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 #define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100
#define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000 #define MLX5E_PFC_PREVEN_TOUT_MAX_MSEC 8000
#define MLX5E_PFC_PREVEN_MINOR_PRECENT 85 #define MLX5E_PFC_PREVEN_MINOR_PRECENT 85
...@@ -1133,10 +1107,10 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, ...@@ -1133,10 +1107,10 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
if (ret) if (ret)
return ret; return ret;
info->phc_index = mdev->clock.ptp ? info->phc_index = mlx5_clock_get_ptp_index(mdev);
ptp_clock_index(mdev->clock.ptp) : -1;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
info->phc_index == -1)
return 0; return 0;
info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE | info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
...@@ -1606,26 +1580,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev) ...@@ -1606,26 +1580,6 @@ static u32 mlx5e_get_priv_flags(struct net_device *netdev)
return priv->channels.params.pflags; return priv->channels.params.pflags;
} }
static int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
int err = 0;
struct mlx5e_priv *priv = netdev_priv(dev);
switch (cmd->cmd) {
case ETHTOOL_SRXCLSRLINS:
err = mlx5e_ethtool_flow_replace(priv, &cmd->fs);
break;
case ETHTOOL_SRXCLSRLDEL:
err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
break;
default:
err = -EOPNOTSUPP;
break;
}
return err;
}
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash) struct ethtool_flash *flash)
{ {
...@@ -1678,8 +1632,10 @@ const struct ethtool_ops mlx5e_ethtool_ops = { ...@@ -1678,8 +1632,10 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size,
.get_rxfh = mlx5e_get_rxfh, .get_rxfh = mlx5e_get_rxfh,
.set_rxfh = mlx5e_set_rxfh, .set_rxfh = mlx5e_set_rxfh,
#ifdef CONFIG_MLX5_EN_RXNFC
.get_rxnfc = mlx5e_get_rxnfc, .get_rxnfc = mlx5e_get_rxnfc,
.set_rxnfc = mlx5e_set_rxnfc, .set_rxnfc = mlx5e_set_rxnfc,
#endif
.flash_device = mlx5e_flash_device, .flash_device = mlx5e_flash_device,
.get_tunable = mlx5e_get_tunable, .get_tunable = mlx5e_get_tunable,
.set_tunable = mlx5e_set_tunable, .set_tunable = mlx5e_set_tunable,
...@@ -1696,5 +1652,4 @@ const struct ethtool_ops mlx5e_ethtool_ops = { ...@@ -1696,5 +1652,4 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.self_test = mlx5e_self_test, .self_test = mlx5e_self_test,
.get_msglevel = mlx5e_get_msglevel, .get_msglevel = mlx5e_get_msglevel,
.set_msglevel = mlx5e_set_msglevel, .set_msglevel = mlx5e_set_msglevel,
}; };
...@@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, ...@@ -66,11 +66,14 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case TCP_V4_FLOW: case TCP_V4_FLOW:
case UDP_V4_FLOW: case UDP_V4_FLOW:
case TCP_V6_FLOW:
case UDP_V6_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS; max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
break; break;
case IP_USER_FLOW: case IP_USER_FLOW:
case IPV6_USER_FLOW:
max_tuples = ETHTOOL_NUM_L3_L4_FTS; max_tuples = ETHTOOL_NUM_L3_L4_FTS;
prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples);
eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; eth_ft = &priv->fs.ethtool.l3_l4_ft[prio];
...@@ -115,29 +118,203 @@ static void mask_spec(u8 *mask, u8 *val, size_t size) ...@@ -115,29 +118,203 @@ static void mask_spec(u8 *mask, u8 *val, size_t size)
*((u8 *)val) = *((u8 *)mask) & *((u8 *)val); *((u8 *)val) = *((u8 *)mask) & *((u8 *)val);
} }
static void set_ips(void *outer_headers_v, void *outer_headers_c, __be32 ip4src_m, #define MLX5E_FTE_SET(header_p, fld, v) \
__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v) MLX5_SET(fte_match_set_lyr_2_4, header_p, fld, v)
#define MLX5E_FTE_ADDR_OF(header_p, fld) \
MLX5_ADDR_OF(fte_match_set_lyr_2_4, header_p, fld)
static void
set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
__be32 ip4src_v, __be32 ip4dst_m, __be32 ip4dst_v)
{ {
if (ip4src_m) { if (ip4src_m) {
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v)); &ip4src_v, sizeof(ip4src_v));
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
src_ipv4_src_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4src_m)); 0xff, sizeof(ip4src_m));
} }
if (ip4dst_m) { if (ip4dst_m) {
memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v, memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v)); &ip4dst_v, sizeof(ip4dst_v));
memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c, memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4dst_m)); 0xff, sizeof(ip4dst_m));
} }
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
ethertype, ETH_P_IP); MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IP);
ethertype, 0xffff); }
static void
set_ip6(void *headers_c, void *headers_v, __be32 ip6src_m[4],
__be32 ip6src_v[4], __be32 ip6dst_m[4], __be32 ip6dst_v[4])
{
u8 ip6_sz = MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6);
if (!ipv6_addr_any((struct in6_addr *)ip6src_m)) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv6_layout.ipv6),
ip6src_v, ip6_sz);
memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv6_layout.ipv6),
ip6src_m, ip6_sz);
}
if (!ipv6_addr_any((struct in6_addr *)ip6dst_m)) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
ip6dst_v, ip6_sz);
memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
ip6dst_m, ip6_sz);
}
MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
MLX5E_FTE_SET(headers_v, ethertype, ETH_P_IPV6);
}
static void
set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
}
if (pdst_m) {
MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
}
MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_TCP);
}
static void
set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {
MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
}
MLX5E_FTE_SET(headers_c, ip_protocol, 0xffff);
MLX5E_FTE_SET(headers_v, ip_protocol, IPPROTO_UDP);
}
static void
parse_tcp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.tcp_ip4_spec;
struct ethtool_tcpip4_spec *l4_val = &fs->h_u.tcp_ip4_spec;
set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
l4_mask->ip4dst, l4_val->ip4dst);
set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_udp4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip4_spec *l4_mask = &fs->m_u.udp_ip4_spec;
struct ethtool_tcpip4_spec *l4_val = &fs->h_u.udp_ip4_spec;
set_ip4(headers_c, headers_v, l4_mask->ip4src, l4_val->ip4src,
l4_mask->ip4dst, l4_val->ip4dst);
set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_ip4(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_usrip4_spec *l3_mask = &fs->m_u.usr_ip4_spec;
struct ethtool_usrip4_spec *l3_val = &fs->h_u.usr_ip4_spec;
set_ip4(headers_c, headers_v, l3_mask->ip4src, l3_val->ip4src,
l3_mask->ip4dst, l3_val->ip4dst);
if (l3_mask->proto) {
MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->proto);
MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->proto);
}
}
static void
parse_ip6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_usrip6_spec *l3_mask = &fs->m_u.usr_ip6_spec;
struct ethtool_usrip6_spec *l3_val = &fs->h_u.usr_ip6_spec;
set_ip6(headers_c, headers_v, l3_mask->ip6src,
l3_val->ip6src, l3_mask->ip6dst, l3_val->ip6dst);
if (l3_mask->l4_proto) {
MLX5E_FTE_SET(headers_c, ip_protocol, l3_mask->l4_proto);
MLX5E_FTE_SET(headers_v, ip_protocol, l3_val->l4_proto);
}
}
static void
parse_tcp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.tcp_ip6_spec;
struct ethtool_tcpip6_spec *l4_val = &fs->h_u.tcp_ip6_spec;
set_ip6(headers_c, headers_v, l4_mask->ip6src,
l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
set_tcp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_udp6(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethtool_tcpip6_spec *l4_mask = &fs->m_u.udp_ip6_spec;
struct ethtool_tcpip6_spec *l4_val = &fs->h_u.udp_ip6_spec;
set_ip6(headers_c, headers_v, l4_mask->ip6src,
l4_val->ip6src, l4_mask->ip6dst, l4_val->ip6dst);
set_udp(headers_c, headers_v, l4_mask->psrc, l4_val->psrc,
l4_mask->pdst, l4_val->pdst);
}
static void
parse_ether(void *headers_c, void *headers_v, struct ethtool_rx_flow_spec *fs)
{
struct ethhdr *eth_mask = &fs->m_u.ether_spec;
struct ethhdr *eth_val = &fs->h_u.ether_spec;
mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, smac_47_16), eth_mask->h_source);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, smac_47_16), eth_val->h_source);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), eth_mask->h_dest);
ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), eth_val->h_dest);
MLX5E_FTE_SET(headers_c, ethertype, ntohs(eth_mask->h_proto));
MLX5E_FTE_SET(headers_v, ethertype, ntohs(eth_val->h_proto));
}
static void
set_cvlan(void *headers_c, void *headers_v, __be16 vlan_tci)
{
MLX5E_FTE_SET(headers_c, cvlan_tag, 1);
MLX5E_FTE_SET(headers_v, cvlan_tag, 1);
MLX5E_FTE_SET(headers_c, first_vid, 0xfff);
MLX5E_FTE_SET(headers_v, first_vid, ntohs(vlan_tci));
}
static void
set_dmac(void *headers_c, void *headers_v,
	unsigned char m_dest[ETH_ALEN], unsigned char v_dest[ETH_ALEN])
{
	/* Set destination-MAC mask (m_dest) and value (v_dest) in the
	 * outer-headers match criteria/value blocks.
	 */
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_c, dmac_47_16), m_dest);
	ether_addr_copy(MLX5E_FTE_ADDR_OF(headers_v, dmac_47_16), v_dest);
} }
static int set_flow_attrs(u32 *match_c, u32 *match_v, static int set_flow_attrs(u32 *match_c, u32 *match_v,
...@@ -148,112 +325,42 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, ...@@ -148,112 +325,42 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
outer_headers); outer_headers);
u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
struct ethtool_tcpip4_spec *l4_mask;
struct ethtool_tcpip4_spec *l4_val;
struct ethtool_usrip4_spec *l3_mask;
struct ethtool_usrip4_spec *l3_val;
struct ethhdr *eth_val;
struct ethhdr *eth_mask;
switch (flow_type) { switch (flow_type) {
case TCP_V4_FLOW: case TCP_V4_FLOW:
l4_mask = &fs->m_u.tcp_ip4_spec; parse_tcp4(outer_headers_c, outer_headers_v, fs);
l4_val = &fs->h_u.tcp_ip4_spec;
set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
if (l4_mask->psrc) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
ntohs(l4_val->psrc));
}
if (l4_mask->pdst) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
ntohs(l4_val->pdst));
}
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
IPPROTO_TCP);
break; break;
case UDP_V4_FLOW: case UDP_V4_FLOW:
l4_mask = &fs->m_u.tcp_ip4_spec; parse_udp4(outer_headers_c, outer_headers_v, fs);
l4_val = &fs->h_u.tcp_ip4_spec;
set_ips(outer_headers_v, outer_headers_c, l4_mask->ip4src,
l4_val->ip4src, l4_mask->ip4dst, l4_val->ip4dst);
if (l4_mask->psrc) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
ntohs(l4_val->psrc));
}
if (l4_mask->pdst) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
ntohs(l4_val->pdst));
}
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
0xffff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
IPPROTO_UDP);
break; break;
case IP_USER_FLOW: case IP_USER_FLOW:
l3_mask = &fs->m_u.usr_ip4_spec; parse_ip4(outer_headers_c, outer_headers_v, fs);
l3_val = &fs->h_u.usr_ip4_spec; break;
set_ips(outer_headers_v, outer_headers_c, l3_mask->ip4src, case TCP_V6_FLOW:
l3_val->ip4src, l3_mask->ip4dst, l3_val->ip4dst); parse_tcp6(outer_headers_c, outer_headers_v, fs);
break;
case UDP_V6_FLOW:
parse_udp6(outer_headers_c, outer_headers_v, fs);
break;
case IPV6_USER_FLOW:
parse_ip6(outer_headers_c, outer_headers_v, fs);
break; break;
case ETHER_FLOW: case ETHER_FLOW:
eth_mask = &fs->m_u.ether_spec; parse_ether(outer_headers_c, outer_headers_v, fs);
eth_val = &fs->h_u.ether_spec;
mask_spec((u8 *)eth_mask, (u8 *)eth_val, sizeof(*eth_mask));
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, smac_47_16),
eth_mask->h_source);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_v, smac_47_16),
eth_val->h_source);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_c, dmac_47_16),
eth_mask->h_dest);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_v, dmac_47_16),
eth_val->h_dest);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ethertype,
ntohs(eth_mask->h_proto));
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ethertype,
ntohs(eth_val->h_proto));
break; break;
default: default:
return -EINVAL; return -EINVAL;
} }
if ((fs->flow_type & FLOW_EXT) && if ((fs->flow_type & FLOW_EXT) &&
(fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) { (fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK)))
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, set_cvlan(outer_headers_c, outer_headers_v, fs->h_ext.vlan_tci);
cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
first_vid, 0xfff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
first_vid, ntohs(fs->h_ext.vlan_tci));
}
if (fs->flow_type & FLOW_MAC_EXT && if (fs->flow_type & FLOW_MAC_EXT &&
!is_zero_ether_addr(fs->m_ext.h_dest)) { !is_zero_ether_addr(fs->m_ext.h_dest)) {
mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN); mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, set_dmac(outer_headers_c, outer_headers_v, fs->m_ext.h_dest,
outer_headers_c, dmac_47_16), fs->h_ext.h_dest);
fs->m_ext.h_dest);
ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
outer_headers_v, dmac_47_16),
fs->h_ext.h_dest);
} }
return 0; return 0;
...@@ -379,16 +486,143 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv, ...@@ -379,16 +486,143 @@ static struct mlx5e_ethtool_rule *get_ethtool_rule(struct mlx5e_priv *priv,
#define all_zeros_or_all_ones(field) \ #define all_zeros_or_all_ones(field) \
((field) == 0 || (field) == (__force typeof(field))-1) ((field) == 0 || (field) == (__force typeof(field))-1)
static int validate_ethter(struct ethtool_rx_flow_spec *fs)
{
	/* Count how many match tuples an ETHER_FLOW spec requests:
	 * one each for a non-zero dest MAC mask, source MAC mask and
	 * ethertype mask. Returns the tuple count (0 if nothing is masked).
	 */
	struct ethhdr *mask = &fs->m_u.ether_spec;
	int cnt = 0;

	cnt += !is_zero_ether_addr(mask->h_dest);
	cnt += !is_zero_ether_addr(mask->h_source);
	cnt += (mask->h_proto != 0);
	return cnt;
}
static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
{
	/* Validate a TCP/UDP-over-IPv4 spec and return its tuple count,
	 * or -EINVAL for unsupported masks. Only all-ones (exact-match)
	 * masks are supported for addresses and ports; ToS matching is
	 * not supported at all.
	 */
	struct ethtool_tcpip4_spec *mask = &fs->m_u.tcp_ip4_spec;
	int cnt = 1;	/* the flow being TCP/UDP is itself one tuple */

	if (mask->tos)
		return -EINVAL;

	if (mask->ip4src) {
		if (!all_ones(mask->ip4src))
			return -EINVAL;
		cnt++;
	}
	if (mask->ip4dst) {
		if (!all_ones(mask->ip4dst))
			return -EINVAL;
		cnt++;
	}
	if (mask->psrc) {
		if (!all_ones(mask->psrc))
			return -EINVAL;
		cnt++;
	}
	if (mask->pdst) {
		if (!all_ones(mask->pdst))
			return -EINVAL;
		cnt++;
	}
	return cnt;
}
static int validate_ip4(struct ethtool_rx_flow_spec *fs)
{
	/* Validate an IP_USER_FLOW (plain IPv4) spec and return its tuple
	 * count, or -EINVAL for unsupported masks. l4_4_bytes and ToS
	 * matching are not supported, and the spec must declare itself
	 * as IPv4 via ip_ver.
	 */
	struct ethtool_usrip4_spec *mask = &fs->m_u.usr_ip4_spec;
	int cnt = 1;	/* being IPv4 is itself one tuple */

	if (mask->l4_4_bytes || mask->tos ||
	    fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
		return -EINVAL;

	if (mask->ip4src) {
		if (!all_ones(mask->ip4src))
			return -EINVAL;
		cnt++;
	}
	if (mask->ip4dst) {
		if (!all_ones(mask->ip4dst))
			return -EINVAL;
		cnt++;
	}
	if (mask->proto)
		cnt++;
	return cnt;
}
static int validate_ip6(struct ethtool_rx_flow_spec *fs)
{
	/* Validate an IPV6_USER_FLOW spec and return its tuple count, or
	 * -EINVAL for unsupported masks (l4_4_bytes and traffic class
	 * matching are not supported).
	 */
	struct ethtool_usrip6_spec *mask = &fs->m_u.usr_ip6_spec;
	int cnt = 1;	/* being IPv6 is itself one tuple */

	if (mask->l4_4_bytes || mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)mask->ip6src))
		cnt++;
	if (!ipv6_addr_any((struct in6_addr *)mask->ip6dst))
		cnt++;
	if (mask->l4_proto)
		cnt++;
	return cnt;
}
static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
{
	/* Validate a TCP/UDP-over-IPv6 spec and return its tuple count,
	 * or -EINVAL for unsupported masks. Traffic-class matching is not
	 * supported; port masks must be exact (all-ones).
	 */
	struct ethtool_tcpip6_spec *mask = &fs->m_u.tcp_ip6_spec;
	int cnt = 1;	/* the flow being TCP/UDP is itself one tuple */

	if (mask->tclass)
		return -EINVAL;

	if (!ipv6_addr_any((struct in6_addr *)mask->ip6src))
		cnt++;
	if (!ipv6_addr_any((struct in6_addr *)mask->ip6dst))
		cnt++;
	if (mask->psrc) {
		if (!all_ones(mask->psrc))
			return -EINVAL;
		cnt++;
	}
	if (mask->pdst) {
		if (!all_ones(mask->pdst))
			return -EINVAL;
		cnt++;
	}
	return cnt;
}
static int validate_vlan(struct ethtool_rx_flow_spec *fs)
{
	/* Validate the FLOW_EXT VLAN part of a spec: only an exact match
	 * on the full 12-bit VLAN ID is supported (no vlan_etype, no
	 * partial TCI masks). Returns 1 tuple on success, -EINVAL
	 * otherwise.
	 */
	if (fs->m_ext.vlan_etype ||
	    fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
		return -EINVAL;

	/* The first check guarantees a non-zero TCI mask, so the VID
	 * value is always validated.
	 */
	if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
		return -EINVAL;

	return 1;
}
static int validate_flow(struct mlx5e_priv *priv, static int validate_flow(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs) struct ethtool_rx_flow_spec *fs)
{ {
struct ethtool_tcpip4_spec *l4_mask;
struct ethtool_usrip4_spec *l3_mask;
struct ethhdr *eth_mask;
int num_tuples = 0; int num_tuples = 0;
int ret = 0;
if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES) if (fs->location >= MAX_NUM_OF_ETHTOOL_RULES)
return -EINVAL; return -ENOSPC;
if (fs->ring_cookie >= priv->channels.params.num_channels && if (fs->ring_cookie >= priv->channels.params.num_channels &&
fs->ring_cookie != RX_CLS_FLOW_DISC) fs->ring_cookie != RX_CLS_FLOW_DISC)
...@@ -396,73 +630,42 @@ static int validate_flow(struct mlx5e_priv *priv, ...@@ -396,73 +630,42 @@ static int validate_flow(struct mlx5e_priv *priv,
switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
case ETHER_FLOW: case ETHER_FLOW:
eth_mask = &fs->m_u.ether_spec; num_tuples += validate_ethter(fs);
if (!is_zero_ether_addr(eth_mask->h_dest))
num_tuples++;
if (!is_zero_ether_addr(eth_mask->h_source))
num_tuples++;
if (eth_mask->h_proto)
num_tuples++;
break; break;
case TCP_V4_FLOW: case TCP_V4_FLOW:
case UDP_V4_FLOW: case UDP_V4_FLOW:
if (fs->m_u.tcp_ip4_spec.tos) ret = validate_tcpudp4(fs);
return -EINVAL; if (ret < 0)
l4_mask = &fs->m_u.tcp_ip4_spec; return ret;
if (l4_mask->ip4src) { num_tuples += ret;
if (!all_ones(l4_mask->ip4src))
return -EINVAL;
num_tuples++;
}
if (l4_mask->ip4dst) {
if (!all_ones(l4_mask->ip4dst))
return -EINVAL;
num_tuples++;
}
if (l4_mask->psrc) {
if (!all_ones(l4_mask->psrc))
return -EINVAL;
num_tuples++;
}
if (l4_mask->pdst) {
if (!all_ones(l4_mask->pdst))
return -EINVAL;
num_tuples++;
}
/* Flow is TCP/UDP */
num_tuples++;
break; break;
case IP_USER_FLOW: case IP_USER_FLOW:
l3_mask = &fs->m_u.usr_ip4_spec; ret = validate_ip4(fs);
if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || if (ret < 0)
fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4) return ret;
return -EINVAL; num_tuples += ret;
if (l3_mask->ip4src) { break;
if (!all_ones(l3_mask->ip4src)) case TCP_V6_FLOW:
return -EINVAL; case UDP_V6_FLOW:
num_tuples++; ret = validate_tcpudp6(fs);
} if (ret < 0)
if (l3_mask->ip4dst) { return ret;
if (!all_ones(l3_mask->ip4dst)) num_tuples += ret;
return -EINVAL; break;
num_tuples++; case IPV6_USER_FLOW:
} ret = validate_ip6(fs);
/* Flow is IPv4 */ if (ret < 0)
num_tuples++; return ret;
num_tuples += ret;
break; break;
default: default:
return -EINVAL; return -ENOTSUPP;
} }
if ((fs->flow_type & FLOW_EXT)) { if ((fs->flow_type & FLOW_EXT)) {
if (fs->m_ext.vlan_etype || ret = validate_vlan(fs);
(fs->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))) if (ret < 0)
return -EINVAL; return ret;
num_tuples += ret;
if (fs->m_ext.vlan_tci) {
if (be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID)
return -EINVAL;
}
num_tuples++;
} }
if (fs->flow_type & FLOW_MAC_EXT && if (fs->flow_type & FLOW_MAC_EXT &&
...@@ -472,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv, ...@@ -472,8 +675,9 @@ static int validate_flow(struct mlx5e_priv *priv,
return num_tuples; return num_tuples;
} }
int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, static int
struct ethtool_rx_flow_spec *fs) mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
struct ethtool_rx_flow_spec *fs)
{ {
struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_table *eth_ft;
struct mlx5e_ethtool_rule *eth_rule; struct mlx5e_ethtool_rule *eth_rule;
...@@ -483,8 +687,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, ...@@ -483,8 +687,9 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
num_tuples = validate_flow(priv, fs); num_tuples = validate_flow(priv, fs);
if (num_tuples <= 0) { if (num_tuples <= 0) {
netdev_warn(priv->netdev, "%s: flow is not valid\n", __func__); netdev_warn(priv->netdev, "%s: flow is not valid %d\n",
return -EINVAL; __func__, num_tuples);
return num_tuples;
} }
eth_ft = get_flow_table(priv, fs, num_tuples); eth_ft = get_flow_table(priv, fs, num_tuples);
...@@ -519,8 +724,8 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, ...@@ -519,8 +724,8 @@ int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv,
return err; return err;
} }
int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, static int
int location) mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, int location)
{ {
struct mlx5e_ethtool_rule *eth_rule; struct mlx5e_ethtool_rule *eth_rule;
int err = 0; int err = 0;
...@@ -539,8 +744,9 @@ int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv, ...@@ -539,8 +744,9 @@ int mlx5e_ethtool_flow_remove(struct mlx5e_priv *priv,
return err; return err;
} }
int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, static int
int location) mlx5e_ethtool_get_flow(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, int location)
{ {
struct mlx5e_ethtool_rule *eth_rule; struct mlx5e_ethtool_rule *eth_rule;
...@@ -557,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, ...@@ -557,8 +763,9 @@ int mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, struct ethtool_rxnfc *info,
return -ENOENT; return -ENOENT;
} }
int mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, struct ethtool_rxnfc *info, static int
u32 *rule_locs) mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv,
struct ethtool_rxnfc *info, u32 *rule_locs)
{ {
int location = 0; int location = 0;
int idx = 0; int idx = 0;
...@@ -587,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) ...@@ -587,3 +794,51 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv)
{ {
INIT_LIST_HEAD(&priv->fs.ethtool.rules); INIT_LIST_HEAD(&priv->fs.ethtool.rules);
} }
int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	/* ethtool set_rxnfc entry point: dispatch rule insert/delete
	 * commands to the flow-steering helpers. Any other command is
	 * not supported.
	 */
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		return mlx5e_ethtool_flow_replace(priv, &cmd->fs);
	case ETHTOOL_SRXCLSRLDEL:
		return mlx5e_ethtool_flow_remove(priv, cmd->fs.location);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_get_rxnfc(struct net_device *dev,
		    struct ethtool_rxnfc *info, u32 *rule_locs)
{
	/* ethtool get_rxnfc entry point: report ring count, rule count,
	 * a single rule, or all rule locations. Unsupported commands
	 * return -EOPNOTSUPP.
	 */
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = priv->channels.params.num_channels;
		return 0;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = priv->fs.ethtool.tot_num_rules;
		return 0;
	case ETHTOOL_GRXCLSRULE:
		return mlx5e_ethtool_get_flow(priv, info, info->fs.location);
	case ETHTOOL_GRXCLSRLALL:
		return mlx5e_ethtool_get_all_flows(priv, info, rule_locs);
	default:
		return -EOPNOTSUPP;
	}
}
...@@ -46,6 +46,7 @@ ...@@ -46,6 +46,7 @@
#include "accel/ipsec.h" #include "accel/ipsec.h"
#include "accel/tls.h" #include "accel/tls.h"
#include "lib/vxlan.h" #include "lib/vxlan.h"
#include "lib/clock.h"
#include "en/port.h" #include "en/port.h"
#include "en/xdp.h" #include "en/xdp.h"
...@@ -3624,7 +3625,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable) ...@@ -3624,7 +3625,7 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
return err; return err;
} }
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_MLX5_EN_ARFS
static int set_feature_arfs(struct net_device *netdev, bool enable) static int set_feature_arfs(struct net_device *netdev, bool enable)
{ {
struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5e_priv *priv = netdev_priv(netdev);
...@@ -3679,7 +3680,7 @@ static int mlx5e_set_features(struct net_device *netdev, ...@@ -3679,7 +3680,7 @@ static int mlx5e_set_features(struct net_device *netdev,
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs);
err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_MLX5_EN_ARFS
err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs);
#endif #endif
...@@ -3782,7 +3783,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr) ...@@ -3782,7 +3783,8 @@ int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
struct hwtstamp_config config; struct hwtstamp_config config;
int err; int err;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz)) if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz) ||
(mlx5_clock_get_ptp_index(priv->mdev) == -1))
return -EOPNOTSUPP; return -EOPNOTSUPP;
if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
...@@ -4348,12 +4350,12 @@ static const struct net_device_ops mlx5e_netdev_ops = { ...@@ -4348,12 +4350,12 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_udp_tunnel_add = mlx5e_add_vxlan_port, .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
.ndo_udp_tunnel_del = mlx5e_del_vxlan_port, .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
.ndo_features_check = mlx5e_features_check, .ndo_features_check = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout, .ndo_tx_timeout = mlx5e_tx_timeout,
.ndo_bpf = mlx5e_xdp, .ndo_bpf = mlx5e_xdp,
.ndo_xdp_xmit = mlx5e_xdp_xmit, .ndo_xdp_xmit = mlx5e_xdp_xmit,
#ifdef CONFIG_MLX5_EN_ARFS
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER #ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll, .ndo_poll_controller = mlx5e_netpoll,
#endif #endif
...@@ -4703,7 +4705,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) ...@@ -4703,7 +4705,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
FT_CAP(identified_miss_table_mode) && FT_CAP(identified_miss_table_mode) &&
FT_CAP(flow_table_modify)) { FT_CAP(flow_table_modify)) {
netdev->hw_features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_MLX5_EN_ARFS
netdev->hw_features |= NETIF_F_NTUPLE; netdev->hw_features |= NETIF_F_NTUPLE;
#endif #endif
} }
...@@ -4947,7 +4949,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev, ...@@ -4947,7 +4949,7 @@ struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
return NULL; return NULL;
} }
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_MLX5_EN_ARFS
netdev->rx_cpu_rmap = mdev->rmap; netdev->rx_cpu_rmap = mdev->rmap;
#endif #endif
......
...@@ -40,6 +40,7 @@ ...@@ -40,6 +40,7 @@
#include "mlx5_core.h" #include "mlx5_core.h"
#include "fpga/core.h" #include "fpga/core.h"
#include "eswitch.h" #include "eswitch.h"
#include "lib/clock.h"
#include "diag/fw_tracer.h" #include "diag/fw_tracer.h"
enum { enum {
......
...@@ -1876,7 +1876,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, ...@@ -1876,7 +1876,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec, struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act, struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest, struct mlx5_flow_destination *dest,
int dest_num) int num_dest)
{ {
struct mlx5_flow_root_namespace *root = find_root(&ft->node); struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct mlx5_flow_destination gen_dest = {}; struct mlx5_flow_destination gen_dest = {};
...@@ -1889,7 +1889,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, ...@@ -1889,7 +1889,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft)) if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
if (dest_num) if (num_dest)
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock); mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio); next_ft = find_next_chained_ft(prio);
...@@ -1897,7 +1897,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, ...@@ -1897,7 +1897,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; gen_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
gen_dest.ft = next_ft; gen_dest.ft = next_ft;
dest = &gen_dest; dest = &gen_dest;
dest_num = 1; num_dest = 1;
flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_act->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
} else { } else {
mutex_unlock(&root->chain_lock); mutex_unlock(&root->chain_lock);
...@@ -1905,7 +1905,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, ...@@ -1905,7 +1905,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
} }
} }
handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, dest_num); handle = _mlx5_add_flow_rules(ft, spec, flow_act, dest, num_dest);
if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) { if (sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!IS_ERR_OR_NULL(handle) && if (!IS_ERR_OR_NULL(handle) &&
......
...@@ -33,8 +33,15 @@ ...@@ -33,8 +33,15 @@
#ifndef __LIB_CLOCK_H__ #ifndef __LIB_CLOCK_H__
#define __LIB_CLOCK_H__ #define __LIB_CLOCK_H__
#if IS_ENABLED(CONFIG_PTP_1588_CLOCK)
void mlx5_init_clock(struct mlx5_core_dev *mdev); void mlx5_init_clock(struct mlx5_core_dev *mdev);
void mlx5_cleanup_clock(struct mlx5_core_dev *mdev); void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
/* Return the PTP clock device index registered for this device, or -1
 * when no PTP clock is registered.
 */
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
	return mdev->clock.ptp ? ptp_clock_index(mdev->clock.ptp) : -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
u64 timestamp) u64 timestamp)
...@@ -48,4 +55,21 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, ...@@ -48,4 +55,21 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
return ns_to_ktime(nsec); return ns_to_ktime(nsec);
} }
#else
/* CONFIG_PTP_1588_CLOCK disabled: provide no-op stubs so callers need
 * no ifdefs of their own.
 */
static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {}
static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {}
/* -1 signals "no PTP clock available", matching the enabled variant */
static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev)
{
	return -1;
}
static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
						u64 timestamp)
{
	return 0;
}
#endif
#endif #endif
...@@ -37,8 +37,6 @@ ...@@ -37,8 +37,6 @@
struct mlx5_vxlan; struct mlx5_vxlan;
struct mlx5_vxlan_port; struct mlx5_vxlan_port;
#ifdef CONFIG_MLX5_CORE_EN
static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
{ {
/* not allowed reason is encoded in vxlan pointer as error, /* not allowed reason is encoded in vxlan pointer as error,
...@@ -47,18 +45,20 @@ static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan) ...@@ -47,18 +45,20 @@ static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
return !IS_ERR_OR_NULL(vxlan); return !IS_ERR_OR_NULL(vxlan);
} }
#if IS_ENABLED(CONFIG_VXLAN)
struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev); struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev);
void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan); void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan);
int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port); int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port);
int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port); int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port);
struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port); struct mlx5_vxlan_port *mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port);
#else #else
static inline struct mlx5_vxlan* static inline struct mlx5_vxlan*
mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-ENOTSUPP); } mlx5_vxlan_create(struct mlx5_core_dev *mdev) { return ERR_PTR(-EOPNOTSUPP); }
static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; } static inline void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { return; }
/* CONFIG_VXLAN disabled: stub the port add/del/lookup API. */
static inline int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
static inline int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { return -EOPNOTSUPP; }
/* Fix: the stub's return type was misspelled "struct mx5_vxlan_port *",
 * which silently declares a brand-new incomplete struct type instead of
 * matching the mlx5_vxlan_port forward declaration and the real
 * implementation's return type.
 */
static inline struct mlx5_vxlan_port*
mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { return NULL; }
#endif #endif
#endif /* __MLX5_VXLAN_H__ */ #endif /* __MLX5_VXLAN_H__ */
...@@ -99,7 +99,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, ...@@ -99,7 +99,6 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param); unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev, void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault); struct mlx5_pagefault *pfault);
void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force); void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev);
......
...@@ -177,7 +177,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft, ...@@ -177,7 +177,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec, struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act, struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest, struct mlx5_flow_destination *dest,
int dest_num); int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr); void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler, int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment