Commit 3bd09b05 authored by David S. Miller

Merge tag 'mlx5e-updates-2018-10-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5e-updates-2018-10-01

This series includes updates to mlx5e ethernet netdevice driver:

From Or Gerlitz:
1) Support masks for l3/l4 filters in ethtool flow steering
   (see the usage sketch after this message)
2) Report checksum unnecessary also when the L3 checksum flag on the
   cqe is set and there is no L4 header
3) Allow reporting checksum unnecessary instead of checksum complete,
   controlled by a new ethtool private flag (rx_no_csum_complete).

From Gavi Teitz and Or, VF representor netdev performance improvements:
4) Allow striding RQ and a bigger RQ size in VF representors, ~3X performance improvement
5) Enable stateless offloads (csum and TSO) for VF representors, 1.5X performance improvement
6) RSS support for VF representors:
   6.1) Allow a flow table destination for the VF representor steering rule.
   6.2) Create an RSS flow table per representor netdev
   6.3) Expose the mlx5e RSS ethtool handlers for use by representor netdevs
   6.4) Enable multi-queue and RSS for VF representors, using the existing mlx5e
        infrastructure for managing multi-queue RX RSS tables.

From Alaa Hleihel:
7) Cache the system image GUID. The system image GUID is a read-only field;
   read it once and save it on the core device.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 041a14d2 59c9d35e
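Items 1 and 3 are user-visible ethtool changes. As a concrete illustration of
item 1, below is a hedged userspace sketch (not part of the commit) that
inserts a masked TCP/IPv4 steering rule through the ethtool ioctl interface;
the interface name, addresses, and ring index are illustrative. Before this
series the driver rejected any l3/l4 mask that was not all-ones.

/* Hedged sketch: insert a masked TCP/IPv4 flow-steering rule via
 * ETHTOOL_SRXCLSRLINS. sock_fd is any AF_INET datagram socket.
 */
#include <string.h>
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int insert_masked_rule(int sock_fd, const char *ifname)
{
	struct ethtool_rxnfc nfc;
	struct ifreq ifr;

	memset(&nfc, 0, sizeof(nfc));
	nfc.cmd = ETHTOOL_SRXCLSRLINS;
	nfc.fs.flow_type = TCP_V4_FLOW;
	/* 192.168.1.0/24 source: a partial mask that validate_tcpudp4()
	 * used to reject with -EINVAL before this series. */
	nfc.fs.h_u.tcp_ip4_spec.ip4src = htonl(0xc0a80100);
	nfc.fs.m_u.tcp_ip4_spec.ip4src = htonl(0xffffff00);
	/* Exact-match destination port 80 (all-ones mask still valid). */
	nfc.fs.h_u.tcp_ip4_spec.pdst = htons(80);
	nfc.fs.m_u.tcp_ip4_spec.pdst = htons(0xffff);
	nfc.fs.ring_cookie = 1;	/* steer hits to RX ring 1 */
	nfc.fs.location = 0;	/* rule slot, validated by the driver */

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nfc;
	return ioctl(sock_fd, SIOCETHTOOL, &ifr);
}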
@@ -209,6 +209,7 @@ enum mlx5e_priv_flag {
MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3),
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -290,6 +291,7 @@ struct mlx5e_dcbx_dp {
enum {
MLX5E_RQ_STATE_ENABLED,
MLX5E_RQ_STATE_AM,
MLX5E_RQ_STATE_NO_CSUM_COMPLETE,
};
struct mlx5e_cq {
@@ -906,8 +908,8 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
int mlx5e_create_direct_rqts(struct mlx5e_priv *priv);
void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv);
@@ -951,6 +953,8 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv,
struct ethtool_coalesce *coal);
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv);
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv);
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info);
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
@@ -966,6 +970,9 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels, u16 mtu);
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params);
void mlx5e_build_rss_params(struct mlx5e_params *params);
u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
void mlx5e_rx_dim_work(struct work_struct *work);
void mlx5e_tx_dim_work(struct work_struct *work);
......
@@ -140,6 +140,7 @@ static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"tx_cqe_moder",
"rx_cqe_compress",
"rx_striding_rq",
"rx_no_csum_complete",
};
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
@@ -859,18 +860,30 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev,
return err;
}
u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv)
{
return sizeof(priv->channels.params.toeplitz_hash_key);
}
static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return sizeof(priv->channels.params.toeplitz_hash_key);
return mlx5e_ethtool_get_rxfh_key_size(priv);
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv)
{
return MLX5E_INDIR_RQT_SIZE;
}
static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -1519,6 +1532,27 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable)
return 0;
}
static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_channels *channels = &priv->channels;
struct mlx5e_channel *c;
int i;
if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
return 0;
for (i = 0; i < channels->num; i++) {
c = channels->c[i];
if (enable)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
else
__clear_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
}
return 0;
}
static int mlx5e_handle_pflag(struct net_device *netdev,
u32 wanted_flags,
enum mlx5e_priv_flag flag,
@@ -1570,6 +1604,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_STRIDING_RQ,
set_pflag_rx_striding_rq);
if (err)
goto out;
err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_NO_CSUM_COMPLETE,
set_pflag_rx_no_csum_complete);
out:
mutex_unlock(&priv->state_lock);
......
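The new "rx_no_csum_complete" entry above is exposed through the standard
ethtool private-flags interface. A hedged userspace sketch of toggling it
follows (not part of the commit); the hard-coded bit index 4 mirrors the
flag's position in mlx5e_priv_flags[], and a robust tool would resolve the
index from the ETH_SS_PRIV_FLAGS string set instead.

/* Hedged sketch: flip the rx_no_csum_complete private flag with the
 * legacy ETHTOOL_GPFLAGS/ETHTOOL_SPFLAGS ioctls.
 */
#include <stdbool.h>
#include <string.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int set_rx_no_csum_complete(int sock_fd, const char *ifname, bool on)
{
	struct ethtool_value ev = { .cmd = ETHTOOL_GPFLAGS };
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ev;

	if (ioctl(sock_fd, SIOCETHTOOL, &ifr))	/* read current flags */
		return -1;

	if (on)
		ev.data |= 1u << 4;	/* assumed bit of "rx_no_csum_complete" */
	else
		ev.data &= ~(1u << 4);

	ev.cmd = ETHTOOL_SPFLAGS;		/* write flags back */
	return ioctl(sock_fd, SIOCETHTOOL, &ifr);
}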
@@ -131,14 +131,14 @@ set_ip4(void *headers_c, void *headers_v, __be32 ip4src_m,
if (ip4src_m) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_v, sizeof(ip4src_v));
memset(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4src_m));
memcpy(MLX5E_FTE_ADDR_OF(headers_c, src_ipv4_src_ipv6.ipv4_layout.ipv4),
&ip4src_m, sizeof(ip4src_m));
}
if (ip4dst_m) {
memcpy(MLX5E_FTE_ADDR_OF(headers_v, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_v, sizeof(ip4dst_v));
memset(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
0xff, sizeof(ip4dst_m));
memcpy(MLX5E_FTE_ADDR_OF(headers_c, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
&ip4dst_m, sizeof(ip4dst_m));
}
MLX5E_FTE_SET(headers_c, ethertype, 0xffff);
@@ -173,11 +173,11 @@ set_tcp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, tcp_sport, 0xffff);
MLX5E_FTE_SET(headers_c, tcp_sport, ntohs(psrc_m));
MLX5E_FTE_SET(headers_v, tcp_sport, ntohs(psrc_v));
}
if (pdst_m) {
MLX5E_FTE_SET(headers_c, tcp_dport, 0xffff);
MLX5E_FTE_SET(headers_c, tcp_dport, ntohs(pdst_m));
MLX5E_FTE_SET(headers_v, tcp_dport, ntohs(pdst_v));
}
@@ -190,12 +190,12 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
__be16 pdst_m, __be16 pdst_v)
{
if (psrc_m) {
MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_m));
MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
}
if (pdst_m) {
MLX5E_FTE_SET(headers_c, udp_dport, 0xffff);
MLX5E_FTE_SET(headers_c, udp_dport, ntohs(pdst_m));
MLX5E_FTE_SET(headers_v, udp_dport, ntohs(pdst_v));
}
@@ -508,26 +508,14 @@ static int validate_tcpudp4(struct ethtool_rx_flow_spec *fs)
if (l4_mask->tos)
return -EINVAL;
if (l4_mask->ip4src) {
if (!all_ones(l4_mask->ip4src))
return -EINVAL;
if (l4_mask->ip4src)
ntuples++;
}
if (l4_mask->ip4dst) {
if (!all_ones(l4_mask->ip4dst))
return -EINVAL;
if (l4_mask->ip4dst)
ntuples++;
}
if (l4_mask->psrc) {
if (!all_ones(l4_mask->psrc))
return -EINVAL;
if (l4_mask->psrc)
ntuples++;
}
if (l4_mask->pdst) {
if (!all_ones(l4_mask->pdst))
return -EINVAL;
if (l4_mask->pdst)
ntuples++;
}
/* Flow is TCP/UDP */
return ++ntuples;
}
@@ -540,16 +528,10 @@ static int validate_ip4(struct ethtool_rx_flow_spec *fs)
if (l3_mask->l4_4_bytes || l3_mask->tos ||
fs->h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4)
return -EINVAL;
if (l3_mask->ip4src) {
if (!all_ones(l3_mask->ip4src))
return -EINVAL;
if (l3_mask->ip4src)
ntuples++;
}
if (l3_mask->ip4dst) {
if (!all_ones(l3_mask->ip4dst))
return -EINVAL;
if (l3_mask->ip4dst)
ntuples++;
}
if (l3_mask->proto)
ntuples++;
/* Flow is IPv4 */
@@ -588,16 +570,10 @@ static int validate_tcpudp6(struct ethtool_rx_flow_spec *fs)
if (!ipv6_addr_any((struct in6_addr *)l4_mask->ip6dst))
ntuples++;
if (l4_mask->psrc) {
if (!all_ones(l4_mask->psrc))
return -EINVAL;
if (l4_mask->psrc)
ntuples++;
}
if (l4_mask->pdst) {
if (!all_ones(l4_mask->pdst))
return -EINVAL;
if (l4_mask->pdst)
ntuples++;
}
/* Flow is TCP/UDP */
return ++ntuples;
}
......
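The pattern in set_ip4()/set_tcp()/set_udp() and the validate_* helpers above
is uniform: the user-supplied mask is copied into the match criteria
(headers_c) instead of being forced to all-ones, and the corresponding
all_ones() checks are dropped. The flow-table matching semantics this relies
on, reduced to a one-line illustration (not driver code):

#include <stdbool.h>
#include <stdint.h>

/* A packet field matches a flow-table entry when its masked bits equal
 * the rule's masked value; an all-ones mask degenerates to exact match. */
static bool fte_field_matches(uint32_t pkt, uint32_t val, uint32_t mask)
{
	return (pkt & mask) == (val & mask);
}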
@@ -929,6 +929,9 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (params->rx_dim_enabled)
__set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
if (params->pflags & MLX5E_PFLAG_RX_NO_CSUM_COMPLETE)
__set_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &c->rq.state);
return 0;
err_destroy_rq:
@@ -3175,7 +3178,7 @@ static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *t
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
struct mlx5e_tir *tir;
void *tirc;
@@ -3202,7 +3205,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
}
}
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
goto out;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
@@ -3273,14 +3276,14 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
return err;
}
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
{
int i;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
return;
for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -4480,6 +4483,31 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo
return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
void mlx5e_build_rq_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params)
{
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
* - Slow PCI heuristic.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*/
if (!slow_pci_heuristic(mdev) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
!mlx5e_rx_is_linear_skb(mdev, params)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
}
void mlx5e_build_rss_params(struct mlx5e_params *params)
{
params->rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
mlx5e_build_default_indir_rqt(params->indirection_rqt,
MLX5E_INDIR_RQT_SIZE, params->num_channels);
}
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels, u16 mtu)
@@ -4503,20 +4531,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->rx_cqe_compress_def = slow_pci_heuristic(mdev);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, false);
/* RQ */
/* Prefer Striding RQ, unless any of the following holds:
* - Striding RQ configuration is not possible/supported.
* - Slow PCI heuristic.
* - Legacy RQ would use linear SKB while Striding RQ would use non-linear.
*/
if (!slow_pci_heuristic(mdev) &&
mlx5e_striding_rq_possible(mdev, params) &&
(mlx5e_rx_mpwqe_is_linear_skb(mdev, params) ||
!mlx5e_rx_is_linear_skb(mdev, params)))
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_STRIDING_RQ, true);
mlx5e_set_rq_type(mdev, params);
mlx5e_init_rq_type_params(mdev, params);
mlx5e_build_rq_params(mdev, params);
/* HW LRO */
@@ -4539,10 +4557,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
params->rss_hfunc = ETH_RSS_HASH_XOR;
netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
mlx5e_build_default_indir_rqt(params->indirection_rqt,
MLX5E_INDIR_RQT_SIZE, max_channels);
mlx5e_build_rss_params(params);
}
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
@@ -4780,7 +4795,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv);
err = mlx5e_create_indirect_tirs(priv, true);
if (err)
goto err_destroy_direct_rqts;
@@ -4805,7 +4820,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
@@ -4822,7 +4837,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
mlx5e_tc_nic_cleanup(priv);
mlx5e_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_indirect_tirs(priv, true);
mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
......
@@ -46,8 +46,6 @@
#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)
static const char mlx5e_rep_driver_name[] = "mlx5e_rep";
@@ -182,12 +180,108 @@ static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
}
}
static void mlx5e_rep_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_ringparam(priv, param);
}
static int mlx5e_rep_set_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
return mlx5e_ethtool_set_ringparam(priv, param);
}
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
struct mlx5_flow_destination *dest)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_flow_handle *flow_rule;
flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
rep->vport,
dest);
if (IS_ERR(flow_rule))
return PTR_ERR(flow_rule);
mlx5_del_flow_rules(rpriv->vport_rx_rule);
rpriv->vport_rx_rule = flow_rule;
return 0;
}
static void mlx5e_rep_get_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
mlx5e_ethtool_get_channels(priv, ch);
}
static int mlx5e_rep_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
u16 curr_channels_amount = priv->channels.params.num_channels;
u32 new_channels_amount = ch->combined_count;
struct mlx5_flow_destination new_dest;
int err = 0;
err = mlx5e_ethtool_set_channels(priv, ch);
if (err)
return err;
if (curr_channels_amount == 1 && new_channels_amount > 1) {
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
new_dest.ft = priv->fs.ttc.ft.t;
} else if (new_channels_amount == 1 && curr_channels_amount > 1) {
new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
new_dest.tir_num = priv->direct_tir[0].tirn;
} else {
return 0;
}
err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
if (err) {
netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
curr_channels_amount, new_channels_amount);
return err;
}
return 0;
}
static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_rxfh_key_size(priv);
}
static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
return mlx5e_ethtool_get_rxfh_indir_size(priv);
}
static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
.get_drvinfo = mlx5e_rep_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = mlx5e_rep_get_strings,
.get_sset_count = mlx5e_rep_get_sset_count,
.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
.get_ringparam = mlx5e_rep_get_ringparam,
.set_ringparam = mlx5e_rep_set_ringparam,
.get_channels = mlx5e_rep_get_channels,
.set_channels = mlx5e_rep_set_channels,
.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
@@ -934,16 +1028,20 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
params->hard_mtu = MLX5E_ETH_HARD_MTU;
params->sw_mtu = mtu;
params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
params->rq_wq_type = MLX5_WQ_TYPE_CYCLIC;
params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;
/* RQ */
mlx5e_build_rq_params(mdev, params);
/* CQ moderation params */
params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
params->num_tc = 1;
params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
/* RSS */
mlx5e_build_rss_params(params);
}
static void mlx5e_build_rep_netdev(struct net_device *netdev)
@@ -963,6 +1061,16 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
netdev->hw_features |= NETIF_F_HW_TC;
netdev->hw_features |= NETIF_F_SG;
netdev->hw_features |= NETIF_F_IP_CSUM;
netdev->hw_features |= NETIF_F_IPV6_CSUM;
netdev->hw_features |= NETIF_F_GRO;
netdev->hw_features |= NETIF_F_TSO;
netdev->hw_features |= NETIF_F_TSO6;
netdev->hw_features |= NETIF_F_RXCSUM;
netdev->features |= netdev->hw_features;
eth_hw_addr_random(netdev);
netdev->min_mtu = ETH_MIN_MTU;
@@ -986,7 +1094,7 @@ static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
priv->channels.params.num_channels = profile->max_nch(mdev);
priv->channels.params.num_channels = 1;
mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
mlx5e_build_rep_netdev(netdev);
@@ -994,13 +1102,50 @@
mlx5e_timestamp_init(priv);
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
struct ttc_params ttc_params = {};
int tt, err;
priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
MLX5_FLOW_NAMESPACE_KERNEL);
/* The inner_ttc in the ttc params is intentionally not set */
ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
mlx5e_set_ttc_ft_params(&ttc_params);
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
if (err) {
netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
return err;
}
return 0;
}
static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_destination dest;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = priv->direct_tir[0].tirn;
flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
rep->vport,
&dest);
if (IS_ERR(flow_rule))
return PTR_ERR(flow_rule);
rpriv->vport_rx_rule = flow_rule;
return 0;
}
static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
int err;
mlx5e_init_l2_addr(priv);
@@ -1011,29 +1156,42 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
return err;
}
err = mlx5e_create_direct_rqts(priv);
err = mlx5e_create_indirect_rqt(priv);
if (err)
goto err_close_drop_rq;
err = mlx5e_create_direct_tirs(priv);
err = mlx5e_create_direct_rqts(priv);
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv, false);
if (err)
goto err_destroy_direct_rqts;
flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
rep->vport,
priv->direct_tir[0].tirn);
if (IS_ERR(flow_rule)) {
err = PTR_ERR(flow_rule);
err = mlx5e_create_direct_tirs(priv);
if (err)
goto err_destroy_indirect_tirs;
err = mlx5e_create_rep_ttc_table(priv);
if (err)
goto err_destroy_direct_tirs;
}
rpriv->vport_rx_rule = flow_rule;
err = mlx5e_create_rep_vport_rx_rule(priv);
if (err)
goto err_destroy_ttc_table;
return 0;
err_destroy_ttc_table:
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
mlx5e_close_drop_rq(&priv->drop_rq);
return err;
@@ -1044,8 +1202,11 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
mlx5_del_flow_rules(rpriv->vport_rx_rule);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv, false);
mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
}
@@ -1061,12 +1222,6 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
return 0;
}
static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
return MLX5E_PORT_REPRESENTOR_NCH;
}
static const struct mlx5e_profile mlx5e_rep_profile = {
.init = mlx5e_init_rep,
.init_rx = mlx5e_init_rep_rx,
@@ -1074,10 +1229,10 @@ static const struct mlx5e_profile mlx5e_rep_profile = {
.init_tx = mlx5e_init_rep_tx,
.cleanup_tx = mlx5e_cleanup_nic_tx,
.update_stats = mlx5e_rep_update_hw_counters,
.max_nch = mlx5e_get_rep_max_num_channels,
.max_nch = mlx5e_get_max_num_channels,
.update_carrier = NULL,
.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
.max_tc = 1,
};
......
@@ -782,6 +782,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
if (unlikely(test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state)))
goto csum_unnecessary;
if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
if (unlikely(get_ip_proto(skb, proto) == IPPROTO_SCTP))
goto csum_unnecessary;
@@ -805,7 +808,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
csum_unnecessary:
if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
(cqe->hds_ip_ext & CQE_L4_OK))) {
((cqe->hds_ip_ext & CQE_L4_OK) ||
(get_cqe_l4_hdr_type(cqe) == CQE_L4_HDR_TYPE_NONE)))) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (cqe_is_tunneled(cqe)) {
skb->csum_level = 1;
......
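Taken together, the two mlx5e_handle_csum() hunks above change the RX
checksum decision in two ways: the per-RQ MLX5E_RQ_STATE_NO_CSUM_COMPLETE bit
short-circuits the CHECKSUM_COMPLETE path, and the CHECKSUM_UNNECESSARY
fallback now also accepts CQEs that report no L4 header at all. A hedged,
simplified restatement of the resulting logic (names are illustrative, not
mlx5e identifiers):

#include <stdbool.h>

enum csum_result { CSUM_NONE, CSUM_COMPLETE, CSUM_UNNECESSARY };

static enum csum_result rx_csum_decision(bool no_csum_complete,
					 bool csum_complete_usable,
					 bool l3_ok, bool l4_ok,
					 bool has_l4_header)
{
	/* The private flag (via the per-RQ state bit) disables the
	 * CHECKSUM_COMPLETE path entirely. */
	if (!no_csum_complete && csum_complete_usable)
		return CSUM_COMPLETE;

	/* New in this series: L3_OK alone is now sufficient when the
	 * packet carries no L4 header (CQE_L4_HDR_TYPE_NONE). */
	if (l3_ok && (l4_ok || !has_l4_header))
		return CSUM_UNNECESSARY;

	return CSUM_NONE;
}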
@@ -2040,8 +2040,8 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
fmdev = priv->mdev;
pmdev = peer_priv->mdev;
mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
return (fsystem_guid == psystem_guid);
}
......
@@ -230,7 +230,8 @@ mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr);
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn);
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
struct mlx5_flow_destination *dest);
enum {
SET_VLAN_STRIP = BIT(0),
......
@@ -775,10 +775,10 @@ static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport,
struct mlx5_flow_destination *dest)
{
struct mlx5_flow_act flow_act = {0};
struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -796,12 +796,10 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
dest.tir_num = tirn;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
&flow_act, &dest, 1);
&flow_act, dest, 1);
if (IS_ERR(flow_rule)) {
esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
goto out;
......
@@ -368,7 +368,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_rqts;
err = mlx5e_create_indirect_tirs(priv);
err = mlx5e_create_indirect_tirs(priv, true);
if (err)
goto err_destroy_direct_rqts;
@@ -385,7 +385,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_indirect_tirs(priv, true);
err_destroy_direct_rqts:
mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
@@ -401,7 +401,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
mlx5e_destroy_indirect_tirs(priv, true);
mlx5e_destroy_direct_rqts(priv);
mlx5e_destroy_rqt(priv, &priv->indir_rqt);
mlx5e_close_drop_rq(&priv->drop_rq);
......
@@ -1201,3 +1201,12 @@ int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev)
{
if (!mdev->sys_image_guid)
mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid);
return mdev->sys_image_guid;
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid);
@@ -838,6 +838,7 @@ struct mlx5_core_dev {
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
} caps;
u64 sys_image_guid;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
......
@@ -121,4 +121,6 @@ int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status);
int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
struct mlx5_core_dev *port_mdev);
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
#endif /* __MLX5_VPORT_H__ */