Commit f9b6ae29 authored by David S. Miller

Merge tag 'mlx5-updates-2018-01-19' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2018-01-19

From: Or Gerlitz <ogerlitz@mellanox.com>
========================================
The first eight patches of this series further enhance the mlx5 hairpin
support. The first two patches deal with using different hairpin
instances for flows whose packets have different priorities, to align
with the port TX QoS model. The next four patches allow us to do HW
spreading of flows over a set of hairpin pairs using RSS. The last two
patches change the driver to also set the size of the HW hairpin queues.
========================================
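
As a rough illustration of the RSS spreading idea, the sketch below hashes a
flow tuple onto one of N hairpin RQ/SQ pairs. It is a standalone userspace
model, not driver code: the hash function, the names, and the spread-by-modulo
are stand-ins for what the NIC's RSS hardware does with its indirection table.

    /* Toy model: one flow maps to one hairpin pair; distinct flows spread. */
    #include <stdint.h>
    #include <stdio.h>

    struct flow_tuple {
            uint32_t src_ip, dst_ip;
            uint16_t src_port, dst_port;
    };

    /* FNV-1a stands in for the Toeplitz hash the HW would compute. */
    static uint32_t toy_flow_hash(const struct flow_tuple *ft)
    {
            const uint8_t *p = (const uint8_t *)ft;
            uint32_t h = 2166136261u;
            size_t i;

            for (i = 0; i < sizeof(*ft); i++)
                    h = (h ^ p[i]) * 16777619u;
            return h;
    }

    int main(void)
    {
            struct flow_tuple ft = { 0x0a000001, 0x0a000002, 1234, 80 };
            int num_channels = 4; /* like hp->num_channels in the diff below */

            /* Each flow lands on one RQ/SQ pair; different flows spread out. */
            printf("flow -> hairpin pair %u of %d\n",
                   toy_flow_hash(&ft) % num_channels, num_channels);
            return 0;
    }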

The next four patches, from Eran Ben Elisha <eranbe@mellanox.com>, add
more debug data for TX timeout handling and optimize the handling of TX
timeouts caused by lost interrupts: on a TX timeout the driver now
explicitly polls the EQ, and if pending EQEs are found it recovers from
the lost interrupt without restarting the channels. Only when no EQEs
are pending does it perform a full channels recovery as before.
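
The decision flow can be modeled in a few lines; this is a hedged userspace
sketch of the policy only (the real functions, mlx5e_tx_timeout_eq_recover()
and mlx5_eq_poll_irq_disabled(), appear in the diff below):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for mlx5_eq_poll_irq_disabled(): returns how many EQEs
     * were pending and got consumed while the IRQ line was masked. */
    static unsigned int poll_eq_irq_disabled(unsigned int pending)
    {
            return pending;
    }

    /* Mirror of the new policy: a non-zero EQE count means a lost
     * interrupt was recovered; zero means a genuine stall. */
    static bool tx_timeout_eq_recover(unsigned int pending)
    {
            unsigned int eqe_count = poll_eq_irq_disabled(pending);

            if (!eqe_count)
                    return false;
            printf("recovered %u eqes; bump eq_rearm counter\n", eqe_count);
            return true;
    }

    int main(void)
    {
            bool reopen_channels = false;

            if (!tx_timeout_eq_recover(3)) /* lost-interrupt case */
                    reopen_channels = true;
            if (!tx_timeout_eq_recover(0)) /* genuine stall */
                    reopen_channels = true;

            if (reopen_channels)
                    printf("scheduling full channels recovery\n");
            return 0;
    }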

From Kamal Heib <kamalh@mellanox.com>: two patches that extend the stats
group API with an update_stats() callback used to fetch each group's
hardware or software counter data. This improves the current API and
reduces code duplication.
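
The pattern is a table of groups, each optionally carrying an update
callback; a minimal standalone rendition (names are illustrative, the real
definitions live in the en_stats.h and en_main.c hunks below):

    #include <stdio.h>

    struct priv; /* opaque, like struct mlx5e_priv */

    #define NDO_UPDATE_STATS (1 << 1) /* like MLX5E_NDO_UPDATE_STATS */

    struct stats_grp {
            unsigned short update_stats_mask;
            void (*update_stats)(struct priv *priv);
    };

    static void update_sw(struct priv *priv) { (void)priv; puts("sw counters"); }
    static void update_hw(struct priv *priv) { (void)priv; puts("hw counters"); }

    static const struct stats_grp grps[] = {
            { .update_stats_mask = NDO_UPDATE_STATS, .update_stats = update_sw },
            { .update_stats = update_hw }, /* fetched on full updates only */
            { 0 },                         /* a group with nothing to fetch */
    };
    static const int num_grps = sizeof(grps) / sizeof(grps[0]);

    int main(void)
    {
            int i;

            /* full update: every group that registered a callback */
            for (i = num_grps - 1; i >= 0; i--)
                    if (grps[i].update_stats)
                            grps[i].update_stats(NULL);

            /* lightweight ndo update: only groups opted in via the mask */
            for (i = num_grps - 1; i >= 0; i--)
                    if (grps[i].update_stats_mask & NDO_UPDATE_STATS)
                            grps[i].update_stats(NULL);
            return 0;
    }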

From Gal Pressman <galp@mellanox.com>: the last patch adds a likely()
annotation to the common RX checksum flow.
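
likely() expands to __builtin_expect() and tells the compiler which branch to
lay out as the fall-through path; since almost every received frame on this
path carries IP, the hint keeps the checksum fast path hot. A minimal
userspace equivalent:

    #include <stdio.h>

    /* Userspace spelling of the kernel's branch-prediction hints. */
    #define likely(x)   __builtin_expect(!!(x), 1)
    #define unlikely(x) __builtin_expect(!!(x), 0)

    int main(void)
    {
            int is_ip = 1; /* the common case on the RX path */

            if (likely(is_ip))
                    puts("CHECKSUM_COMPLETE fast path");
            else
                    puts("rare: not an IP frame");
            return 0;
    }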
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 01c15e93 63a612f9
@@ -44,6 +44,7 @@
 #include <linux/mlx5/port.h>
 #include <linux/mlx5/vport.h>
 #include <linux/mlx5/transobj.h>
+#include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
 #include <net/switchdev.h>
 #include <net/xdp.h>
@@ -560,6 +561,7 @@ struct mlx5e_channel {
        /* data path - accessed per napi poll */
        struct irq_desc *irq_desc;
+       struct mlx5e_ch_stats stats;

        /* control */
        struct mlx5e_priv *priv;
@@ -696,6 +698,11 @@ enum {
        MLX5E_ARFS_FT_LEVEL
 };

+enum {
+       MLX5E_TC_FT_LEVEL = 0,
+       MLX5E_TC_TTC_FT_LEVEL,
+};
+
 struct mlx5e_ethtool_table {
        struct mlx5_flow_table *ft;
        int                    num_rules;
@@ -834,7 +841,7 @@ void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix);
 void mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi);

-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full);
+void mlx5e_update_stats(struct mlx5e_priv *priv);

 int mlx5e_create_flow_steering(struct mlx5e_priv *priv);
 void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv);
@@ -1024,11 +1031,26 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv);
 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv);
 void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);

-int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+struct ttc_params {
+       struct mlx5_flow_table_attr ft_attr;
+       u32 any_tt_tirn;
+       u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
+       struct mlx5e_ttc_table *inner_ttc;
+};

-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params);
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params);
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params);
+
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+                          struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+                            struct mlx5e_ttc_table *ttc);
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+                                struct mlx5e_ttc_table *ttc);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+                                  struct mlx5e_ttc_table *ttc);

 int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
                     u32 underlay_qpn, u32 *tisn);
@@ -1041,6 +1063,8 @@ int mlx5e_open(struct net_device *netdev);
 void mlx5e_update_stats_work(struct work_struct *work);

 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout);
+int mlx5e_bits_invert(unsigned long a, int size);
+
 /* ethtool helpers */
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
                               struct ethtool_drvinfo *drvinfo);
...
@@ -207,7 +207,7 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
                return;

        mutex_lock(&priv->state_lock);
-       mlx5e_update_stats(priv, true);
+       mlx5e_update_stats(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < mlx5e_num_stats_grps; i++)
...
@@ -806,25 +806,25 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
        return err ? ERR_PTR(err) : rule;
 }

-static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv,
+                                         struct ttc_params *params,
+                                         struct mlx5e_ttc_table *ttc)
 {
        struct mlx5_flow_destination dest = {};
-       struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_handle **rules;
        struct mlx5_flow_table *ft;
        int tt;
        int err;

-       ttc = &priv->fs.ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
-                       dest.tir_num = priv->direct_tir[0].tirn;
+                       dest.tir_num = params->any_tt_tirn;
                else
-                       dest.tir_num = priv->indir_tir[tt].tirn;
+                       dest.tir_num = params->indir_tirn[tt];

                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_rules[tt].etype,
                                                    ttc_rules[tt].proto);
@@ -832,12 +832,12 @@ static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
                        goto del_rules;
        }

-       if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+       if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;

        rules = ttc->tunnel_rules;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
-       dest.ft = priv->fs.inner_ttc.ft.t;
+       dest.ft = params->inner_ttc->ft.t;
        for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) {
                rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
                                                    ttc_tunnel_rules[tt].etype,
@@ -977,25 +977,25 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
        return err ? ERR_PTR(err) : rule;
 }

-static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
+static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv,
+                                               struct ttc_params *params,
+                                               struct mlx5e_ttc_table *ttc)
 {
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle **rules;
-       struct mlx5e_ttc_table *ttc;
        struct mlx5_flow_table *ft;
        int err;
        int tt;

-       ttc = &priv->fs.inner_ttc;
        ft = ttc->ft.t;
        rules = ttc->rules;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
                if (tt == MLX5E_TT_ANY)
-                       dest.tir_num = priv->direct_tir[0].tirn;
+                       dest.tir_num = params->any_tt_tirn;
                else
-                       dest.tir_num = priv->inner_indir_tir[tt].tirn;
+                       dest.tir_num = params->indir_tirn[tt];

                rules[tt] = mlx5e_generate_inner_ttc_rule(priv, ft, &dest,
                                                          ttc_rules[tt].etype,
@@ -1075,21 +1075,42 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
        return err;
 }

-int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv,
+                               struct ttc_params *ttc_params)
+{
+       ttc_params->any_tt_tirn = priv->direct_tir[0].tirn;
+       ttc_params->inner_ttc = &priv->fs.inner_ttc;
+}
+
+void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params)
+{
+       struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+       ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
+       ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL;
+       ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params)
+{
+       struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
+
+       ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
+       ft_attr->level = MLX5E_TTC_FT_LEVEL;
+       ft_attr->prio = MLX5E_NIC_PRIO;
+}
+
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+                                struct mlx5e_ttc_table *ttc)
 {
-       struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-       struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return 0;

-       ft_attr.max_fte = MLX5E_INNER_TTC_TABLE_SIZE;
-       ft_attr.level = MLX5E_INNER_TTC_FT_LEVEL;
-       ft_attr.prio = MLX5E_NIC_PRIO;
-
-       ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+       ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
@@ -1100,7 +1121,7 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
        if (err)
                goto err;

-       err = mlx5e_generate_inner_ttc_table_rules(priv);
+       err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc);
        if (err)
                goto err;

@@ -1111,10 +1132,9 @@ int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
        return err;
 }

-void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv,
+                                  struct mlx5e_ttc_table *ttc)
 {
-       struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
-
        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
                return;

@@ -1122,27 +1142,21 @@ void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
        mlx5e_destroy_flow_table(&ttc->ft);
 }

-void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv,
+                            struct mlx5e_ttc_table *ttc)
 {
-       struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-
        mlx5e_cleanup_ttc_rules(ttc);
        mlx5e_destroy_flow_table(&ttc->ft);
 }

-int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params,
+                          struct mlx5e_ttc_table *ttc)
 {
        bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version);
-       struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
-       struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5e_flow_table *ft = &ttc->ft;
        int err;

-       ft_attr.max_fte = MLX5E_TTC_TABLE_SIZE;
-       ft_attr.level = MLX5E_TTC_FT_LEVEL;
-       ft_attr.prio = MLX5E_NIC_PRIO;
-
-       ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr);
+       ft->t = mlx5_create_flow_table(priv->fs.ns, &params->ft_attr);
        if (IS_ERR(ft->t)) {
                err = PTR_ERR(ft->t);
                ft->t = NULL;
@@ -1153,7 +1167,7 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
        if (err)
                goto err;

-       err = mlx5e_generate_ttc_table_rules(priv);
+       err = mlx5e_generate_ttc_table_rules(priv, params, ttc);
        if (err)
                goto err;
@@ -1474,7 +1488,8 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)

 int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 {
-       int err;
+       struct ttc_params ttc_params = {};
+       int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);
@@ -1489,14 +1504,23 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

-       err = mlx5e_create_inner_ttc_table(priv);
+       mlx5e_set_ttc_basic_params(priv, &ttc_params);
+       mlx5e_set_inner_ttc_ft_params(&ttc_params);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+               ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+       err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

-       err = mlx5e_create_ttc_table(priv);
+       mlx5e_set_ttc_ft_params(&ttc_params);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+               ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;
+
+       err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
@@ -1524,9 +1548,9 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
 err_destroy_l2_table:
        mlx5e_destroy_l2_table(priv);
 err_destroy_ttc_table:
-       mlx5e_destroy_ttc_table(priv);
+       mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
 err_destroy_inner_ttc_table:
-       mlx5e_destroy_inner_ttc_table(priv);
+       mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

@@ -1537,8 +1561,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
 {
        mlx5e_destroy_vlan_table(priv);
        mlx5e_destroy_l2_table(priv);
-       mlx5e_destroy_ttc_table(priv);
-       mlx5e_destroy_inner_ttc_table(priv);
+       mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+       mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
        mlx5e_arfs_destroy_tables(priv);
        mlx5e_ethtool_cleanup_steering(priv);
 }
@@ -173,182 +173,23 @@ static void mlx5e_tx_timeout_work(struct work_struct *work)
        rtnl_unlock();
 }

-static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
-{
-       struct mlx5e_sw_stats temp, *s = &temp;
-       struct mlx5e_rq_stats *rq_stats;
-       struct mlx5e_sq_stats *sq_stats;
-       int i, j;
-
-       memset(s, 0, sizeof(*s));
-       for (i = 0; i < priv->channels.num; i++) {
-               struct mlx5e_channel *c = priv->channels.c[i];
-
-               rq_stats = &c->rq.stats;
-
-               s->rx_packets += rq_stats->packets;
-               s->rx_bytes += rq_stats->bytes;
-               s->rx_lro_packets += rq_stats->lro_packets;
-               s->rx_lro_bytes += rq_stats->lro_bytes;
-               s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
-               s->rx_csum_none += rq_stats->csum_none;
-               s->rx_csum_complete += rq_stats->csum_complete;
-               s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
-               s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
-               s->rx_xdp_drop += rq_stats->xdp_drop;
-               s->rx_xdp_tx += rq_stats->xdp_tx;
-               s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
-               s->rx_wqe_err += rq_stats->wqe_err;
-               s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
-               s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
-               s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
-               s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
-               s->rx_page_reuse += rq_stats->page_reuse;
-               s->rx_cache_reuse += rq_stats->cache_reuse;
-               s->rx_cache_full += rq_stats->cache_full;
-               s->rx_cache_empty += rq_stats->cache_empty;
-               s->rx_cache_busy += rq_stats->cache_busy;
-               s->rx_cache_waive += rq_stats->cache_waive;
-
-               for (j = 0; j < priv->channels.params.num_tc; j++) {
-                       sq_stats = &c->sq[j].stats;
-
-                       s->tx_packets += sq_stats->packets;
-                       s->tx_bytes += sq_stats->bytes;
-                       s->tx_tso_packets += sq_stats->tso_packets;
-                       s->tx_tso_bytes += sq_stats->tso_bytes;
-                       s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
-                       s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
-                       s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
-                       s->tx_queue_stopped += sq_stats->stopped;
-                       s->tx_queue_wake += sq_stats->wake;
-                       s->tx_queue_dropped += sq_stats->dropped;
-                       s->tx_xmit_more += sq_stats->xmit_more;
-                       s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
-                       s->tx_csum_none += sq_stats->csum_none;
-                       s->tx_csum_partial += sq_stats->csum_partial;
-               }
-       }
-
-       s->link_down_events_phy = MLX5_GET(ppcnt_reg,
-                               priv->stats.pport.phy_counters,
-                               counter_set.phys_layer_cntrs.link_down_events);
-       memcpy(&priv->stats.sw, s, sizeof(*s));
-}
-
-static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
-{
-       int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
-       u32 *out = (u32 *)priv->stats.vport.query_vport_out;
-       u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
-       struct mlx5_core_dev *mdev = priv->mdev;
-
-       MLX5_SET(query_vport_counter_in, in, opcode,
-                MLX5_CMD_OP_QUERY_VPORT_COUNTER);
-       MLX5_SET(query_vport_counter_in, in, op_mod, 0);
-       MLX5_SET(query_vport_counter_in, in, other_vport, 0);
-
-       mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
-}
-
-static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
-{
-       struct mlx5e_pport_stats *pstats = &priv->stats.pport;
-       struct mlx5_core_dev *mdev = priv->mdev;
-       u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
-       int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
-       int prio;
-       void *out;
-
-       MLX5_SET(ppcnt_reg, in, local_port, 1);
-       out = pstats->IEEE_802_3_counters;
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-       if (!full)
-               return;
-
-       out = pstats->RFC_2863_counters;
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-       out = pstats->RFC_2819_counters;
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-       out = pstats->phy_counters;
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-
-       if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
-               out = pstats->phy_statistical_counters;
-               MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
-               mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-       }
-
-       if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
-               out = pstats->eth_ext_counters;
-               MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
-               mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
-       }
-
-       MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
-       for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
-               out = pstats->per_prio_counters[prio];
-               MLX5_SET(ppcnt_reg, in, prio_tc, prio);
-               mlx5_core_access_reg(mdev, in, sz, out, sz,
-                                    MLX5_REG_PPCNT, 0, 0);
-       }
-}
-
-static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
-{
-       struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
-       u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
-       int err;
-
-       if (!priv->q_counter)
-               return;
-
-       err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
-       if (err)
-               return;
-
-       qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
-}
-
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-       struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-       struct mlx5_core_dev *mdev = priv->mdev;
-       u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
-       int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-       void *out;
-
-       if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
-               return;
-
-       out = pcie_stats->pcie_perf_counters;
-       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-}
-
-void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
+void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-       if (full) {
-               mlx5e_update_pcie_counters(priv);
-               mlx5e_ipsec_update_stats(priv);
-       }
-       mlx5e_update_pport_counters(priv, full);
-       mlx5e_update_vport_counters(priv);
-       mlx5e_update_q_counter(priv);
-       mlx5e_update_sw_counters(priv);
+       int i;
+
+       for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+               if (mlx5e_stats_grps[i].update_stats)
+                       mlx5e_stats_grps[i].update_stats(priv);
 }

 static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
 {
-       mlx5e_update_stats(priv, false);
+       int i;
+
+       for (i = mlx5e_num_stats_grps - 1; i >= 0; i--)
+               if (mlx5e_stats_grps[i].update_stats_mask &
+                   MLX5E_NDO_UPDATE_STATS)
+                       mlx5e_stats_grps[i].update_stats(priv);
 }

 void mlx5e_update_stats_work(struct work_struct *work)
@@ -2219,7 +2060,7 @@ static int mlx5e_rx_hash_fn(int hfunc)
               MLX5_RX_HASH_FN_INVERTED_XOR8;
 }

-static int mlx5e_bits_invert(unsigned long a, int size)
+int mlx5e_bits_invert(unsigned long a, int size)
 {
        int inv = 0;
        int i;
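
For reference, mlx5e_bits_invert() reverses the low `size` bits of its
argument; this kind of bit-reversal is used when filling indirection tables so
that adjacent entries land on far-apart queues. Only the declaration change is
shown in this hunk, so the standalone body below is an equivalent
reimplementation written for this note, an assumption rather than a copy:

    #include <stdio.h>

    /* Reverse the low 'size' bits: for size=3, 1 (001b) -> 4 (100b). */
    static int bits_invert(unsigned long a, int size)
    {
            int inv = 0;
            int i;

            for (i = 0; i < size; i++)
                    inv |= ((a >> (size - i - 1)) & 1) << i;
            return inv;
    }

    int main(void)
    {
            int i;

            for (i = 0; i < 8; i++) /* consecutive inputs visit distant slots */
                    printf("%d -> %d\n", i, bits_invert(i, 3));
            return 0;
    }
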
@@ -3757,26 +3598,62 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
        return features;
 }

+static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
+                                       struct mlx5e_txqsq *sq)
+{
+       struct mlx5e_priv *priv = netdev_priv(dev);
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int irqn_not_used, eqn;
+       struct mlx5_eq *eq;
+       u32 eqe_count;
+
+       if (mlx5_vector2eqn(mdev, sq->cq.mcq.vector, &eqn, &irqn_not_used))
+               return false;
+
+       eq = mlx5_eqn2eq(mdev, eqn);
+       if (IS_ERR(eq))
+               return false;
+
+       netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n",
+                  eqn, eq->cons_index, eq->irqn);
+
+       eqe_count = mlx5_eq_poll_irq_disabled(eq);
+       if (!eqe_count)
+               return false;
+
+       netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
+       sq->channel->stats.eq_rearm++;
+       return true;
+}
+
 static void mlx5e_tx_timeout(struct net_device *dev)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
-       bool sched_work = false;
+       bool reopen_channels = false;
        int i;

        netdev_err(dev, "TX timeout detected\n");

        for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
+               struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, i);
                struct mlx5e_txqsq *sq = priv->txq2sq[i];

-               if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
+               if (!netif_xmit_stopped(dev_queue))
                        continue;
-               sched_work = true;
-               clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
-               netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
-                          i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
+
+               netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x, usecs since last trans: %u\n",
+                          i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc,
+                          jiffies_to_usecs(jiffies - dev_queue->trans_start));
+
+               /* If we recover a lost interrupt, most likely TX timeout will
+                * be resolved, skip reopening channels
+                */
+               if (!mlx5e_tx_timeout_eq_recover(dev, sq)) {
+                       clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
+                       reopen_channels = true;
+               }
        }

-       if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
+       if (reopen_channels && test_bit(MLX5E_STATE_OPENED, &priv->state))
                schedule_work(&priv->tx_timeout_work);
 }
...
@@ -631,7 +631,7 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
                return;
        }

-       if (is_last_ethertype_ip(skb, &network_depth)) {
+       if (likely(is_last_ethertype_ip(skb, &network_depth))) {
                skb->ip_summed = CHECKSUM_COMPLETE;
                skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
                if (network_depth > ETH_HLEN)
...
@@ -44,6 +44,7 @@
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
 #define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)
+#define MLX5E_DECLARE_CH_STAT(type, fld) "ch%d_"#fld, offsetof(type, fld)

 struct counter_desc {
        char format[ETH_GSTRING_LEN];
@@ -88,6 +89,7 @@ struct mlx5e_sw_stats {
        u64 rx_cache_empty;
        u64 rx_cache_busy;
        u64 rx_cache_waive;
+       u64 ch_eq_rearm;

        /* Special handling counters */
        u64 link_down_events_phy;
@@ -192,6 +194,10 @@ struct mlx5e_sq_stats {
        u64 dropped;
 };

+struct mlx5e_ch_stats {
+       u64 eq_rearm;
+};
+
 struct mlx5e_stats {
        struct mlx5e_sw_stats sw;
        struct mlx5e_qcounter_stats qcnt;
@@ -201,11 +207,17 @@ struct mlx5e_stats {
        struct mlx5e_pcie_stats pcie;
 };

+enum {
+       MLX5E_NDO_UPDATE_STATS = BIT(0x1),
+};
+
 struct mlx5e_priv;
 struct mlx5e_stats_grp {
+       u16 update_stats_mask;
        int (*get_num_stats)(struct mlx5e_priv *priv);
        int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
        int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
+       void (*update_stats)(struct mlx5e_priv *priv);
 };

 extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
...
@@ -530,6 +530,24 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
        return IRQ_HANDLED;
 }

+/* Some architectures don't latch interrupts when they are disabled, so using
+ * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to
+ * avoid losing them. It is not recommended to use it, unless this is the last
+ * resort.
+ */
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq)
+{
+       u32 count_eqe;
+
+       disable_irq(eq->irqn);
+       count_eqe = eq->cons_index;
+       mlx5_eq_int(eq->irqn, eq);
+       count_eqe = eq->cons_index - count_eqe;
+       enable_irq(eq->irqn);
+
+       return count_eqe;
+}
+
 static void init_eq_buf(struct mlx5_eq *eq)
 {
        struct mlx5_eqe *eqe;
...
@@ -89,6 +89,9 @@
 /* One more level for tc */
 #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1)

+#define KERNEL_NIC_TC_NUM_PRIOS  1
+#define KERNEL_NIC_TC_NUM_LEVELS 2
+
 #define ANCHOR_NUM_LEVELS 1
 #define ANCHOR_NUM_PRIOS 1
 #define ANCHOR_MIN_LEVEL (BY_PASS_MIN_LEVEL + 1)
@@ -134,7 +137,7 @@ static struct init_tree_node {
                ADD_NS(ADD_MULTIPLE_PRIO(ETHTOOL_NUM_PRIOS,
                                         ETHTOOL_PRIO_NUM_LEVELS))),
        ADD_PRIO(0, KERNEL_MIN_LEVEL, 0, {},
-                ADD_NS(ADD_MULTIPLE_PRIO(1, 1),
+                ADD_NS(ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS),
                        ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS,
                                          KERNEL_NIC_PRIO_NUM_LEVELS))),
        ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0,
...
@@ -241,7 +241,8 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)

 static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
 {
-       int err;
+       struct ttc_params ttc_params = {};
+       int tt, err;

        priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
                                              MLX5_FLOW_NAMESPACE_KERNEL);
@@ -256,14 +257,23 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
                priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
        }

-       err = mlx5e_create_inner_ttc_table(priv);
+       mlx5e_set_ttc_basic_params(priv, &ttc_params);
+       mlx5e_set_inner_ttc_ft_params(&ttc_params);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+               ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+       err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
                           err);
                goto err_destroy_arfs_tables;
        }

-       err = mlx5e_create_ttc_table(priv);
+       mlx5e_set_ttc_ft_params(&ttc_params);
+       for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
+               ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn;
+
+       err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
        if (err) {
                netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
                           err);
@@ -273,7 +283,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
        return 0;

 err_destroy_inner_ttc_table:
-       mlx5e_destroy_inner_ttc_table(priv);
+       mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
 err_destroy_arfs_tables:
        mlx5e_arfs_destroy_tables(priv);

@@ -282,8 +292,8 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)

 static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
 {
-       mlx5e_destroy_ttc_table(priv);
-       mlx5e_destroy_inner_ttc_table(priv);
+       mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
+       mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc);
        mlx5e_arfs_destroy_tables(priv);
 }
...
@@ -116,6 +116,7 @@ int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy,
 int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev);
 u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev);
 struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
+u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq);
 void mlx5_cq_tasklet_cb(unsigned long data);
 int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
...
@@ -413,6 +413,7 @@ static int mlx5_hairpin_create_rq(struct mlx5_core_dev *mdev,
        MLX5_SET(rqc, rqc, counter_set_id, params->q_counter);

        MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+       MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);

        return mlx5_core_create_rq(mdev, in, MLX5_ST_SZ_BYTES(create_rq_in), rqn);
 }
@@ -430,6 +431,7 @@ static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);

        MLX5_SET(wq, wq, log_hairpin_data_sz, params->log_data_size);
+       MLX5_SET(wq, wq, log_hairpin_num_packets, params->log_num_packets);

        return mlx5_core_create_sq(mdev, in, MLX5_ST_SZ_BYTES(create_sq_in), sqn);
 }
@@ -437,28 +439,40 @@ static int mlx5_hairpin_create_sq(struct mlx5_core_dev *mdev,
 static int mlx5_hairpin_create_queues(struct mlx5_hairpin *hp,
                                      struct mlx5_hairpin_params *params)
 {
-       int err;
+       int i, j, err;

-       err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn);
-       if (err)
-               goto out_err_rq;
+       for (i = 0; i < hp->num_channels; i++) {
+               err = mlx5_hairpin_create_rq(hp->func_mdev, params, &hp->rqn[i]);
+               if (err)
+                       goto out_err_rq;
+       }

-       err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn);
-       if (err)
-               goto out_err_sq;
+       for (i = 0; i < hp->num_channels; i++) {
+               err = mlx5_hairpin_create_sq(hp->peer_mdev, params, &hp->sqn[i]);
+               if (err)
+                       goto out_err_sq;
+       }

        return 0;

 out_err_sq:
-       mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
+       for (j = 0; j < i; j++)
+               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[j]);
+       i = hp->num_channels;
 out_err_rq:
+       for (j = 0; j < i; j++)
+               mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[j]);
        return err;
 }

 static void mlx5_hairpin_destroy_queues(struct mlx5_hairpin *hp)
 {
-       mlx5_core_destroy_rq(hp->func_mdev, hp->rqn);
-       mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn);
+       int i;
+
+       for (i = 0; i < hp->num_channels; i++) {
+               mlx5_core_destroy_rq(hp->func_mdev, hp->rqn[i]);
+               mlx5_core_destroy_sq(hp->peer_mdev, hp->sqn[i]);
+       }
 }

 static int mlx5_hairpin_modify_rq(struct mlx5_core_dev *func_mdev, u32 rqn,
@@ -505,40 +519,52 @@ static int mlx5_hairpin_modify_sq(struct mlx5_core_dev *peer_mdev, u32 sqn,

 static int mlx5_hairpin_pair_queues(struct mlx5_hairpin *hp)
 {
-       int err;
+       int i, j, err;

-       /* set peer SQ */
-       err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn,
-                                    MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
-                                    MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn);
-       if (err)
-               goto err_modify_sq;
+       /* set peer SQs */
+       for (i = 0; i < hp->num_channels; i++) {
+               err = mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i],
+                                            MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
+                                            MLX5_CAP_GEN(hp->func_mdev, vhca_id), hp->rqn[i]);
+               if (err)
+                       goto err_modify_sq;
+       }

-       /* set func RQ */
-       err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn,
-                                    MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
-                                    MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn);
-       if (err)
-               goto err_modify_rq;
+       /* set func RQs */
+       for (i = 0; i < hp->num_channels; i++) {
+               err = mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i],
+                                            MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY,
+                                            MLX5_CAP_GEN(hp->peer_mdev, vhca_id), hp->sqn[i]);
+               if (err)
+                       goto err_modify_rq;
+       }

        return 0;

 err_modify_rq:
-       mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
-                              MLX5_SQC_STATE_RST, 0, 0);
+       for (j = 0; j < i; j++)
+               mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[j], MLX5_RQC_STATE_RDY,
+                                      MLX5_RQC_STATE_RST, 0, 0);
+       i = hp->num_channels;
 err_modify_sq:
+       for (j = 0; j < i; j++)
+               mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[j], MLX5_SQC_STATE_RDY,
+                                      MLX5_SQC_STATE_RST, 0, 0);
        return err;
 }

 static void mlx5_hairpin_unpair_queues(struct mlx5_hairpin *hp)
 {
-       /* unset func RQ */
-       mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn, MLX5_RQC_STATE_RDY,
-                              MLX5_RQC_STATE_RST, 0, 0);
+       int i;

-       /* unset peer SQ */
-       mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn, MLX5_SQC_STATE_RDY,
-                              MLX5_SQC_STATE_RST, 0, 0);
+       /* unset func RQs */
+       for (i = 0; i < hp->num_channels; i++)
+               mlx5_hairpin_modify_rq(hp->func_mdev, hp->rqn[i], MLX5_RQC_STATE_RDY,
+                                      MLX5_RQC_STATE_RST, 0, 0);
+
+       /* unset peer SQs */
+       for (i = 0; i < hp->num_channels; i++)
+               mlx5_hairpin_modify_sq(hp->peer_mdev, hp->sqn[i], MLX5_SQC_STATE_RDY,
+                                      MLX5_SQC_STATE_RST, 0, 0);
 }

@@ -550,13 +576,17 @@ mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev,
        struct mlx5_hairpin *hp;
        int size, err;

-       size = sizeof(*hp);
+       size = sizeof(*hp) + params->num_channels * 2 * sizeof(u32);
        hp = kzalloc(size, GFP_KERNEL);
        if (!hp)
                return ERR_PTR(-ENOMEM);

        hp->func_mdev = func_mdev;
        hp->peer_mdev = peer_mdev;
+       hp->num_channels = params->num_channels;
+
+       hp->rqn = (void *)hp + sizeof(*hp);
+       hp->sqn = hp->rqn + params->num_channels;

        /* alloc and pair func --> peer hairpin */
        err = mlx5_hairpin_create_queues(hp, params);
...
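
A note on the allocation above: hp, its rqn[] array, and its sqn[] array come
from one kzalloc(), with both arrays laid out right after the struct and freed
together with it. A standalone sketch of that layout trick:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct hairpin {
            int num_channels;
            uint32_t *rqn;
            uint32_t *sqn;
    };

    int main(void)
    {
            int num_channels = 4;
            /* one allocation: struct, then rqn[], then sqn[] */
            size_t size = sizeof(struct hairpin) +
                          num_channels * 2 * sizeof(uint32_t);
            struct hairpin *hp = calloc(1, size);

            if (!hp)
                    return 1;
            hp->num_channels = num_channels;
            hp->rqn = (uint32_t *)((char *)hp + sizeof(*hp));
            hp->sqn = hp->rqn + num_channels;

            hp->rqn[0] = 0x11;
            hp->sqn[0] = 0x22;
            printf("rqn[0]=0x%x sqn[0]=0x%x\n", hp->rqn[0], hp->sqn[0]);

            free(hp); /* releases the struct and both arrays at once */
            return 0;
    }
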
@@ -1031,7 +1031,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
        u8 log_max_hairpin_queues[0x5];
        u8 reserved_at_3c8[0x3];
        u8 log_max_hairpin_wq_data_sz[0x5];
-       u8 reserved_at_3d0[0xb];
+       u8 reserved_at_3d0[0x3];
+       u8 log_max_hairpin_num_packets[0x5];
+       u8 reserved_at_3d8[0x3];
        u8 log_max_wq_sz[0x5];

        u8 nic_vport_change_event[0x1];
@@ -1172,7 +1174,9 @@ struct mlx5_ifc_wq_bits {
        u8 reserved_at_118[0x3];
        u8 log_wq_sz[0x5];

-       u8 reserved_at_120[0xb];
+       u8 reserved_at_120[0x3];
+       u8 log_hairpin_num_packets[0x5];
+       u8 reserved_at_128[0x3];
        u8 log_hairpin_data_sz[0x5];

        u8 reserved_at_130[0x5];
...
@@ -77,15 +77,19 @@ void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);

 struct mlx5_hairpin_params {
        u8  log_data_size;
+       u8  log_num_packets;
        u16 q_counter;
+       int num_channels;
 };

 struct mlx5_hairpin {
        struct mlx5_core_dev *func_mdev;
        struct mlx5_core_dev *peer_mdev;

-       u32 rqn;
-       u32 sqn;
+       int num_channels;
+       u32 *rqn;
+       u32 *sqn;
 };

 struct mlx5_hairpin *
...
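
Tying the transobj.h changes together, a hedged usage sketch: a kernel-context
fragment that assumes <linux/mlx5/transobj.h> and two already-bound mdev
pointers; the numeric values are placeholders, not recommendations.

    /* Sketch only: create a multi-channel hairpin with sized queues. */
    static int example_create_hairpin(struct mlx5_core_dev *func_mdev,
                                      struct mlx5_core_dev *peer_mdev,
                                      u16 q_counter)
    {
            struct mlx5_hairpin_params params = {
                    .log_data_size   = 16, /* placeholder: 64KB of data */
                    .log_num_packets = 7,  /* placeholder: 128 packets */
                    .q_counter       = q_counter,
                    .num_channels    = 4,  /* placeholder: 4 RQ/SQ pairs */
            };
            struct mlx5_hairpin *hp;

            hp = mlx5_core_hairpin_create(func_mdev, peer_mdev, &params);
            if (IS_ERR(hp))
                    return PTR_ERR(hp);

            /* ... steer flows to hp->rqn[i], i < hp->num_channels ... */

            mlx5_core_hairpin_destroy(hp);
            return 0;
    }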