Commit 0109841f authored by Jakub Kicinski

Merge tag 'mlx5-updates-2021-12-02' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2021-12-02

Misc updates to mlx5 driver

1) Various code cleanups
2) Error path handling fixes for recently added features
3) Print more information on pci error handling
4) Dynamically resize flow counters query buffer
====================

Link: https://lore.kernel.org/r/20211203005622.183325-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents fc993be3 b247f32a
......@@ -115,6 +115,7 @@ config MLX5_TC_CT
config MLX5_TC_SAMPLE
bool "MLX5 TC sample offload support"
depends on MLX5_CLS_ACT
+depends on PSAMPLE=y || PSAMPLE=n || MLX5_CORE=m
default y
help
Say Y here if you want to support offloading sample rules via tc
......
......@@ -173,7 +173,7 @@ struct page_pool;
#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\
ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_ALIGNMENT)
-#define MLX5E_MAX_KLM_PER_WQE(mdev) \
+#define MLX5E_MAX_KLM_PER_WQE \
MLX5E_KLM_ENTRIES_PER_WQE(MLX5E_TX_MPW_MAX_NUM_DS << MLX5_MKEY_BSF_OCTO_SIZE)
#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
......@@ -1057,7 +1057,6 @@ int mlx5e_safe_switch_params(struct mlx5e_priv *priv,
mlx5e_fp_preactivate preactivate,
void *context, bool reset);
int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv);
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv);
int mlx5e_num_channels_changed_ctx(struct mlx5e_priv *priv, void *context);
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv);
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv);
......
......@@ -717,7 +717,7 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev,
int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz));
u32 wqebbs;
-max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev);
+max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE;
max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param);
max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr;
rest = max_hd_per_wqe % max_klm_per_umr;
......
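A worked example of this sizing arithmetic (numbers invented for illustration, not taken from the driver): if one RQ WQE carries max_hd_per_wqe = 100 headers and a single UMR covers max_klm_per_umr = 32 KLM entries, the ICOSQ must fit 100 / 32 = 3 full UMR WQEs plus one extra UMR for the remaining 100 % 32 = 4 entries, i.e. four UMRs per RQ WQE in total.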
......@@ -2598,7 +2598,7 @@ static void mlx5e_set_default_xps_cpumasks(struct mlx5e_priv *priv,
}
}
-int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
+static int mlx5e_num_channels_changed(struct mlx5e_priv *priv)
{
u16 count = priv->channels.params.num_channels;
int err;
......
......@@ -619,7 +619,7 @@ static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_icosq *sq = rq->icosq;
int i, err, max_klm_entries, len;
-max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev);
+max_klm_entries = MLX5E_MAX_KLM_PER_WQE;
klm_entries = bitmap_find_window(shampo->bitmap,
shampo->hd_per_wqe,
shampo->hd_per_wq, shampo->pi);
......
......@@ -1441,7 +1441,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
metadata);
if (err)
-return err;
+goto err_out;
}
}
......@@ -1457,13 +1457,15 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
if (attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule is only supported on chain 0");
-return -EOPNOTSUPP;
+err = -EOPNOTSUPP;
+goto err_out;
}
if (attr->dest_chain) {
NL_SET_ERR_MSG_MOD(extack,
"Internal port rule offload doesn't support goto action");
-return -EOPNOTSUPP;
+err = -EOPNOTSUPP;
+goto err_out;
}
int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
......@@ -1471,8 +1473,10 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
flow_flag_test(flow, EGRESS) ?
MLX5E_TC_INT_PORT_EGRESS :
MLX5E_TC_INT_PORT_INGRESS);
-if (IS_ERR(int_port))
-return PTR_ERR(int_port);
+if (IS_ERR(int_port)) {
+err = PTR_ERR(int_port);
+goto err_out;
+}
esw_attr->int_port = int_port;
}
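All of the conversions above land on the same single-exit error-handling shape, so state acquired earlier in mlx5e_tc_add_fdb_flow() is released in one place. A minimal sketch of that shape, with illustrative names rather than the driver's own:

struct ctx;				/* illustrative types and helpers */
int acquire_first(struct ctx *c);
int acquire_second(struct ctx *c);
void release_first(struct ctx *c);

static int setup_flow(struct ctx *c)
{
	int err;

	err = acquire_first(c);		/* nothing to undo on failure yet */
	if (err)
		return err;

	err = acquire_second(c);	/* from here on, unwind via err_out */
	if (err)
		goto err_out;

	return 0;

err_out:
	release_first(c);
	return err;
}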
......@@ -3355,11 +3359,9 @@ add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
static int validate_goto_chain(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
const struct flow_action_entry *act,
-u32 actions,
struct netlink_ext_ack *extack)
{
bool is_esw = mlx5e_is_eswitch_flow(flow);
-struct mlx5_flow_attr *attr = flow->attr;
bool ft_flow = mlx5e_is_ft_flow(flow);
u32 dest_chain = act->chain_index;
struct mlx5_fs_chains *chains;
......@@ -3380,7 +3382,7 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
}
if (!mlx5_chains_backwards_supported(chains) &&
-dest_chain <= attr->chain) {
+dest_chain <= flow->attr->chain) {
NL_SET_ERR_MSG_MOD(extack,
"Goto lower numbered chain isn't supported");
return -EOPNOTSUPP;
......@@ -3392,7 +3394,7 @@ static int validate_goto_chain(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
-if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
+if (flow->attr->action & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
!reformat_and_fwd) {
NL_SET_ERR_MSG_MOD(extack,
......@@ -3443,19 +3445,9 @@ actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
}
static int
-parse_tc_nic_actions(struct mlx5e_priv *priv,
-struct flow_action *flow_action,
-struct mlx5e_tc_flow *flow,
+flow_action_supported(struct flow_action *flow_action,
struct netlink_ext_ack *extack)
{
-struct mlx5e_tc_flow_parse_attr *parse_attr;
-struct mlx5_flow_attr *attr = flow->attr;
-struct pedit_headers_action hdrs[2] = {};
-const struct flow_action_entry *act;
-struct mlx5_nic_flow_attr *nic_attr;
-u32 action = 0;
-int err, i;
if (!flow_action_has_entries(flow_action)) {
NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
return -EINVAL;
......@@ -3467,6 +3459,26 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
+return 0;
+}
+static int
+parse_tc_nic_actions(struct mlx5e_priv *priv,
+struct flow_action *flow_action,
+struct mlx5e_tc_flow *flow,
+struct netlink_ext_ack *extack)
+{
+struct mlx5e_tc_flow_parse_attr *parse_attr;
+struct mlx5_flow_attr *attr = flow->attr;
+struct pedit_headers_action hdrs[2] = {};
+const struct flow_action_entry *act;
+struct mlx5_nic_flow_attr *nic_attr;
+int err, i;
+err = flow_action_supported(flow_action, extack);
+if (err)
+return err;
nic_attr = attr->nic_attr;
nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
parse_attr = attr->parse_attr;
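The two hunks above extract the entry and HW-stats validation into flow_action_supported(), which both the NIC parser (here) and the FDB parser (further down) now call before walking the action list:

	err = flow_action_supported(flow_action, extack);
	if (err)
		return err;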
......@@ -3474,11 +3486,11 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_DROP:
-action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_MANGLE:
......@@ -3488,19 +3500,19 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
if (err)
return err;
-action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
break;
case FLOW_ACTION_VLAN_MANGLE:
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_KERNEL,
act, parse_attr, hdrs,
-&action, extack);
+&attr->action, extack);
if (err)
return err;
break;
case FLOW_ACTION_CSUM:
-if (csum_offload_supported(priv, action,
+if (csum_offload_supported(priv, attr->action,
act->csum_flags,
extack))
break;
......@@ -3513,7 +3525,7 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
same_hw_devs(priv, netdev_priv(peer_dev))) {
parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
flow_flag_set(flow, HAIRPIN);
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
} else {
NL_SET_ERR_MSG_MOD(extack,
......@@ -3534,16 +3546,15 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
}
nic_attr->flow_tag = mark;
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
}
break;
case FLOW_ACTION_GOTO:
-err = validate_goto_chain(priv, flow, act, action,
-extack);
+err = validate_goto_chain(priv, flow, act, extack);
if (err)
return err;
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
......@@ -3563,8 +3574,6 @@ parse_tc_nic_actions(struct mlx5e_priv *priv,
}
}
-attr->action = action;
if (attr->dest_chain && parse_attr->mirred_ifindex[0]) {
NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
return -EOPNOTSUPP;
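Throughout both parsers the series also drops the local u32 action accumulator, together with its final attr->action = action; copy-back, and ORs flags straight into the attribute, so helpers that took a u32 *action pointer can be handed &attr->action instead. Schematically (not the driver's code):

	/* before: stage flags in a local, publish once at the end */
	u32 action = 0;
	action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	attr->action = action;

	/* after: write through the attribute directly */
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;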
......@@ -3684,7 +3693,6 @@ static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
static int add_vlan_push_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
struct net_device **out_dev,
-u32 *action,
struct netlink_ext_ack *extack)
{
struct net_device *vlan_dev = *out_dev;
......@@ -3696,7 +3704,7 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
};
int err;
-err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
+err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, &attr->action, extack);
if (err)
return err;
......@@ -3707,14 +3715,13 @@ static int add_vlan_push_action(struct mlx5e_priv *priv,
return -ENODEV;
if (is_vlan_dev(*out_dev))
-err = add_vlan_push_action(priv, attr, out_dev, action, extack);
+err = add_vlan_push_action(priv, attr, out_dev, extack);
return err;
}
static int add_vlan_pop_action(struct mlx5e_priv *priv,
struct mlx5_flow_attr *attr,
-u32 *action,
struct netlink_ext_ack *extack)
{
struct flow_action_entry vlan_act = {
......@@ -3725,7 +3732,8 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
nest_level = attr->parse_attr->filter_dev->lower_level -
priv->netdev->lower_level;
while (nest_level--) {
-err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action, extack);
+err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr,
+&attr->action, extack);
if (err)
return err;
}
......@@ -3882,21 +3890,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
const struct flow_action_entry *act;
struct mlx5_esw_flow_attr *esw_attr;
bool encap = false, decap = false;
-u32 action = attr->action;
int err, i, if_count = 0;
bool ptype_host = false;
bool mpls_push = false;
-if (!flow_action_has_entries(flow_action)) {
-NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
-return -EINVAL;
-}
-if (!flow_action_hw_stats_check(flow_action, extack,
-FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
-NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
-return -EOPNOTSUPP;
-}
+err = flow_action_supported(flow_action, extack);
+if (err)
+return err;
esw_attr = attr->esw_attr;
parse_attr = attr->parse_attr;
......@@ -3904,7 +3904,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
flow_action_for_each(i, act, flow_action) {
switch (act->id) {
case FLOW_ACTION_ACCEPT:
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->flags |= MLX5_ESW_ATTR_FLAG_ACCEPT;
break;
......@@ -3918,7 +3918,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
ptype_host = true;
break;
case FLOW_ACTION_DROP:
-action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
break;
case FLOW_ACTION_TRAP:
......@@ -3927,7 +3927,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
"action trap is supported as a sole action only");
return -EOPNOTSUPP;
}
-action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT);
attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
break;
......@@ -3959,7 +3959,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
}
parse_attr->eth.h_proto = act->mpls_pop.proto;
-action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
flow_flag_set(flow, L3_TO_L2_DECAP);
break;
case FLOW_ACTION_MANGLE:
......@@ -3970,12 +3970,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return err;
if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
-action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
esw_attr->split_count = esw_attr->out_count;
}
break;
case FLOW_ACTION_CSUM:
-if (csum_offload_supported(priv, action,
+if (csum_offload_supported(priv, attr->action,
act->csum_flags, extack))
break;
......@@ -4011,12 +4011,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
err = mlx5e_set_fwd_to_int_port_actions(priv, attr, out_dev->ifindex,
MLX5E_TC_INT_PORT_INGRESS,
-&action, esw_attr->out_count);
+&attr->action, esw_attr->out_count);
if (err)
return err;
......@@ -4061,7 +4061,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
if (encap) {
parse_attr->mirred_ifindex[esw_attr->out_count] =
......@@ -4096,16 +4096,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -ENODEV;
if (is_vlan_dev(out_dev)) {
-err = add_vlan_push_action(priv, attr,
-&out_dev,
-&action, extack);
+err = add_vlan_push_action(priv, attr, &out_dev, extack);
if (err)
return err;
}
if (is_vlan_dev(parse_attr->filter_dev)) {
-err = add_vlan_pop_action(priv, attr,
-&action, extack);
+err = add_vlan_pop_action(priv, attr, extack);
if (err)
return err;
}
......@@ -4138,7 +4135,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
err = mlx5e_set_fwd_to_int_port_actions(priv, attr,
out_dev->ifindex,
MLX5E_TC_INT_PORT_EGRESS,
-&action,
+&attr->action,
esw_attr->out_count);
if (err)
return err;
......@@ -4176,15 +4173,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
case FLOW_ACTION_VLAN_PUSH:
case FLOW_ACTION_VLAN_POP:
if (act->id == FLOW_ACTION_VLAN_PUSH &&
-(action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
+(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
/* Replace vlan pop+push with vlan modify */
-action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_FDB,
act, parse_attr, hdrs,
-&action, extack);
+&attr->action, extack);
} else {
-err = parse_tc_vlan_action(priv, act, esw_attr, &action, extack);
+err = parse_tc_vlan_action(priv, act, esw_attr, &attr->action,
+extack);
}
if (err)
return err;
......@@ -4195,7 +4193,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
err = add_vlan_rewrite_action(priv,
MLX5_FLOW_NAMESPACE_FDB,
act, parse_attr, hdrs,
-&action, extack);
+&attr->action, extack);
if (err)
return err;
......@@ -4205,12 +4203,11 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
decap = true;
break;
case FLOW_ACTION_GOTO:
-err = validate_goto_chain(priv, flow, act, action,
-extack);
+err = validate_goto_chain(priv, flow, act, extack);
if (err)
return err;
-action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
MLX5_FLOW_CONTEXT_ACTION_COUNT;
attr->dest_chain = act->chain_index;
break;
......@@ -4254,23 +4251,18 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
return -EOPNOTSUPP;
}
-/* always set IP version for indirect table handling */
-attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
-action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
+attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
/* For prio tag mode, replace vlan pop with rewrite vlan prio
* tag rewrite.
*/
-action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
+attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
-&action, extack);
+&attr->action, extack);
if (err)
return err;
}
-attr->action = action;
err = actions_prepare_mod_hdr_actions(priv, flow, attr, hdrs, extack);
if (err)
return err;
......@@ -4493,6 +4485,9 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
if (err)
goto err_free;
+/* always set IP version for indirect table handling */
+flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
if (err)
goto err_free;
......
......@@ -590,6 +590,7 @@ static int esw_qos_create(struct mlx5_eswitch *esw, struct netlink_ext_ack *exta
if (IS_ERR(esw->qos.group0)) {
esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n",
PTR_ERR(esw->qos.group0));
+err = PTR_ERR(esw->qos.group0);
goto err_group0;
}
}
......
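This one-liner fixes a classic missing-error-code bug: the IS_ERR() branch was taken, but err still held its previous value (typically 0), so the failure path could report success. The corrected shape, sketched with generic names:

	obj = create_object(dev);
	if (IS_ERR(obj)) {
		err = PTR_ERR(obj);	/* the assignment that was missing */
		goto err_cleanup;
	}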
......@@ -38,9 +38,10 @@
#include "fs_cmd.h"
#define MLX5_FC_STATS_PERIOD msecs_to_jiffies(1000)
+#define MLX5_FC_BULK_QUERY_ALLOC_PERIOD msecs_to_jiffies(180 * 1000)
/* Max number of counters to query in bulk read is 32K */
#define MLX5_SW_MAX_COUNTERS_BULK BIT(15)
-#define MLX5_SF_NUM_COUNTERS_BULK 8
+#define MLX5_INIT_COUNTERS_BULK 8
#define MLX5_FC_POOL_MAX_THRESHOLD BIT(18)
#define MLX5_FC_POOL_USED_BUFF_RATIO 10
......@@ -145,13 +146,15 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
-static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+static int get_init_bulk_query_len(struct mlx5_core_dev *dev)
{
-int num_counters_bulk = mlx5_core_is_sf(dev) ?
-MLX5_SF_NUM_COUNTERS_BULK :
-MLX5_SW_MAX_COUNTERS_BULK;
+return min_t(int, MLX5_INIT_COUNTERS_BULK,
+(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
+}
-return min_t(int, num_counters_bulk,
+static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
+{
+return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
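Both helpers clamp against the device's log_max_flow_counter_bulk capability. A worked example with an assumed capability value: if log_max_flow_counter_bulk == 12, the device accepts at most 1 << 12 = 4096 counters per bulk query, so get_init_bulk_query_len() returns min(8, 4096) = 8 while get_max_bulk_query_len() returns min(32768, 4096) = 4096.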
......@@ -177,7 +180,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
bool query_more_counters = (first->id <= last_id);
-int max_bulk_len = get_max_bulk_query_len(dev);
+int cur_bulk_len = fc_stats->bulk_query_len;
u32 *data = fc_stats->bulk_query_out;
struct mlx5_fc *counter = first;
u32 bulk_base_id;
......@@ -189,7 +192,7 @@ static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
bulk_base_id = counter->id & ~0x3;
/* number of counters to query inc. the last counter */
-bulk_len = min_t(int, max_bulk_len,
+bulk_len = min_t(int, cur_bulk_len,
ALIGN(last_id - bulk_base_id + 1, 4));
err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
......@@ -230,6 +233,41 @@ static void mlx5_fc_release(struct mlx5_core_dev *dev, struct mlx5_fc *counter)
mlx5_fc_free(dev, counter);
}
+static void mlx5_fc_stats_bulk_query_size_increase(struct mlx5_core_dev *dev)
+{
+struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
+int max_bulk_len = get_max_bulk_query_len(dev);
+unsigned long now = jiffies;
+u32 *bulk_query_out_tmp;
+int max_out_len;
+if (fc_stats->bulk_query_alloc_failed &&
+time_before(now, fc_stats->next_bulk_query_alloc))
+return;
+max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
+bulk_query_out_tmp = kzalloc(max_out_len, GFP_KERNEL);
+if (!bulk_query_out_tmp) {
+mlx5_core_warn_once(dev,
+"Can't increase flow counters bulk query buffer size, insufficient memory, bulk_size(%d)\n",
+max_bulk_len);
+fc_stats->bulk_query_alloc_failed = true;
+fc_stats->next_bulk_query_alloc =
+now + MLX5_FC_BULK_QUERY_ALLOC_PERIOD;
+return;
+}
+kfree(fc_stats->bulk_query_out);
+fc_stats->bulk_query_out = bulk_query_out_tmp;
+fc_stats->bulk_query_len = max_bulk_len;
+if (fc_stats->bulk_query_alloc_failed) {
+mlx5_core_info(dev,
+"Flow counters bulk query buffer size increased, bulk_size(%d)\n",
+max_bulk_len);
+fc_stats->bulk_query_alloc_failed = false;
+}
+}
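The new function is grow-on-demand with a failure backoff: allocate the larger buffer first, swap it in only on success, and after an allocation failure refuse to retry until MLX5_FC_BULK_QUERY_ALLOC_PERIOD has elapsed. Reduced to a sketch with illustrative field names:

	void *bigger = kzalloc(new_len, GFP_KERNEL);

	if (!bigger) {
		stats->alloc_failed = true;
		stats->next_retry = jiffies + RETRY_PERIOD;	/* back off */
		return;
	}
	kfree(stats->buf);	/* old buffer freed only after success */
	stats->buf = bigger;
	stats->len = new_len;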
static void mlx5_fc_stats_work(struct work_struct *work)
{
struct mlx5_core_dev *dev = container_of(work, struct mlx5_core_dev,
......@@ -247,15 +285,22 @@ static void mlx5_fc_stats_work(struct work_struct *work)
queue_delayed_work(fc_stats->wq, &fc_stats->work,
fc_stats->sampling_interval);
-llist_for_each_entry(counter, addlist, addlist)
+llist_for_each_entry(counter, addlist, addlist) {
mlx5_fc_stats_insert(dev, counter);
+fc_stats->num_counters++;
+}
llist_for_each_entry_safe(counter, tmp, dellist, dellist) {
mlx5_fc_stats_remove(dev, counter);
mlx5_fc_release(dev, counter);
+fc_stats->num_counters--;
}
+if (fc_stats->bulk_query_len < get_max_bulk_query_len(dev) &&
+fc_stats->num_counters > get_init_bulk_query_len(dev))
+mlx5_fc_stats_bulk_query_size_increase(dev);
if (time_before(now, fc_stats->next_query) ||
list_empty(&fc_stats->counters))
return;
......@@ -378,8 +423,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
-int max_bulk_len;
-int max_out_len;
+int init_bulk_len;
+int init_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
......@@ -387,11 +432,12 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
-max_bulk_len = get_max_bulk_query_len(dev);
-max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
-fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
+init_bulk_len = get_init_bulk_query_len(dev);
+init_out_len = mlx5_cmd_fc_get_bulk_query_out_len(init_bulk_len);
+fc_stats->bulk_query_out = kzalloc(init_out_len, GFP_KERNEL);
if (!fc_stats->bulk_query_out)
return -ENOMEM;
+fc_stats->bulk_query_len = init_bulk_len;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
......
......@@ -110,7 +110,7 @@ void mlx5i_cleanup(struct mlx5e_priv *priv)
static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
{
-struct mlx5e_sw_stats s = { 0 };
+struct rtnl_link_stats64 s = {};
int i, j;
for (i = 0; i < priv->stats_nch; i++) {
......@@ -128,11 +128,17 @@ static void mlx5i_grp_sw_update_stats(struct mlx5e_priv *priv)
s.tx_packets += sq_stats->packets;
s.tx_bytes += sq_stats->bytes;
-s.tx_queue_dropped += sq_stats->dropped;
+s.tx_dropped += sq_stats->dropped;
}
}
-memcpy(&priv->stats.sw, &s, sizeof(s));
+memset(&priv->stats.sw, 0, sizeof(s));
+priv->stats.sw.rx_packets = s.rx_packets;
+priv->stats.sw.rx_bytes = s.rx_bytes;
+priv->stats.sw.tx_packets = s.tx_packets;
+priv->stats.sw.tx_bytes = s.tx_bytes;
+priv->stats.sw.tx_queue_dropped = s.tx_dropped;
}
void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
......
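The accumulator here switches from the driver-private struct mlx5e_sw_stats to the generic struct rtnl_link_stats64, whose layout no longer matches priv->stats.sw. A raw memcpy() between differently laid-out structs would scribble over unrelated counters, so the rewrite zeroes the destination and assigns only the five fields IPoIB actually maintains, keeping the copy type-safe.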
......@@ -1604,12 +1604,28 @@ static void remove_one(struct pci_dev *pdev)
mlx5_devlink_free(devlink);
}
+#define mlx5_pci_trace(dev, fmt, ...) ({ \
+struct mlx5_core_dev *__dev = (dev); \
+mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
+__func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
+__dev->pci_status, ##__VA_ARGS__); \
+})
+static const char *result2str(enum pci_ers_result result)
+{
+return result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
+result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
+result == PCI_ERS_RESULT_RECOVERED ? "recovered" :
+"unknown";
+}
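mlx5_pci_trace() is a GNU C statement expression: the dev argument is copied into the local __dev so it is evaluated exactly once, and every call site automatically gets the function name, device state, fatal-sensor count, and PCI status prepended to its message. A call like

	mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);

therefore emits a single mlx5_core_info() line carrying all of that context.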
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
pci_channel_state_t state)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+enum pci_ers_result res;
-mlx5_core_info(dev, "%s was called\n", __func__);
+mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);
mlx5_enter_error_state(dev, false);
mlx5_error_sw_reset(dev);
......@@ -1617,8 +1633,11 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
-return state == pci_channel_io_perm_failure ?
+res = state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
mlx5_pci_trace(dev, "Exit, result = %d, %s\n", res, result2str(res));
return res;
}
/* wait for the device to show vital signs by waiting
......@@ -1652,28 +1671,34 @@ static int wait_vital(struct pci_dev *pdev)
static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
+enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
-mlx5_core_info(dev, "%s was called\n", __func__);
+mlx5_pci_trace(dev, "Enter\n");
err = mlx5_pci_enable_device(dev);
if (err) {
mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
__func__, err);
-return PCI_ERS_RESULT_DISCONNECT;
+goto out;
}
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
-if (wait_vital(pdev)) {
-mlx5_core_err(dev, "%s: wait_vital timed out\n", __func__);
-return PCI_ERS_RESULT_DISCONNECT;
+err = wait_vital(pdev);
+if (err) {
+mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
+__func__, err);
+goto out;
}
-return PCI_ERS_RESULT_RECOVERED;
+res = PCI_ERS_RESULT_RECOVERED;
+out:
+mlx5_pci_trace(dev, "Exit, err = %d, result = %d, %s\n", err, res, result2str(res));
+return res;
}
static void mlx5_pci_resume(struct pci_dev *pdev)
......@@ -1681,14 +1706,12 @@ static void mlx5_pci_resume(struct pci_dev *pdev)
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
int err;
-mlx5_core_info(dev, "%s was called\n", __func__);
+mlx5_pci_trace(dev, "Enter, loading driver..\n");
err = mlx5_load_one(dev);
if (err)
mlx5_core_err(dev, "%s: mlx5_load_one failed with error code: %d\n",
__func__, err);
-else
-mlx5_core_info(dev, "%s: device recovered\n", __func__);
+mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
+!err ? "recovered" : "Failed");
}
static const struct pci_error_handlers mlx5_err_handler = {
......
......@@ -247,7 +247,7 @@ int mlx5_sf_hw_table_init(struct mlx5_core_dev *dev)
{
struct mlx5_sf_hw_table *table;
u16 max_ext_fn = 0;
-u16 ext_base_id;
+u16 ext_base_id = 0;
u16 max_fn = 0;
u16 base_id;
int err;
......
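An uninitialized-variable fix: ext_base_id is only written on the path that queries the external host function range, so configurations that skip the query previously passed stack garbage onward. Sketched with generic names (not the driver's exact code):

	u16 ext_base_id = 0;	/* sane value for the skip path */

	if (ext_functions_supported(dev)) {
		err = query_ext_range(dev, &ext_base_id, &max_ext_fn);
		if (err)
			return err;
	}
	/* ext_base_id is consumed here on both paths */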
......@@ -478,6 +478,10 @@ struct mlx5_fc_stats {
unsigned long next_query;
unsigned long sampling_interval; /* jiffies */
u32 *bulk_query_out;
+int bulk_query_len;
+size_t num_counters;
+bool bulk_query_alloc_failed;
+unsigned long next_bulk_query_alloc;
struct mlx5_fc_pool fc_pool;
};
......