Commit 4b52416a authored by David S. Miller

Merge branch 'mlx5-fixes'

Saeed Mahameed says:

====================
Mellanox 100G mlx5 fixes 28-12-2016

Some fixes for the mlx5 core and ethernet drivers.

for -stable:
    net/mlx5: Check FW limitations on log_max_qp before setting it
    net/mlx5: Cancel recovery work in remove flow
    net/mlx5: Avoid shadowing numa_node
    net/mlx5: Mask destination mac value in ethtool steering rules
    net/mlx5: Prevent setting multicast macs for VFs
    net/mlx5e: Don't sync netdev state when not registered
    net/mlx5e: Disable netdev after close
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0df0f207 37f304d1
@@ -723,6 +723,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
 	int i;
 	struct ieee_ets ets;
 
+	if (!MLX5_CAP_GEN(priv->mdev, ets))
+		return;
+
 	memset(&ets, 0, sizeof(ets));
 	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
 	for (i = 0; i < ets.ets_cap; i++) {
......
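The added check turns mlx5e_ets_init() into a no-op when the firmware does not report the ETS capability, instead of issuing ETS commands the device would reject. A minimal standalone sketch of the same guard pattern, with a hypothetical has_ets_cap() standing in for the MLX5_CAP_GEN(priv->mdev, ets) capability read:

	#include <stdbool.h>

	struct nic { bool ets_capable; };       /* hypothetical stand-in */

	static bool has_ets_cap(const struct nic *n)
	{
		return n->ets_capable;
	}

	static void ets_init(struct nic *n)
	{
		if (!has_ets_cap(n))
			return;                 /* FW lacks ETS: skip setup */
		/* ... query and program ETS settings ... */
	}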
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
 		return NUM_SW_COUNTERS +
 		       MLX5E_NUM_Q_CNTRS(priv) +
 		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-		       NUM_PCIE_COUNTERS +
 		       MLX5E_NUM_RQ_STATS(priv) +
 		       MLX5E_NUM_SQ_STATS(priv) +
 		       MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
 		strcpy(data + (idx++) * ETH_GSTRING_LEN,
 		       pport_2819_stats_desc[i].format);
 
-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_perf_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_tas_stats_desc[i].format);
-
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 			sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
 		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
 						  pport_2819_stats_desc, i);
 
-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
-						  pcie_perf_stats_desc, i);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
-						  pcie_tas_stats_desc, i);
-
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
 			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
......
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
 	}
 	if (fs->flow_type & FLOW_MAC_EXT &&
 	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
+		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
 		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
 					     outer_headers_c, dmac_47_16),
 				fs->m_ext.h_dest);
......
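mask_spec() itself is outside this hunk, so only its call site is visible. Judging by the arguments (mask first, value second), it presumably ANDs each value byte with the corresponding mask byte, so the destination MAC programmed into the steering rule never carries bits the user's mask excludes; a sketch under that assumption:

	#include <stddef.h>
	#include <stdint.h>

	/* Assumed behaviour of a helper like mask_spec(): clear every value
	 * bit not covered by the mask, so masked-out bits cannot influence
	 * rule matching. */
	static void mask_bytes(const uint8_t *mask, uint8_t *val, size_t len)
	{
		size_t i;

		for (i = 0; i < len; i++)
			val[i] &= mask[i];
	}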
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
 					      &qcnt->rx_out_of_buffer);
 }
 
-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-	void *out;
-	u32 *in;
-
-	in = mlx5_vzalloc(sz);
-	if (!in)
-		return;
-
-	out = pcie_stats->pcie_perf_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	out = pcie_stats->pcie_tas_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	kvfree(in);
-}
-
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
 	mlx5e_update_q_counter(priv);
 	mlx5e_update_vport_counters(priv);
 	mlx5e_update_pport_counters(priv);
 	mlx5e_update_sw_counters(priv);
-	mlx5e_update_pcie_counters(priv);
 }
 
 void mlx5e_update_stats_work(struct work_struct *work)
@@ -3805,14 +3781,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5_lag_add(mdev, netdev);
 
-	if (mlx5e_vxlan_allowed(mdev)) {
-		rtnl_lock();
-		udp_tunnel_get_rx_info(netdev);
-		rtnl_unlock();
-	}
-
 	mlx5e_enable_async_events(priv);
-	queue_work(priv->wq, &priv->set_rx_mode_work);
 
 	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
 		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
@@ -3822,6 +3791,18 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 		rep.netdev = netdev;
 		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
 	}
+
+	if (netdev->reg_state != NETREG_REGISTERED)
+		return;
+
+	/* Device already registered: sync netdev system state */
+	if (mlx5e_vxlan_allowed(mdev)) {
+		rtnl_lock();
+		udp_tunnel_get_rx_info(netdev);
+		rtnl_unlock();
+	}
+
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 }
 
 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
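The hunk pair above moves the state sync out of the unconditional path and defers it until the device is actually registered. A condensed sketch of the rule it enforces, using the kernel netdev API already shown in the diff (the helper name is ours): replaying system state into the device, e.g. udp_tunnel_get_rx_info(), only makes sense for a registered netdev and must run under the RTNL lock.

	#include <linux/netdevice.h>
	#include <net/udp_tunnel.h>

	static void sync_netdev_state(struct net_device *netdev)
	{
		/* attach path: netdev not yet visible, nothing to sync */
		if (netdev->reg_state != NETREG_REGISTERED)
			return;

		rtnl_lock();
		udp_tunnel_get_rx_info(netdev);
		rtnl_unlock();
	}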
@@ -3966,10 +3947,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 	const struct mlx5e_profile *profile = priv->profile;
 
 	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
 
-	if (profile->disable)
-		profile->disable(priv);
-	flush_workqueue(priv->wq);
-
 	rtnl_lock();
 	if (netif_running(netdev))
@@ -3977,6 +3954,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
 	netif_device_detach(netdev);
 	rtnl_unlock();
 
+	if (profile->disable)
+		profile->disable(priv);
+	flush_workqueue(priv->wq);
+
 	mlx5e_destroy_q_counter(priv);
 	profile->cleanup_rx(priv);
 	mlx5e_close_drop_rq(priv);
......
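Taken together, the two hunks reorder mlx5e_detach_netdev(): the netdev is closed and detached under rtnl_lock first, and only then is the profile disabled and the workqueue flushed, so no ndo callback can race with profile teardown. A sketch of the resulting flow (names from the diff, surrounding code elided):

	rtnl_lock();
	if (netif_running(netdev))
		dev_close(netdev);
	netif_device_detach(netdev);
	rtnl_unlock();

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);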
@@ -39,7 +39,7 @@
 #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
 	(*(u32 *)((char *)ptr + dsc[i].offset))
 #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
-	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+	be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
 
 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
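With the PCIe counters backed out, MLX5E_READ_CTR32_BE has no users again, and this hunk apparently restores the macro's earlier, unused be64_to_cpu() form. Worth noting for any future user: a 64-bit swap applied to a pointer that really addresses a __be32 dereferences eight bytes, pulling in the neighbouring field as well. A standalone sketch (hypothetical helper, plain C, not driver code) of a correct 32-bit big-endian read:

	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	static uint32_t read_be32_counter(const void *base, size_t offset)
	{
		uint32_t be;

		/* copy exactly four bytes, then do a 32-bit byte swap */
		memcpy(&be, (const char *)base + offset, sizeof(be));
		return ntohl(be);
	}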
@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
 	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };
 
-#define PCIE_PERF_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
-		 counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
-		 counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
-	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-	__be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
-	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
-	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
-	{ "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
-	{ "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
 struct mlx5e_rq_stats {
 	u64 packets;
 	u64 bytes;
@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
 #define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
 #define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS		ARRAY_SIZE(pcie_tas_stats_desc)
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
 	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
 					 NUM_PPORT_2819_COUNTERS + \
 					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
 					 NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS	(NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
 #define NUM_RQ_STATS		ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS		ARRAY_SIZE(sq_stats_desc)
 
@@ -406,7 +377,6 @@ struct mlx5e_stats {
 	struct mlx5e_qcounter_stats qcnt;
 	struct mlx5e_vport_stats vport;
 	struct mlx5e_pport_stats pport;
-	struct mlx5e_pcie_stats pcie;
 	struct rtnl_link_stats64 vf_vport;
 };
 
......
@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
 
 	if (!ESW_ALLOWED(esw))
 		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
+	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
 		return -EINVAL;
 
 	mutex_lock(&esw->state_lock);
......
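The extended check rejects multicast addresses when an administrator sets a VF MAC, since a multicast address can never be a valid unicast station address. A sketch of the underlying test, mirroring what the kernel's is_multicast_ether_addr() checks:

	#include <stdbool.h>
	#include <stdint.h>

	/* An Ethernet address is multicast when the least-significant bit
	 * of its first octet is set. */
	static bool mac_is_multicast(const uint8_t addr[6])
	{
		return addr[0] & 0x01;
	}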
@@ -695,6 +695,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
 		if (err)
 			goto err_reps;
 	}
+
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
 
 	return 0;
 
 err_reps:
@@ -718,6 +724,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
 	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
 
+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
 	mlx5_eswitch_disable_sriov(esw);
 	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
 	if (err) {
......
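The two hunks are symmetric: entering offloads mode removes the PF's IB interface so missed packets cannot be steered through RoCE, and leaving offloads mode adds it back, both under the device-list lock. A hypothetical consolidation (not present in the driver) that makes the pairing explicit, using only the calls shown in the diff:

	static void pf_roce_set(struct mlx5_eswitch *esw, bool enable)
	{
		mlx5_dev_list_lock();
		if (enable)
			mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		else
			mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_dev_list_unlock();
	}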
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
 	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
 	handle = add_rule_fte(fte, fg, dest, dest_num, false);
 	if (IS_ERR(handle)) {
+		unlock_ref_node(&fte->node);
 		kfree(fte);
 		goto unlock_fg;
 	}
......
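The one-line fix releases the node lock on the error path before the fte is freed; previously the failure branch freed memory that still held a locked lock. A generic sketch of the rule (plain C with pthreads, not driver code):

	#include <pthread.h>
	#include <stdlib.h>

	struct node { pthread_mutex_t lock; };

	static int try_add(struct node *n)
	{
		pthread_mutex_lock(&n->lock);
		/* ... adding the rule failed ... */
		pthread_mutex_unlock(&n->lock);   /* unlock first ... */
		free(n);                          /* ... only then free */
		return -1;
	}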
@@ -503,6 +503,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
 		 to_fw_pkey_sz(dev, 128));
 
+	/* Check log_max_qp from HCA caps to set in current profile */
+	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+			       profile[prof_sel].log_max_qp,
+			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+	}
 	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
 		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
 			 prof->log_max_qp);
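The added block clamps the profile's log_max_qp to the limit the HCA reports, warning when it does so, instead of programming a value the firmware cannot honour. The logic reduces to a simple min(); a standalone sketch with a hypothetical helper:

	/* never program a profile value above the device capability limit */
	static unsigned int clamp_log_max_qp(unsigned int profile_val,
					     unsigned int hca_cap_max)
	{
		return profile_val > hca_cap_max ? hca_cap_max : profile_val;
	}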
@@ -575,7 +582,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 	struct mlx5_priv *priv = &mdev->priv;
 	struct msix_entry *msix = priv->msix_arr;
 	int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node = priv->numa_node;
 	int err;
 
 	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -583,7 +589,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
 		return -ENOMEM;
 	}
 
-	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
 			priv->irq_info[i].mask);
 
 	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
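The dropped local existed only to alias priv->numa_node, and its name collides with the numa_node symbol the kernel already declares (a per-CPU variable in the topology code), shadowing it inside the function; using priv->numa_node directly avoids the duplicate name. An illustrative sketch of the hazard (plain C, not driver code):

	int numa_node;                  /* outer declaration, as in the kernel */

	static int pick_cpu(int i, int node)
	{
		int numa_node = node;   /* shadows the outer numa_node */

		return i + numa_node;   /* easy to misread which one is used */
	}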
@@ -1189,6 +1195,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
 	int err = 0;
 
+	mlx5_drain_health_wq(dev);
+
 	mutex_lock(&dev->intf_state_mutex);
 	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
 		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
@@ -1351,10 +1359,9 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
 
 	mlx5_enter_error_state(dev);
 	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain health wq */
+	/* In case of kernel call save the pci state */
 	if (state) {
 		pci_save_state(pdev);
-		mlx5_drain_health_wq(dev);
 		mlx5_pci_disable_device(dev);
 	}
 
......
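Together, the last two hunks move mlx5_drain_health_wq() from the PCI-error path into mlx5_unload_one(), so every unload path, including plain device remove, cancels pending health-recovery work before teardown begins. A sketch of the invariant (names from the diff, body elided):

	static int mlx5_unload_one_sketch(struct mlx5_core_dev *dev,
					  struct mlx5_priv *priv, bool cleanup)
	{
		mlx5_drain_health_wq(dev); /* quiesce recovery work on every path */
		/* ... existing unload logic ... */
		return 0;
	}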
@@ -1071,11 +1071,6 @@ enum {
 	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
 };
 
-enum {
-	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP       = 0x0,
-	MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
-};
-
 static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
 {
 	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
......
@@ -123,7 +123,6 @@ enum {
 	MLX5_REG_HOST_ENDIANNESS = 0x7004,
 	MLX5_REG_MCIA		 = 0x9014,
 	MLX5_REG_MLCR		 = 0x902b,
-	MLX5_REG_MPCNT		 = 0x9051,
 };
 
 enum mlx5_dcbx_oper_mode {
......
@@ -1757,80 +1757,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
 	u8         reserved_at_4c0[0x300];
 };
 
-struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         rx_errors[0x20];
-
-	u8         tx_errors[0x20];
-
-	u8         l0_to_recovery_eieos[0x20];
-
-	u8         l0_to_recovery_ts[0x20];
-
-	u8         l0_to_recovery_framing[0x20];
-
-	u8         l0_to_recovery_retrain[0x20];
-
-	u8         crc_error_dllp[0x20];
-
-	u8         crc_error_tlp[0x20];
-
-	u8         reserved_at_140[0x680];
-};
-
-struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
-	u8         life_time_counter_high[0x20];
-
-	u8         life_time_counter_low[0x20];
-
-	u8         time_to_boot_image_start[0x20];
-
-	u8         time_to_link_image[0x20];
-
-	u8         calibration_time[0x20];
-
-	u8         time_to_first_perst[0x20];
-
-	u8         time_to_detect_state[0x20];
-
-	u8         time_to_l0[0x20];
-
-	u8         time_to_crs_en[0x20];
-
-	u8         time_to_plastic_image_start[0x20];
-
-	u8         time_to_iron_image_start[0x20];
-
-	u8         perst_handler[0x20];
-
-	u8         times_in_l1[0x20];
-
-	u8         times_in_l23[0x20];
-
-	u8         dl_down[0x20];
-
-	u8         config_cycle1usec[0x20];
-
-	u8         config_cycle2to7usec[0x20];
-
-	u8         config_cycle_8to15usec[0x20];
-
-	u8         config_cycle_16_to_63usec[0x20];
-
-	u8         config_cycle_64usec[0x20];
-
-	u8         correctable_err_msg_sent[0x20];
-
-	u8         non_fatal_err_msg_sent[0x20];
-
-	u8         fatal_err_msg_sent[0x20];
-
-	u8         reserved_at_2e0[0x4e0];
-};
-
 struct mlx5_ifc_cmd_inter_comp_event_bits {
 	u8         command_completion_vector[0x20];
 
@@ -2995,12 +2921,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
 	u8         reserved_at_0[0x7c0];
 };
 
-union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
-	struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
-	struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
-	u8         reserved_at_0[0x7c0];
-};
-
 union mlx5_ifc_event_auto_bits {
 	struct mlx5_ifc_comp_event_bits comp_event;
 	struct mlx5_ifc_dct_events_bits dct_events;
@@ -7320,18 +7240,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
 	union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
 };
 
-struct mlx5_ifc_mpcnt_reg_bits {
-	u8         reserved_at_0[0x8];
-	u8         pcie_index[0x8];
-	u8         reserved_at_10[0xa];
-	u8         grp[0x6];
-
-	u8         clr[0x1];
-	u8         reserved_at_21[0x1f];
-
-	union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
-};
-
 struct mlx5_ifc_ppad_reg_bits {
 	u8         reserved_at_0[0x3];
 	u8         single_mac[0x1];
@@ -7937,7 +7845,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
 	struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
 	struct mlx5_ifc_ppad_reg_bits ppad_reg;
 	struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
-	struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
 	struct mlx5_ifc_pplm_reg_bits pplm_reg;
 	struct mlx5_ifc_pplr_reg_bits pplr_reg;
 	struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
......