Commit 6da410d9 authored by David S. Miller

Merge tag 'mlx5e-fixes-2018-09-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2018-09-05

This pull request contains fixes for the mlx5 ethernet netdevice and
core driver.

Please pull and let me know if there's any problem.

For -stable v4.9:
("net/mlx5: Fix debugfs cleanup in the device init/remove flow")

For -stable v4.12:
("net/mlx5: E-Switch, Fix memory leak when creating switchdev mode FDB tables")

For -stable v4.13:
("net/mlx5: Fix use-after-free in self-healing flow")

For -stable v4.14:
("net/mlx5: Check for error in mlx5_attach_interface")

For -stable v4.15:
("net/mlx5: Fix not releasing read lock when adding flow rules")

For -stable v4.17:
("net/mlx5: Fix possible deadlock from lockdep when adding fte to fg")

For -stable v4.18:
("net/mlx5: Use u16 for Work Queue buffer fragment size")
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fce471e3 ad9421e3
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	delayed_event_start(priv);
 
 	dev_ctx->context = intf->add(dev);
-	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-	if (intf->attach)
-		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+	if (dev_ctx->context) {
+		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+		if (intf->attach)
+			set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
 
 	spin_lock_irq(&priv->ctx_lock);
 	list_add_tail(&dev_ctx->list, &priv->ctx_list);
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
 	if (intf->attach) {
 		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
 			goto out;
-		intf->attach(dev, dev_ctx->context);
+		if (intf->attach(dev, dev_ctx->context))
+			goto out;
 		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
 	} else {
 		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
 			goto out;
 		dev_ctx->context = intf->add(dev);
+		if (!dev_ctx->context)
+			goto out;
 		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
 	}
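
The two hunks above fix the same omission twice: intf->add() can return NULL and intf->attach() can fail, yet the state bits were set unconditionally, leaving the driver believing an interface is up that never came up. A minimal userspace sketch of the checked callback contract, with hypothetical types and demo_* names (not the driver's):

#include <stdio.h>

/* Hypothetical miniature of the interface-ops contract: add() signals
 * failure with a NULL context, attach() with a nonzero return; a state
 * bit is set only after the corresponding op succeeded.
 */
struct iface {
	void *(*add)(void);
	int (*attach)(void *ctx);
};

enum { ADDED = 1, ATTACHED = 2 };

static int attach_interface(const struct iface *i, void **ctx,
			    unsigned int *state)
{
	if (i->attach) {
		if (i->attach(*ctx))
			return -1;	/* failed: leave the state bit clear */
		*state |= ATTACHED;
	} else {
		*ctx = i->add();
		if (!*ctx)
			return -1;	/* add() gave no context: not ADDED */
		*state |= ADDED;
	}
	return 0;
}

static void *demo_add(void) { static int c; return &c; }
static int demo_attach(void *ctx) { (void)ctx; return 0; }

int main(void)
{
	struct iface i = { .add = demo_add, .attach = demo_attach };
	void *ctx = demo_add();
	unsigned int state = ADDED;
	int err = attach_interface(&i, &ctx, &state);

	printf("attach: %d, state: %u\n", err, state);	/* attach: 0, state: 3 */
	return 0;
}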
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
 	}
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-	return (u16)((dev->pdev->bus->number << 8) |
+	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+		     (dev->pdev->bus->number << 8) |
 		     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-	u16 pci_id = mlx5_gen_pci_id(dev);
+	u32 pci_id = mlx5_gen_pci_id(dev);
 	struct mlx5_core_dev *res = NULL;
 	struct mlx5_core_dev *tmp_dev;
 	struct mlx5_priv *priv;
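The mlx5_gen_pci_id() change above exists because bus and slot alone do not identify a device once multiple PCI domains (segments) are present; packing the domain into bits 31:16 requires widening the ID from u16 to u32, and mlx5_get_next_phys_dev() must compare the wide value. A standalone sketch of the packing, using hypothetical sample values instead of a live pci_dev:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Pack PCI domain/bus/slot the way the fixed mlx5_gen_pci_id() does:
 * domain in bits 31:16, bus in bits 15:8, slot in bits 7:0. With the
 * old u16 return type the domain bits were truncated away, so devices
 * with equal bus/slot numbers in different domains compared equal in
 * mlx5_get_next_phys_dev().
 */
static uint32_t gen_pci_id(uint16_t domain, uint8_t bus, uint8_t slot)
{
	return ((uint32_t)domain << 16) | ((uint32_t)bus << 8) | slot;
}

int main(void)
{
	/* Hypothetical topology: same bus/slot, different PCI domains. */
	uint32_t id_a = gen_pci_id(0x0000, 0x03, 0x00);
	uint32_t id_b = gen_pci_id(0x0001, 0x03, 0x00);

	printf("domain 0 id: %#010" PRIx32 "\n", id_a);	/* 0x00000300 */
	printf("domain 1 id: %#010" PRIx32 "\n", id_b);	/* 0x00010300 */
	printf("collide as u16: %s\n",
	       (uint16_t)id_a == (uint16_t)id_b ? "yes" : "no");	/* yes */
	return 0;
}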
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
 {
 	if (psrc_m) {
 		MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
-		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
 	}
 
 	if (pdst_m) {
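The set_udp() fix is a one-character bug: the UDP source-port value was written into headers_c (the match criteria, i.e. the mask, which had just been set to 0xffff) instead of headers_v (the match value), so the mask was clobbered and the value stayed zero. A minimal sketch of the criteria/value convention, assuming a simplified match pair in place of the device's fte_match_set layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for a criteria/value match pair: a packet field
 * matches when (field & mask) == (value & mask).
 */
struct udp_match {
	uint16_t sport_mask;	/* "headers_c": which bits must compare equal */
	uint16_t sport_val;	/* "headers_v": the value they must equal */
};

static bool udp_sport_matches(const struct udp_match *m, uint16_t sport)
{
	return (sport & m->sport_mask) == (m->sport_val & m->sport_mask);
}

int main(void)
{
	/* Correct rule: full mask, value = 4789. */
	struct udp_match good = { .sport_mask = 0xffff, .sport_val = 4789 };
	/* Effect of the bug: the second write landed on the mask, so the
	 * mask became the port number and the value stayed 0.
	 */
	struct udp_match bad = { .sport_mask = 4789, .sport_val = 0 };

	printf("good matches 4789: %d\n", udp_sport_matches(&good, 4789));	/* 1 */
	printf("bad matches 4789:  %d\n", udp_sport_matches(&bad, 4789));	/* 0 */
	printf("bad matches 0:     %d\n", udp_sport_matches(&bad, 0));		/* 1 */
	return 0;
}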
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto miss_rule_err;
 
+	kvfree(flow_group_in);
 	return 0;
 
 miss_rule_err:
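The single added kvfree() above is the whole leak fix: flow_group_in was freed on the error paths reached through the goto labels, but the success path returned without freeing it, leaking the allocation on every successful FDB table creation. A minimal userspace sketch of the same allocate/use/free shape, with hypothetical names, showing where the success-path free belongs:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's table-creation steps. */
static int create_fast_path_rule(void *in)	{ (void)in; return 0; }
static int create_miss_rule(void *in)		{ (void)in; return 0; }

static int create_tables(void)
{
	void *group_in = calloc(1, 4096);	/* plays the role of flow_group_in */
	int err;

	if (!group_in)
		return -1;

	err = create_fast_path_rule(group_in);
	if (err)
		goto fast_path_err;

	err = create_miss_rule(group_in);
	if (err)
		goto miss_rule_err;

	free(group_in);		/* the fix: the success path must free, too */
	return 0;

miss_rule_err:
	/* teardown of the fast-path rule would go here */
fast_path_err:
	free(group_in);		/* error paths already freed via the labels */
	return err;
}

int main(void)
{
	printf("create_tables: %d\n", create_tables());
	return 0;
}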
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
 	return version;
 }
 
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+		  u32 *match_value,
+		  bool take_write)
+{
+	struct fs_fte *fte_tmp;
+
+	if (take_write)
+		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+	else
+		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+					 rhash_fte);
+	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+		fte_tmp = NULL;
+		goto out;
+	}
+	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+	if (take_write)
+		up_write_ref_node(&g->node);
+	else
+		up_read_ref_node(&g->node);
+	return fte_tmp;
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		       struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	if (IS_ERR(fte))
 		return ERR_PTR(-ENOMEM);
 
-	list_for_each_entry(iter, match_head, list) {
-		nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
-	}
-
 search_again_locked:
 	version = matched_fgs_get_version(match_head);
 
 	/* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		struct fs_fte *fte_tmp;
 
 		g = iter->g;
-		fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
-						 rhash_fte);
-		if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+		if (!fte_tmp)
 			continue;
-		nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-		if (!take_write) {
-			list_for_each_entry(iter, match_head, list)
-				up_read_ref_node(&iter->g->node);
-		} else {
-			list_for_each_entry(iter, match_head, list)
-				up_write_ref_node(&iter->g->node);
-		}
 
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte_tmp);
 		up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		return rule;
 	}
 
 	/* No group with matching fte found. Try to add a new fte to any
	 * matching fg.
	 */
 
-	if (!take_write) {
-		list_for_each_entry(iter, match_head, list)
-			up_read_ref_node(&iter->g->node);
-		list_for_each_entry(iter, match_head, list)
-			nested_down_write_ref_node(&iter->g->node,
-						   FS_LOCK_PARENT);
-		take_write = true;
-	}
-
 	/* Check the ft version, for case that new flow group
	 * was added while the fgs weren't locked
	 */
@@ -1657,27 +1656,30 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	/* Check the fgs version, for case the new FTE with the
	 * same values was added while the fgs weren't locked
	 */
-	if (version != matched_fgs_get_version(match_head))
+	if (version != matched_fgs_get_version(match_head)) {
+		take_write = true;
 		goto search_again_locked;
+	}
 
 	list_for_each_entry(iter, match_head, list) {
 		g = iter->g;
 
 		if (!g->node.active)
 			continue;
+
+		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
 		err = insert_fte(g, fte);
 		if (err) {
+			up_write_ref_node(&g->node);
 			if (err == -ENOSPC)
 				continue;
-			list_for_each_entry(iter, match_head, list)
-				up_write_ref_node(&iter->g->node);
 			kmem_cache_free(steering->ftes_cache, fte);
 			return ERR_PTR(err);
 		}
 
 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-		list_for_each_entry(iter, match_head, list)
-			up_write_ref_node(&iter->g->node);
+		up_write_ref_node(&g->node);
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte);
 		up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	}
 
 	rule = ERR_PTR(-ENOENT);
 out:
-	list_for_each_entry(iter, match_head, list)
-		up_write_ref_node(&iter->g->node);
 	kmem_cache_free(steering->ftes_cache, fte);
 	return rule;
 }
@@ -1726,6 +1726,8 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	if (err) {
 		if (take_write)
 			up_write_ref_node(&ft->node);
+		else
+			up_read_ref_node(&ft->node);
 		return ERR_PTR(err);
 	}
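Taken together, the fs_core.c hunks replace "read-lock every candidate group up front, then drop and re-take all of them as write locks on a miss" with lookup_fte_locked(), which locks one group at a time and relies on the version counters to detect races. This both releases the read locks on every path (the original leaked them on some returns) and avoids holding many same-class locks at once, which lockdep flags as a possible deadlock. A minimal userspace sketch of that optimistic lookup-then-revalidate pattern, assuming a single rwlock plus a version counter in place of the driver's ref-counted tree nodes:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* One rwlock plus a version counter stand in for the driver's
 * ref-counted group nodes; the version bumps on every insertion.
 */
struct group {
	pthread_rwlock_t lock;
	unsigned long version;
	int entries[8];
	int n;
};

static unsigned long get_version(struct group *g)
{
	unsigned long v;

	pthread_rwlock_rdlock(&g->lock);
	v = g->version;
	pthread_rwlock_unlock(&g->lock);
	return v;
}

/* Mirror of lookup_fte_locked(): lock in whichever mode the caller has
 * committed to, search, and release on every path.
 */
static bool lookup_locked(struct group *g, int key, bool take_write)
{
	bool found = false;
	int i;

	if (take_write)
		pthread_rwlock_wrlock(&g->lock);
	else
		pthread_rwlock_rdlock(&g->lock);
	for (i = 0; i < g->n; i++)
		if (g->entries[i] == key)
			found = true;
	pthread_rwlock_unlock(&g->lock);
	return found;
}

static void insert(struct group *g, int key)
{
	bool take_write = false;
	unsigned long seen;

search_again:
	seen = get_version(g);
	if (lookup_locked(g, key, take_write))
		return;			/* another path already added it */

	pthread_rwlock_wrlock(&g->lock);
	if (seen != g->version) {
		/* Raced with an inserter: retry, searching under the write
		 * lock this time (the "take_write = true" of the fix above).
		 */
		pthread_rwlock_unlock(&g->lock);
		take_write = true;
		goto search_again;
	}
	g->entries[g->n++] = key;
	g->version++;
	pthread_rwlock_unlock(&g->lock);
}

int main(void)
{
	struct group g = { .lock = PTHREAD_RWLOCK_INITIALIZER };

	insert(&g, 42);
	insert(&g, 42);			/* de-duplicated by the lookup */
	printf("entries: %d\n", g.n);	/* 1 */
	return 0;
}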
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 	add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	if (disable_health) {
+		spin_lock_irqsave(&health->wq_lock, flags);
+		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+		spin_unlock_irqrestore(&health->wq_lock, flags);
+	}
 
 	del_timer_sync(&health->timer);
 }
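The widened mlx5_stop_health_poll() signature above lets teardown paths quiesce the health machinery in one step: when disable_health is true, the drop-new-work bits are set under wq_lock before the timer is deleted, so a health event racing with teardown can no longer queue recovery work against a device that is going away. A minimal userspace sketch of the set-the-flag-under-the-producer-lock idiom, using pthreads and hypothetical names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the health state: the timer callback checks the drop
 * flag under wq_lock before queueing work; teardown sets the flag
 * under the same lock, so no tick can slip between check and queue.
 */
struct health {
	pthread_mutex_t wq_lock;
	bool drop_new_work;	/* plays MLX5_DROP_NEW_{HEALTH,RECOVERY}_WORK */
	int queued;
};

static void timer_tick(struct health *h)
{
	pthread_mutex_lock(&h->wq_lock);
	if (!h->drop_new_work)
		h->queued++;		/* queue_work() in the real driver */
	pthread_mutex_unlock(&h->wq_lock);
}

static void stop_health_poll(struct health *h, bool disable_health)
{
	if (disable_health) {
		pthread_mutex_lock(&h->wq_lock);
		h->drop_new_work = true;
		pthread_mutex_unlock(&h->wq_lock);
	}
	/* a del_timer_sync() equivalent would follow; a tick that already
	 * started sees drop_new_work and queues nothing.
	 */
}

int main(void)
{
	struct health h = { .wq_lock = PTHREAD_MUTEX_INITIALIZER };

	timer_tick(&h);				/* accepted */
	stop_health_poll(&h, true);
	timer_tick(&h);				/* dropped */
	printf("queued = %d\n", h.queued);	/* 1 */
	return 0;
}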
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	priv->numa_node = dev_to_node(&dev->pdev->dev);
 
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-	if (!priv->dbg_root)
+	if (!priv->dbg_root) {
+		dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
 		return -ENOMEM;
+	}
 
 	err = mlx5_pci_enable_device(dev);
 	if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
 	mlx5_pci_disable_device(dev);
-	debugfs_remove(priv->dbg_root);
+	debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
@@ -1286,7 +1288,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mlx5_cleanup_once(dev);
 
 err_stop_poll:
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, boot);
 	if (mlx5_cmd_teardown_hca(dev)) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
 		goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mlx5_free_irq_vectors(dev);
 	if (cleanup)
 		mlx5_cleanup_once(dev);
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, cleanup);
 	err = mlx5_cmd_teardown_hca(dev);
 	if (err) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 	 * with the HCA, so the health poll is no longer needed.
	 */
 	mlx5_drain_health_wq(dev);
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, false);
 
 	ret = mlx5_cmd_force_teardown_hca(dev);
 	if (ret) {
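Two paired main.c fixes above: a failed debugfs_create_dir() (which returns NULL in kernels of this vintage) is now logged and aborts mlx5_pci_init(), and mlx5_pci_close() switches to debugfs_remove_recursive(), since plain debugfs_remove() cannot delete a non-empty directory, so the stale device directory made the next probe's debugfs_create_dir() fail. A kernel-style sketch of the paired create/cleanup, with hypothetical demo_* names (not the driver's code):

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/errno.h>

struct demo_priv {
	struct dentry *dbg_root;
};

static u32 demo_state;

static int demo_debugfs_init(struct device *dev, struct demo_priv *priv)
{
	priv->dbg_root = debugfs_create_dir(dev_name(dev), NULL);
	if (!priv->dbg_root) {	/* debugfs of this era returns NULL on failure */
		dev_err(dev, "Cannot create debugfs dir, aborting\n");
		return -ENOMEM;
	}
	/* Files created under dbg_root make the directory non-empty... */
	debugfs_create_u32("state", 0444, priv->dbg_root, &demo_state);
	return 0;
}

static void demo_debugfs_cleanup(struct demo_priv *priv)
{
	/* ...so teardown must be recursive: debugfs_remove() deletes only
	 * an empty dentry, and a leftover directory blocks re-creation on
	 * the next probe of the same device name.
	 */
	debugfs_remove_recursive(priv->dbg_root);
	priv->dbg_root = NULL;
}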
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 	return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
 {
-	return (u32)wq->fbc.frag_sz_m1 + 1;
+	return wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,7 +138,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
-	u32 sq_strides_offset;
+	u16 sq_strides_offset;
 	u32 rq_pg_remainder;
 	int err;
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -362,8 +362,8 @@ struct mlx5_frag_buf {
 struct mlx5_frag_buf_ctrl {
 	struct mlx5_frag_buf frag_buf;
 	u32 sz_m1;
-	u32 frag_sz_m1;
-	u32 strides_offset;
+	u16 frag_sz_m1;
+	u16 strides_offset;
 	u8 log_sz;
 	u8 log_stride;
 	u8 log_frag_strides;
@@ -995,7 +995,7 @@ static inline u32 mlx5_base_mkey(const u32 key)
 }
 
 static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
-					u32 strides_offset,
+					u16 strides_offset,
 					struct mlx5_frag_buf_ctrl *fbc)
 {
 	fbc->log_stride = log_stride;
@@ -1052,7 +1052,7 @@ int mlx5_cmd_free_uar(struct mlx5_core_dev *dev, u32 uarn);
 void mlx5_health_cleanup(struct mlx5_core_dev *dev);
 int mlx5_health_init(struct mlx5_core_dev *dev);
 void mlx5_start_health_poll(struct mlx5_core_dev *dev);
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev);
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
 void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
 void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
 void mlx5_drain_health_recovery(struct mlx5_core_dev *dev);
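The u16 narrowing across wq.c, wq.h, and driver.h is safe by construction: a WQ buffer fragment is one page, and with the minimal 16-byte WQE stride a 4K page holds at most 256 strides, so frag_sz_m1 (255) fits easily in 16 bits, and per the fix's reasoning the stride offsets the driver computes stay within the same bound. Keeping every holder of these values at u16 removes the silent u32-to-u16 truncation the mixed types previously allowed. A back-of-the-envelope check of that bound, assuming 4K pages and the log-based derivation the mlx5_fill_fbc()-style helpers use:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const unsigned int page_shift = 12;	/* assuming 4K pages */
	const unsigned int log_min_stride = 4;	/* minimal 16-byte WQE stride */

	/* strides per page-sized fragment, derived from the two logs */
	unsigned int log_frag_strides = page_shift - log_min_stride;
	unsigned int frag_sz = 1u << log_frag_strides;

	printf("strides per fragment: %u\n", frag_sz);		/* 256 */
	printf("frag_sz_m1:           %u\n", frag_sz - 1);	/* 255 */
	printf("fits in u16:          %s\n",
	       frag_sz - 1 <= UINT16_MAX ? "yes" : "no");	/* yes */
	return 0;
}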