Commit 9afc97c2 authored by Sagi Grimberg, committed by Jason Gunthorpe

mlx5: remove support for ib_get_vector_affinity

Devices that do not use managed affinity cannot export a vector
affinity, as the consumer relies on having a static mapping it can map to
upper-layer affinity (e.g. sw queues). If the driver allows the user to
set the device irq affinity, then the affinitization of long-term
existing entities is not relevant.

For example, nvme-rdma controller queue-irq affinitization is determined
at init time, so if the irq affinity changes over time, we are no longer
aligned.
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Acked-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent ce1fd6bf
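
To make the "static mapping" concern concrete, here is a minimal, userspace-only C sketch (it is not kernel code; every identifier in it, such as NR_VECTORS, vector_affinity[] and build_queue_map(), is invented for illustration). It models a consumer like nvme-rdma that snapshots the per-vector irq affinity once at init time and derives a queue-to-CPU map from it; when the irq affinity later changes, that snapshot silently goes stale.

/*
 * Hedged, userspace-only sketch of the problem described in the commit
 * message.  A consumer snapshots per-vector irq affinity at init time and
 * builds a static queue->CPU map; a later affinity change is never seen.
 * All names here are made up for illustration and are not kernel APIs.
 */
#include <stdio.h>

#define NR_VECTORS 4
#define NR_QUEUES  4

/* Pretend per-vector irq affinity: one CPU id per completion vector. */
static int vector_affinity[NR_VECTORS] = { 0, 1, 2, 3 };

/* Static queue->CPU map, built once the way an init-time consumer would. */
static int queue_to_cpu[NR_QUEUES];

static void build_queue_map(void)
{
	for (int q = 0; q < NR_QUEUES; q++)
		queue_to_cpu[q] = vector_affinity[q % NR_VECTORS];
}

int main(void)
{
	build_queue_map();		/* init-time snapshot */

	/* Later, the admin moves the irq of vector 0 to another CPU ... */
	vector_affinity[0] = 3;

	/* ... but the consumer's static map never notices the change. */
	for (int q = 0; q < NR_QUEUES; q++)
		printf("queue %d: mapped to CPU %d, irq now on CPU %d\n",
		       q, queue_to_cpu[q], vector_affinity[q % NR_VECTORS]);
	return 0;
}
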
@@ -5332,14 +5332,6 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
 		mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
 }
 
-static const struct cpumask *
-mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
-{
-	struct mlx5_ib_dev *dev = to_mdev(ibdev);
-
-	return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
-}
-
 /* The mlx5_ib_multiport_mutex should be held when calling this function */
 static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev,
 				      struct mlx5_ib_multiport_info *mpi)
@@ -5845,7 +5837,6 @@ int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev)
 	dev->ib_dev.map_mr_sg = mlx5_ib_map_mr_sg;
 	dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status;
 	dev->ib_dev.get_dev_fw_str = get_dev_fw_str;
-	dev->ib_dev.get_vector_affinity = mlx5_ib_get_vector_affinity;
 	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
 	    IS_ENABLED(CONFIG_MLX5_CORE_IPOIB))
 		dev->ib_dev.rdma_netdev_get_params = mlx5_ib_rn_get_params;
@@ -1306,10 +1306,4 @@ enum {
 	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
 };
 
-static inline const struct cpumask *
-mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
-{
-	return dev->priv.irq_info[vector].mask;
-}
-
 #endif /* MLX5_DRIVER_H */
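
For completeness, a hedged sketch of what the consumer side sees after this change: with mlx5 no longer implementing the callback, the core affinity-hint query yields no mask, and a queue-mapping helper (e.g. the RDMA-aware block-layer mapping used by nvme-rdma) falls back to its default software mapping rather than pinning queues to a possibly stale irq affinity. The names get_vector_affinity_hint() and map_queue() below are stand-ins, not kernel APIs.

/*
 * Hedged sketch of the consumer-side fallback, not the kernel's verbatim
 * code.  Once the driver stops exporting a vector affinity hint, the hint
 * query returns NULL and the consumer uses its own default queue->CPU
 * mapping instead.
 */
#include <stdio.h>
#include <stddef.h>

/* Stand-in for the driver hint; after this commit mlx5 provides none. */
static const int *get_vector_affinity_hint(int comp_vector)
{
	(void)comp_vector;
	return NULL;	/* no ->get_vector_affinity callback any more */
}

/* Map one queue: use the driver hint if present, else the default mapping. */
static void map_queue(int q)
{
	const int *hint = get_vector_affinity_hint(q);

	if (hint)
		printf("queue %d: pinned to CPU %d (driver hint)\n", q, *hint);
	else
		printf("queue %d: default software mapping\n", q);
}

int main(void)
{
	for (int q = 0; q < 4; q++)
		map_queue(q);
	return 0;
}
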