Commit 51443685 authored by Sebastian Andrzej Siewior's avatar Sebastian Andrzej Siewior Committed by Saeed Mahameed

net: mlx5: Replace in_irq() usage

mlx5_eq_async_int() uses in_irq() to decide whether eq::lock needs to be
acquired and released with spin_[un]lock() or the irq saving/restoring
variants.

The usage of in_*() in drivers is phased out and Linus clearly requested
that code which changes behaviour depending on context should either be
separated or the context be conveyed in an argument passed by the caller,
which usually knows the context.

mlx5_eq_async_int() knows the context via the action argument already so
using it for the lock variant decision is a straightforward replacement
for in_irq().
Signed-off-by: default avatarSebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: default avatarSaeed Mahameed <saeedm@nvidia.com>
parent 6c613203
...@@ -189,19 +189,21 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) ...@@ -189,19 +189,21 @@ u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq)
return count_eqe; return count_eqe;
} }
/*
 * Acquire eq->lock for the async EQ handler.
 *
 * The caller conveys its context via @recovery instead of the deprecated
 * in_irq() check: on the recovery (non-IRQ) path the interrupt state is
 * saved into *flags via spin_lock_irqsave(); in hard-IRQ context a plain
 * spin_lock() suffices.
 */
static void mlx5_eq_async_int_lock(struct mlx5_eq_async *eq, bool recovery,
				   unsigned long *flags)
	__acquires(&eq->lock)
{
	if (!recovery)
		spin_lock(&eq->lock);
	else
		spin_lock_irqsave(&eq->lock, *flags);
}
/*
 * Release eq->lock, the counterpart of the async EQ lock helper: the
 * saved interrupt state in *flags is restored only on the recovery
 * (non-IRQ) path, matching how the lock was taken.
 */
static void mlx5_eq_async_int_unlock(struct mlx5_eq_async *eq, bool recovery,
				     unsigned long *flags)
	__releases(&eq->lock)
{
	if (!recovery)
		spin_unlock(&eq->lock);
	else
		spin_unlock_irqrestore(&eq->lock, *flags);
}
...@@ -223,11 +225,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb, ...@@ -223,11 +225,13 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
struct mlx5_eqe *eqe; struct mlx5_eqe *eqe;
unsigned long flags; unsigned long flags;
int num_eqes = 0; int num_eqes = 0;
bool recovery;
dev = eq->dev; dev = eq->dev;
eqt = dev->priv.eq_table; eqt = dev->priv.eq_table;
mlx5_eq_async_int_lock(eq_async, &flags); recovery = action == ASYNC_EQ_RECOVER;
mlx5_eq_async_int_lock(eq_async, recovery, &flags);
eqe = next_eqe_sw(eq); eqe = next_eqe_sw(eq);
if (!eqe) if (!eqe)
...@@ -249,9 +253,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb, ...@@ -249,9 +253,9 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
out: out:
eq_update_ci(eq, 1); eq_update_ci(eq, 1);
mlx5_eq_async_int_unlock(eq_async, &flags); mlx5_eq_async_int_unlock(eq_async, recovery, &flags);
return unlikely(action == ASYNC_EQ_RECOVER) ? num_eqes : 0; return unlikely(recovery) ? num_eqes : 0;
} }
void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev) void mlx5_cmd_eq_recover(struct mlx5_core_dev *dev)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment