Commit 3913d00a authored by Linus Torvalds

Merge tag 'irq-core-2020-12-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "This is the second attempt after the first one failed miserably and
  got zapped to unblock the rest of the interrupt related patches.

  A treewide cleanup of interrupt descriptor (ab)use with all sorts of
   racy accesses, inefficient and dysfunctional code. The goal is to
  remove the export of irq_to_desc() to prevent these things from
  creeping up again"

* tag 'irq-core-2020-12-23' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (30 commits)
  genirq: Restrict export of irq_to_desc()
  xen/events: Implement irq distribution
  xen/events: Reduce irq_info::spurious_cnt storage size
  xen/events: Only force affinity mask for percpu interrupts
  xen/events: Use immediate affinity setting
  xen/events: Remove disfunct affinity spreading
  xen/events: Remove unused bind_evtchn_to_irq_lateeoi()
  net/mlx5: Use effective interrupt affinity
  net/mlx5: Replace irq_to_desc() abuse
  net/mlx4: Use effective interrupt affinity
  net/mlx4: Replace irq_to_desc() abuse
  PCI: mobiveil: Use irq_data_get_irq_chip_data()
  PCI: xilinx-nwl: Use irq_data_get_irq_chip_data()
  NTB/msi: Use irq_has_action()
  mfd: ab8500-debugfs: Remove the racy fiddling with irq_desc
  pinctrl: nomadik: Use irq_has_action()
  drm/i915/pmu: Replace open coded kstat_irqs() copy
  drm/i915/lpe_audio: Remove pointless irq_to_desc() usage
  s390/irq: Use irq_desc_kstat_cpu() in show_msi_interrupt()
  parisc/irq: Use irq_desc_kstat_cpu() in show_interrupts()
  ...
parents 4a1106af 64a1b95b
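
The treewide pattern being removed, condensed into a hedged sketch: a throwaway test module (module name and parameter are invented for illustration, not part of this series) that queries an interrupt through the accessors the diffs below introduce or export, instead of poking at irq_to_desc() directly.

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static unsigned int irq = 1;
module_param(irq, uint, 0444);

static int __init irqinfo_init(void)
{
        const struct cpumask *eff;

        /* RCU-protected snapshot; replaces irq_to_desc(irq)->action checks */
        pr_info("irq %u requested: %d\n", irq, irq_has_action(irq));

        /* Where the interrupt is actually delivered */
        eff = irq_get_effective_affinity_mask(irq);
        if (eff)
                pr_info("irq %u effective affinity: %*pbl\n",
                        irq, cpumask_pr_args(eff));
        return 0;
}

static void __exit irqinfo_exit(void) { }

module_init(irqinfo_init);
module_exit(irqinfo_exit);
MODULE_LICENSE("GPL");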
@@ -7,7 +7,7 @@
  *
  * Code supporting the Jensen.
  */
+#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <linux/mm.h>
...
@@ -549,7 +549,7 @@ void show_ipi_list(struct seq_file *p, int prec)
                seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
                for_each_online_cpu(cpu)
-                       seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+                       seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
                seq_printf(p, " %s\n", ipi_types[i]);
        }
...
@@ -811,7 +811,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
                seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
                           prec >= 4 ? " " : "");
                for_each_online_cpu(cpu)
-                       seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+                       seq_printf(p, "%10u ", irq_desc_kstat_cpu(ipi_desc[i], cpu));
                seq_printf(p, " %s\n", ipi_types[i]);
        }
...
@@ -216,12 +216,9 @@ int show_interrupts(struct seq_file *p, void *v)
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
                for_each_online_cpu(j)
-                       seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#else
-               seq_printf(p, "%10u ", kstat_irqs(i));
-#endif
+                       seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, j));

                seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
 #ifndef PARISC_IRQ_CR16_COUNTS
...
@@ -124,7 +124,7 @@ static void show_msi_interrupt(struct seq_file *p, int irq)
        raw_spin_lock_irqsave(&desc->lock, flags);
        seq_printf(p, "%3d: ", irq);
        for_each_online_cpu(cpu)
-               seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+               seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, cpu));

        if (desc->irq_data.chip)
                seq_printf(p, " %8s", desc->irq_data.chip->name);
...
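The four arch conversions above (ARM, arm64, parisc, s390) all reduce to the same loop. A minimal sketch of the shared pattern, with a hypothetical helper name; each architecture open-codes this in its own show_interrupts() path:

#include <linux/irqdesc.h>
#include <linux/seq_file.h>

/* show_one_irq() is an invented name; the arch code inlines this loop */
static void show_one_irq(struct seq_file *p, int irq, struct irq_desc *desc)
{
        unsigned int cpu;

        seq_printf(p, "%3d: ", irq);
        for_each_online_cpu(cpu)
                /* per-CPU count straight from the descriptor; 0 if unused */
                seq_printf(p, "%10u ", irq_desc_kstat_cpu(desc, cpu));
        seq_putc(p, '\n');
}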
@@ -25,6 +25,7 @@
  *
  * Send feedback to <colpatch@us.ibm.com>
  */
+#include <linux/interrupt.h>
 #include <linux/nodemask.h>
 #include <linux/export.h>
 #include <linux/mmzone.h>
...
@@ -297,13 +297,9 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
  */
 void intel_lpe_audio_teardown(struct drm_i915_private *dev_priv)
 {
-       struct irq_desc *desc;
-
        if (!HAS_LPE_AUDIO(dev_priv))
                return;

-       desc = irq_to_desc(dev_priv->lpe_audio.irq);
-
        lpe_audio_platdev_destroy(dev_priv);

        irq_free_desc(dev_priv->lpe_audio.irq);
...
@@ -60,6 +60,24 @@
  * and related files, but that will be described in separate chapters.
  */

+/*
+ * Interrupt statistic for PMU. Increments the counter only if the
+ * interrupt originated from the GPU so interrupts from a device which
+ * shares the interrupt line are not accounted.
+ */
+static inline void pmu_irq_stats(struct drm_i915_private *i915,
+                                 irqreturn_t res)
+{
+       if (unlikely(res != IRQ_HANDLED))
+               return;
+
+       /*
+        * A clever compiler translates that into INC. A not so clever one
+        * should at least prevent store tearing.
+        */
+       WRITE_ONCE(i915->pmu.irq_count, i915->pmu.irq_count + 1);
+}
+
 typedef bool (*long_pulse_detect_func)(enum hpd_pin pin, u32 val);
 typedef u32 (*hotplug_enables_func)(struct drm_i915_private *i915,
                                     enum hpd_pin pin);
@@ -1668,6 +1686,8 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);

+       pmu_irq_stats(dev_priv, ret);
+
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
@@ -1745,6 +1765,8 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
                valleyview_pipestat_irq_handler(dev_priv, pipe_stats);
        } while (0);

+       pmu_irq_stats(dev_priv, ret);
+
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
@@ -2155,6 +2177,8 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg)
        if (sde_ier)
                raw_reg_write(regs, SDEIER, sde_ier);

+       pmu_irq_stats(i915, ret);
+
        /* IRQs are synced during runtime_suspend, we don't require a wakeref */
        enable_rpm_wakeref_asserts(&i915->runtime_pm);
@@ -2541,6 +2565,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
        gen8_master_intr_enable(regs);

+       pmu_irq_stats(dev_priv, IRQ_HANDLED);
+
        return IRQ_HANDLED;
 }
@@ -2636,6 +2662,8 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
        gen11_gu_misc_irq_handler(gt, gu_misc_iir);

+       pmu_irq_stats(i915, IRQ_HANDLED);
+
        return IRQ_HANDLED;
 }
@@ -3934,6 +3962,8 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
                i8xx_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

+       pmu_irq_stats(dev_priv, ret);
+
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
@@ -4043,6 +4073,8 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
                i915_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

+       pmu_irq_stats(dev_priv, ret);
+
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
@@ -4189,6 +4221,8 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
                i965_pipestat_irq_handler(dev_priv, iir, pipe_stats);
        } while (0);

+       pmu_irq_stats(dev_priv, IRQ_HANDLED);
+
        enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);

        return ret;
...
@@ -4,7 +4,6 @@
  * Copyright © 2017-2018 Intel Corporation
  */

-#include <linux/irq.h>
 #include <linux/pm_runtime.h>

 #include "gt/intel_engine.h"
@@ -424,22 +423,6 @@ static enum hrtimer_restart i915_sample(struct hrtimer *hrtimer)
        return HRTIMER_RESTART;
 }

-static u64 count_interrupts(struct drm_i915_private *i915)
-{
-       /* open-coded kstat_irqs() */
-       struct irq_desc *desc = irq_to_desc(i915->drm.pdev->irq);
-       u64 sum = 0;
-       int cpu;
-
-       if (!desc || !desc->kstat_irqs)
-               return 0;
-
-       for_each_possible_cpu(cpu)
-               sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
-
-       return sum;
-}
-
 static void i915_pmu_event_destroy(struct perf_event *event)
 {
        struct drm_i915_private *i915 =
@@ -590,7 +573,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
                                   USEC_PER_SEC /* to MHz */);
                break;
        case I915_PMU_INTERRUPTS:
-               val = count_interrupts(i915);
+               val = READ_ONCE(pmu->irq_count);
                break;
        case I915_PMU_RC6_RESIDENCY:
                val = get_rc6(&i915->gt);
...
@@ -111,6 +111,14 @@ struct i915_pmu {
         * @sleep_last: Last time GT parked for RC6 estimation.
         */
        ktime_t sleep_last;
+       /**
+        * @irq_count: Number of interrupts
+        *
+        * Intentionally unsigned long to avoid atomics or heuristics on 32bit.
+        * 4e9 interrupts are a lot and postprocessing can really deal with an
+        * occasional wraparound easily. It's 32bit after all.
+        */
+       unsigned long irq_count;
        /**
         * @events_attr_group: Device events attribute group.
         */
...
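The counter added above is a deliberate single-writer design: only the interrupt handler increments pmu.irq_count, and WRITE_ONCE()/READ_ONCE() merely keep the compiler from tearing or refetching the access, so no atomics are needed. A standalone userspace sketch of the same idiom, with simplified stand-ins for the kernel macros:

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile typeof(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile typeof(x) *)&(x))

static unsigned long irq_count;

static void handler_path(void)          /* sole writer, like the irq handler */
{
        WRITE_ONCE(irq_count, irq_count + 1);
}

static unsigned long event_read(void)   /* any reader, like the PMU read */
{
        return READ_ONCE(irq_count);
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                handler_path();
        printf("%lu\n", event_read());  /* prints 5 */
        return 0;
}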
@@ -1513,24 +1513,14 @@ static int ab8500_interrupts_show(struct seq_file *s, void *p)
 {
        int line;

-       seq_puts(s, "name: number: number of: wake:\n");
+       seq_puts(s, "name: number: irq: number of: wake:\n");

        for (line = 0; line < num_interrupt_lines; line++) {
-               struct irq_desc *desc = irq_to_desc(line + irq_first);
-
-               seq_printf(s, "%3i: %6i %4i",
+               seq_printf(s, "%3i: %4i %6i %4i",
                           line,
+                          line + irq_first,
                           num_interrupts[line],
                           num_wake_interrupts[line]);

-               if (desc && desc->name)
-                       seq_printf(s, "-%-8s", desc->name);
-               if (desc && desc->action) {
-                       struct irqaction *action = desc->action;
-
-                       seq_printf(s, " %s", action->name);
-                       while ((action = action->next) != NULL)
-                               seq_printf(s, ", %s", action->name);
-               }
                seq_putc(s, '\n');
        }
...
@@ -90,7 +90,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                        int cq_idx)
 {
        struct mlx4_en_dev *mdev = priv->mdev;
-       int err = 0;
+       int irq, err = 0;
        int timestamp_en = 0;
        bool assigned_eq = false;
@@ -116,10 +116,8 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                        assigned_eq = true;
                }

-               cq->irq_desc =
-                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
-                                                   cq->vector));
+               irq = mlx4_eq_get_irq(mdev->dev, cq->vector);
+               cq->aff_mask = irq_get_effective_affinity_mask(irq);
        } else {
                /* For TX we use the same irq per
                ring we assigned for the RX    */
...
@@ -958,18 +958,14 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)

        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget || !clean_complete) {
-               const struct cpumask *aff;
-               struct irq_data *idata;
                int cpu_curr;

                /* in case we got here because of !clean_complete */
                done = budget;

                cpu_curr = smp_processor_id();
-               idata = irq_desc_get_irq_data(cq->irq_desc);
-               aff = irq_data_get_affinity_mask(idata);

-               if (likely(cpumask_test_cpu(cpu_curr, aff)))
+               if (likely(cpumask_test_cpu(cpu_curr, cq->aff_mask)))
                        return budget;

                /* Current cpu is not according to smp_irq_affinity -
...
@@ -47,6 +47,7 @@
 #endif
 #include <linux/cpu_rmap.h>
 #include <linux/ptp_clock_kernel.h>
+#include <linux/irq.h>

 #include <net/xdp.h>

 #include <linux/mlx4/device.h>
@@ -365,7 +366,7 @@ struct mlx4_en_cq {
        struct mlx4_cqe *buf;
 #define MLX4_EN_OPCODE_ERROR    0x1e

-       struct irq_desc *irq_desc;
+       const struct cpumask *aff_mask;
 };

 struct mlx4_en_port_profile {
...
@@ -684,7 +684,7 @@ struct mlx5e_channel {
        spinlock_t                 async_icosq_lock;

        /* data path - accessed per napi poll */
-       struct irq_desc *irq_desc;
+       const struct cpumask      *aff_mask;
        struct mlx5e_ch_stats     *stats;

        /* control */
...
@@ -479,7 +479,6 @@ int mlx5e_port_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc   = params->num_tc;
        c->stats    = &priv->port_ptp_stats.ch;
-       c->irq_desc = irq_to_desc(irq);
        c->lag_port = lag_port;

        netif_napi_add(netdev, &c->napi, mlx5e_ptp_napi_poll, 64);
...
@@ -28,7 +28,6 @@ struct mlx5e_port_ptp {
        u8                         lag_port;

        /* data path - accessed per napi poll */
-       struct irq_desc *irq_desc;
        struct mlx5e_ch_stats     *stats;

        /* control */
...
@@ -1987,7 +1987,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->num_tc   = params->num_tc;
        c->xdp      = !!params->xdp_prog;
        c->stats    = &priv->channel_stats[ix].ch;
-       c->irq_desc = irq_to_desc(irq);
+       c->aff_mask = irq_get_effective_affinity_mask(irq);
        c->lag_port = mlx5e_enumerate_lag_port(priv->mdev, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
...
@@ -40,12 +40,8 @@
 static inline bool mlx5e_channel_no_affinity_change(struct mlx5e_channel *c)
 {
        int current_cpu = smp_processor_id();
-       const struct cpumask *aff;
-       struct irq_data *idata;

-       idata = irq_desc_get_irq_data(c->irq_desc);
-       aff = irq_data_get_affinity_mask(idata);
-       return cpumask_test_cpu(current_cpu, aff);
+       return cpumask_test_cpu(current_cpu, c->aff_mask);
 }

 static void mlx5e_handle_tx_dim(struct mlx5e_txqsq *sq)
...
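Both the mlx4 and mlx5 conversions rely on the same property: the effective-affinity cpumask lives inside the irq descriptor and the pointer stays valid for the descriptor's lifetime, so it can be cached once at channel setup and tested cheaply on every poll. A hedged sketch of that caching pattern; the my_channel names are hypothetical, not driver code:

#include <linux/irq.h>
#include <linux/smp.h>

struct my_channel {                     /* hypothetical driver channel */
        const struct cpumask *aff_mask;
};

static void my_channel_setup(struct my_channel *c, unsigned int irq)
{
        /* one descriptor lookup at setup, none in the data path */
        c->aff_mask = irq_get_effective_affinity_mask(irq);
}

static bool my_poll_on_home_cpu(struct my_channel *c)
{
        /* NULL check for configs that do not track effective affinity */
        return c->aff_mask &&
               cpumask_test_cpu(smp_processor_id(), c->aff_mask);
}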
@@ -282,15 +282,13 @@ int ntbm_msi_request_threaded_irq(struct ntb_dev *ntb, irq_handler_t handler,
                                  struct ntb_msi_desc *msi_desc)
 {
        struct msi_desc *entry;
-       struct irq_desc *desc;
        int ret;

        if (!ntb->msi)
                return -EINVAL;

        for_each_pci_msi_entry(entry, ntb->pdev) {
-               desc = irq_to_desc(entry->irq);
-               if (desc->action)
+               if (irq_has_action(entry->irq))
                        continue;

                ret = devm_request_threaded_irq(&ntb->dev, entry->irq, handler,
...
@@ -306,13 +306,11 @@ int mobiveil_host_init(struct mobiveil_pcie *pcie, bool reinit)

 static void mobiveil_mask_intx_irq(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(data->irq);
-       struct mobiveil_pcie *pcie;
+       struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
        struct mobiveil_root_port *rp;
        unsigned long flags;
        u32 mask, shifted_val;

-       pcie = irq_desc_get_chip_data(desc);
        rp = &pcie->rp;
        mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
        raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
@@ -324,13 +322,11 @@ static void mobiveil_mask_intx_irq(struct irq_data *data)

 static void mobiveil_unmask_intx_irq(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(data->irq);
-       struct mobiveil_pcie *pcie;
+       struct mobiveil_pcie *pcie = irq_data_get_irq_chip_data(data);
        struct mobiveil_root_port *rp;
        unsigned long flags;
        u32 shifted_val, mask;

-       pcie = irq_desc_get_chip_data(desc);
        rp = &pcie->rp;
        mask = 1 << ((data->hwirq + PAB_INTX_START) - 1);
        raw_spin_lock_irqsave(&rp->intx_mask_lock, flags);
...
@@ -374,13 +374,11 @@ static void nwl_pcie_msi_handler_low(struct irq_desc *desc)

 static void nwl_mask_leg_irq(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(data->irq);
-       struct nwl_pcie *pcie;
+       struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;

-       pcie = irq_desc_get_chip_data(desc);
        mask = 1 << (data->hwirq - 1);
        raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
@@ -390,13 +388,11 @@ static void nwl_mask_leg_irq(struct irq_data *data)

 static void nwl_unmask_leg_irq(struct irq_data *data)
 {
-       struct irq_desc *desc = irq_to_desc(data->irq);
-       struct nwl_pcie *pcie;
+       struct nwl_pcie *pcie = irq_data_get_irq_chip_data(data);
        unsigned long flags;
        u32 mask;
        u32 val;

-       pcie = irq_desc_get_chip_data(desc);
        mask = 1 << (data->hwirq - 1);
        raw_spin_lock_irqsave(&pcie->leg_mask_lock, flags);
        val = nwl_bridge_readl(pcie, MSGF_LEG_MASK);
...
@@ -948,8 +948,8 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
                           (mode < 0) ? "unknown" : modes[mode]);
        } else {
                int irq = chip->to_irq(chip, offset);
-               struct irq_desc *desc = irq_to_desc(irq);
                const int pullidx = pull ? 1 : 0;
+               bool wake;
                int val;
                static const char * const pulls[] = {
                        "none ",
@@ -969,8 +969,9 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
                 * This races with request_irq(), set_irq_type(),
                 * and set_irq_wake() ... but those are "rare".
                 */
-               if (irq > 0 && desc && desc->action) {
+               if (irq > 0 && irq_has_action(irq)) {
                        char *trigger;
+                       bool wake;

                        if (nmk_chip->edge_rising & BIT(offset))
@@ -979,10 +980,10 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s,
                        else
                                trigger = "edge-undefined";

+                       wake = !!(nmk_chip->real_wake & BIT(offset));
                        seq_printf(s, " irq-%d %s%s",
-                                  irq, trigger,
-                                  irqd_is_wakeup_set(&desc->irq_data)
-                                  ? " wakeup" : "");
+                                  irq, trigger, wake ? " wakeup" : "");
                }
        }
        clk_disable(nmk_chip->clk);
...
@@ -95,7 +95,8 @@ struct irq_info {
        struct list_head list;
        struct list_head eoi_list;
        short refcnt;
-       short spurious_cnt;
+       u8 spurious_cnt;
+       u8 is_accounted;
        enum xen_irq_type type; /* type */
        unsigned irq;
        evtchn_port_t evtchn;   /* event channel */
@@ -161,6 +162,9 @@ static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};
 /* IRQ <-> IPI mapping */
 static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

+/* Event channel distribution data */
+static atomic_t channels_on_cpu[NR_CPUS];
+
 static int **evtchn_to_irq;
 #ifdef CONFIG_X86
 static unsigned long *pirq_eoi_map;
@@ -257,6 +261,32 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
                irq_set_chip_data(irq, info);
 }

+/* Per CPU channel accounting */
+static void channels_on_cpu_dec(struct irq_info *info)
+{
+       if (!info->is_accounted)
+               return;
+
+       info->is_accounted = 0;
+
+       if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
+               return;
+
+       WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1 , 0));
+}
+
+static void channels_on_cpu_inc(struct irq_info *info)
+{
+       if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
+               return;
+
+       if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
+                                           INT_MAX)))
+               return;
+
+       info->is_accounted = 1;
+}
+
 /* Constructors for packed IRQ information. */
 static int xen_irq_info_common_setup(struct irq_info *info,
                                     unsigned irq,
@@ -339,6 +369,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
 {
        set_evtchn_to_irq(info->evtchn, -1);
        info->evtchn = 0;
+       channels_on_cpu_dec(info);
 }

 /*
@@ -433,18 +464,25 @@ static bool pirq_needs_eoi_flag(unsigned irq)
        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
 }

-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu)
+static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+                              bool force_affinity)
 {
        int irq = get_evtchn_to_irq(evtchn);
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(irq == -1);
-#ifdef CONFIG_SMP
-       cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
-#endif
+
+       if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
+               cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
+               cpumask_copy(irq_get_effective_affinity_mask(irq),
+                            cpumask_of(cpu));
+       }
+
        xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);

+       channels_on_cpu_dec(info);
        info->cpu = cpu;
+       channels_on_cpu_inc(info);
 }

 /**
/** /**
...@@ -523,8 +561,10 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious) ...@@ -523,8 +561,10 @@ static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
return; return;
if (spurious) { if (spurious) {
if ((1 << info->spurious_cnt) < (HZ << 2)) if ((1 << info->spurious_cnt) < (HZ << 2)) {
info->spurious_cnt++; if (info->spurious_cnt != 0xFF)
info->spurious_cnt++;
}
if (info->spurious_cnt > 1) { if (info->spurious_cnt > 1) {
delay = 1 << (info->spurious_cnt - 2); delay = 1 << (info->spurious_cnt - 2);
if (delay > HZ) if (delay > HZ)
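
The spurious-event handling above derives an exponential backoff from spurious_cnt, which this series shrinks to a saturating u8. A small userspace sketch of the delay series it produces, assuming HZ=250:

#include <stdio.h>

#define HZ 250

int main(void)
{
        unsigned char spurious_cnt = 0;

        for (int event = 0; event < 12; event++) {
                unsigned long delay = 0;

                if ((1UL << spurious_cnt) < (HZ << 2)) {
                        if (spurious_cnt != 0xFF)       /* saturate, never wrap */
                                spurious_cnt++;
                }
                if (spurious_cnt > 1) {
                        delay = 1UL << (spurious_cnt - 2);
                        if (delay > HZ)                 /* cap at one second */
                                delay = HZ;
                }
                printf("event %2d: cnt=%u delay=%lu jiffies\n",
                       event, spurious_cnt, delay);
        }
        return 0;
}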
@@ -615,11 +655,6 @@ static void xen_irq_init(unsigned irq)
 {
        struct irq_info *info;

-#ifdef CONFIG_SMP
-       /* By default all event channels notify CPU#0. */
-       cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(0));
-#endif
-
        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                panic("Unable to allocate metadata for IRQ%d\n", irq);
@@ -628,6 +663,11 @@ static void xen_irq_init(unsigned irq)
        info->refcnt = -1;

        set_info_for_irq(irq, info);
+       /*
+        * Interrupt affinity setting can be immediate. No point
+        * in delaying it until an interrupt is handled.
+        */
+       irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

        INIT_LIST_HEAD(&info->eoi_list);
        list_add_tail(&info->list, &xen_irq_list_head);
@@ -739,18 +779,7 @@ static void eoi_pirq(struct irq_data *data)
        if (!VALID_EVTCHN(evtchn))
                return;

-       if (unlikely(irqd_is_setaffinity_pending(data)) &&
-           likely(!irqd_irq_disabled(data))) {
-               int masked = test_and_set_mask(evtchn);
-
-               clear_evtchn(evtchn);
-
-               irq_move_masked_irq(data);
-
-               if (!masked)
-                       unmask_evtchn(evtchn);
-       } else
-               clear_evtchn(evtchn);
+       clear_evtchn(evtchn);

        if (pirq_needs_eoi(data->irq)) {
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
@@ -794,7 +823,7 @@ static unsigned int __startup_pirq(unsigned int irq)
                goto err;

        info->evtchn = evtchn;
-       bind_evtchn_to_cpu(evtchn, 0);
+       bind_evtchn_to_cpu(evtchn, 0, false);

        rc = xen_evtchn_port_setup(evtchn);
        if (rc)
@@ -1113,8 +1142,14 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip)
                        irq = ret;
                        goto out;
                }
-               /* New interdomain events are bound to VCPU 0. */
-               bind_evtchn_to_cpu(evtchn, 0);
+               /*
+                * New interdomain events are initially bound to vCPU0. This
+                * is required to setup the event channel in the first
+                * place and also important for UP guests because the
+                * affinity setting is not invoked on them so nothing would
+                * bind the channel.
+                */
+               bind_evtchn_to_cpu(evtchn, 0, false);
        } else {
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
@@ -1132,12 +1167,6 @@ int bind_evtchn_to_irq(evtchn_port_t evtchn)
 }
 EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

-int bind_evtchn_to_irq_lateeoi(evtchn_port_t evtchn)
-{
-       return bind_evtchn_to_irq_chip(evtchn, &xen_lateeoi_chip);
-}
-EXPORT_SYMBOL_GPL(bind_evtchn_to_irq_lateeoi);
-
 static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 {
        struct evtchn_bind_ipi bind_ipi;
@@ -1168,7 +1197,11 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
                        irq = ret;
                        goto out;
                }
-               bind_evtchn_to_cpu(evtchn, cpu);
+               /*
+                * Force the affinity mask to the target CPU so proc shows
+                * the correct target.
+                */
+               bind_evtchn_to_cpu(evtchn, cpu, true);
        } else {
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_IPI);
@@ -1281,7 +1314,11 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
                        goto out;
                }

-               bind_evtchn_to_cpu(evtchn, cpu);
+               /*
+                * Force the affinity mask for percpu interrupts so proc
+                * shows the correct target.
+                */
+               bind_evtchn_to_cpu(evtchn, cpu, percpu);
        } else {
                struct irq_info *info = info_for_irq(irq);
                WARN_ON(info == NULL || info->type != IRQT_VIRQ);
@@ -1646,9 +1683,7 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)

        mutex_unlock(&irq_mapping_update_lock);

-       bind_evtchn_to_cpu(evtchn, info->cpu);
-       /* This will be deferred until interrupt is processed */
-       irq_set_affinity(irq, cpumask_of(info->cpu));
+       bind_evtchn_to_cpu(evtchn, info->cpu, false);

        /* Unmask the event channel. */
        enable_irq(irq);
@@ -1682,7 +1717,7 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
-               bind_evtchn_to_cpu(evtchn, tcpu);
+               bind_evtchn_to_cpu(evtchn, tcpu, false);

        if (!masked)
                unmask_evtchn(evtchn);
@@ -1690,27 +1725,47 @@ static int xen_rebind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int tcpu)
        return 0;
 }

+/*
+ * Find the CPU within @dest mask which has the least number of channels
+ * assigned. This is not precise as the per cpu counts can be modified
+ * concurrently.
+ */
+static unsigned int select_target_cpu(const struct cpumask *dest)
+{
+       unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;
+
+       for_each_cpu_and(cpu, dest, cpu_online_mask) {
+               unsigned int curch = atomic_read(&channels_on_cpu[cpu]);
+
+               if (curch < minch) {
+                       minch = curch;
+                       best_cpu = cpu;
+               }
+       }
+
+       /*
+        * Catch the unlikely case that dest contains no online CPUs. Can't
+        * recurse.
+        */
+       if (best_cpu == UINT_MAX)
+               return select_target_cpu(cpu_online_mask);
+
+       return best_cpu;
+}
+
 static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
 {
-       unsigned tcpu = cpumask_first_and(dest, cpu_online_mask);
-       int ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
+       unsigned int tcpu = select_target_cpu(dest);
+       int ret;

+       ret = xen_rebind_evtchn_to_cpu(evtchn_from_irq(data->irq), tcpu);
        if (!ret)
                irq_data_update_effective_affinity(data, cpumask_of(tcpu));

        return ret;
 }

-/* To be called with desc->lock held. */
-int xen_set_affinity_evtchn(struct irq_desc *desc, unsigned int tcpu)
-{
-       struct irq_data *d = irq_desc_get_irq_data(desc);
-
-       return set_affinity_irq(d, cpumask_of(tcpu), false);
-}
-EXPORT_SYMBOL_GPL(xen_set_affinity_evtchn);
-
 static void enable_dynirq(struct irq_data *data)
 {
        evtchn_port_t evtchn = evtchn_from_irq(data->irq);
@@ -1734,18 +1789,7 @@ static void ack_dynirq(struct irq_data *data)
        if (!VALID_EVTCHN(evtchn))
                return;

-       if (unlikely(irqd_is_setaffinity_pending(data)) &&
-           likely(!irqd_irq_disabled(data))) {
-               int masked = test_and_set_mask(evtchn);
-
-               clear_evtchn(evtchn);
-
-               irq_move_masked_irq(data);
-
-               if (!masked)
-                       unmask_evtchn(evtchn);
-       } else
-               clear_evtchn(evtchn);
+       clear_evtchn(evtchn);
 }

 static void mask_ack_dynirq(struct irq_data *data)
@@ -1830,7 +1874,8 @@ static void restore_cpu_virqs(unsigned int cpu)

                /* Record the new mapping. */
                (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
-               bind_evtchn_to_cpu(evtchn, cpu);
+               /* The affinity mask is still valid */
+               bind_evtchn_to_cpu(evtchn, cpu, false);
        }
 }
@@ -1855,7 +1900,8 @@ static void restore_cpu_ipis(unsigned int cpu)

                /* Record the new mapping. */
                (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
-               bind_evtchn_to_cpu(evtchn, cpu);
+               /* The affinity mask is still valid */
+               bind_evtchn_to_cpu(evtchn, cpu, false);
        }
 }
@@ -1938,8 +1984,12 @@ void xen_irq_resume(void)
        xen_evtchn_resume();

        /* No IRQ <-> event-channel mappings. */
-       list_for_each_entry(info, &xen_irq_list_head, list)
-               info->evtchn = 0; /* zap event-channel binding */
+       list_for_each_entry(info, &xen_irq_list_head, list) {
+               /* Zap event-channel binding */
+               info->evtchn = 0;
+               /* Adjust accounting */
+               channels_on_cpu_dec(info);
+       }

        clear_evtchn_to_irq_all();
...
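select_target_cpu() above implements a deliberately imprecise least-loaded policy: stale counter reads are tolerated because the result only has to be roughly balanced. A standalone userspace sketch of the same selection logic, with plain int arrays standing in for cpumasks and atomics:

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

static int channels_on_cpu[NR_CPUS] = { 3, 1, 2, 1 };
static int cpu_online[NR_CPUS]      = { 1, 1, 1, 1 };

static unsigned int select_target_cpu(const int *dest_mask)
{
        unsigned int cpu, best_cpu = UINT_MAX;
        int minch = INT_MAX;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                if (!dest_mask[cpu] || !cpu_online[cpu])
                        continue;
                if (channels_on_cpu[cpu] < minch) {
                        minch = channels_on_cpu[cpu];
                        best_cpu = cpu;
                }
        }
        /* fall back to all online CPUs if dest contained none */
        return best_cpu != UINT_MAX ? best_cpu
                                    : select_target_cpu(cpu_online);
}

int main(void)
{
        int dest[NR_CPUS] = { 1, 1, 1, 0 };     /* requested affinity */

        /* CPU1 wins: online, allowed, least loaded */
        printf("target: %u\n", select_target_cpu(dest));
        return 0;
}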
@@ -421,36 +421,6 @@ static void evtchn_unbind_from_user(struct per_user_data *u,
        del_evtchn(u, evtchn);
 }

-static DEFINE_PER_CPU(int, bind_last_selected_cpu);
-
-static void evtchn_bind_interdom_next_vcpu(evtchn_port_t evtchn)
-{
-       unsigned int selected_cpu, irq;
-       struct irq_desc *desc;
-       unsigned long flags;
-
-       irq = irq_from_evtchn(evtchn);
-       desc = irq_to_desc(irq);
-
-       if (!desc)
-               return;
-
-       raw_spin_lock_irqsave(&desc->lock, flags);
-       selected_cpu = this_cpu_read(bind_last_selected_cpu);
-       selected_cpu = cpumask_next_and(selected_cpu,
-                       desc->irq_common_data.affinity, cpu_online_mask);
-
-       if (unlikely(selected_cpu >= nr_cpu_ids))
-               selected_cpu = cpumask_first_and(desc->irq_common_data.affinity,
-                               cpu_online_mask);
-
-       this_cpu_write(bind_last_selected_cpu, selected_cpu);
-
-       /* unmask expects irqs to be disabled */
-       xen_set_affinity_evtchn(desc, selected_cpu);
-       raw_spin_unlock_irqrestore(&desc->lock, flags);
-}
-
 static long evtchn_ioctl(struct file *file,
                         unsigned int cmd, unsigned long arg)
 {
@@ -508,10 +478,8 @@ static long evtchn_ioctl(struct file *file,
                        break;

                rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
-               if (rc == 0) {
+               if (rc == 0)
                        rc = bind_interdomain.local_port;
-                       evtchn_bind_interdom_next_vcpu(rc);
-               }
                break;
        }
...
@@ -232,6 +232,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 # define local_irq_enable_in_hardirq() local_irq_enable()
 #endif

+bool irq_has_action(unsigned int irq);
 extern void disable_irq_nosync(unsigned int irq);
 extern bool disable_hardirq(unsigned int irq);
 extern void disable_irq(unsigned int irq);
...
@@ -906,6 +906,13 @@ struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
 }
 #endif

+static inline struct cpumask *irq_get_effective_affinity_mask(unsigned int irq)
+{
+       struct irq_data *d = irq_get_irq_data(irq);
+
+       return d ? irq_data_get_effective_affinity_mask(d) : NULL;
+}
+
 unsigned int arch_dynirq_lower_bound(unsigned int from);

 int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
...
@@ -113,6 +113,12 @@ static inline void irq_unlock_sparse(void) { }
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif

+static inline unsigned int irq_desc_kstat_cpu(struct irq_desc *desc,
+                                             unsigned int cpu)
+{
+       return desc->kstat_irqs ? *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
+}
+
 static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
 {
        return container_of(data->common, struct irq_desc, irq_common_data);
@@ -179,12 +185,7 @@ int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
 /* Test to see if a driver has successfully requested an irq */
 static inline int irq_desc_has_action(struct irq_desc *desc)
 {
-       return desc->action != NULL;
-}
-
-static inline int irq_has_action(unsigned int irq)
-{
-       return irq_desc_has_action(irq_to_desc(irq));
+       return desc && desc->action != NULL;
 }

 /**
@@ -228,40 +229,31 @@ irq_set_chip_handler_name_locked(struct irq_data *data, struct irq_chip *chip,
        data->chip = chip;
 }

+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask);
+
 static inline bool irq_balancing_disabled(unsigned int irq)
 {
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       return desc->status_use_accessors & IRQ_NO_BALANCING_MASK;
+       return irq_check_status_bit(irq, IRQ_NO_BALANCING_MASK);
 }

 static inline bool irq_is_percpu(unsigned int irq)
 {
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       return desc->status_use_accessors & IRQ_PER_CPU;
+       return irq_check_status_bit(irq, IRQ_PER_CPU);
 }

 static inline bool irq_is_percpu_devid(unsigned int irq)
 {
-       struct irq_desc *desc;
-
-       desc = irq_to_desc(irq);
-       return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
+       return irq_check_status_bit(irq, IRQ_PER_CPU_DEVID);
 }

+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+                            struct lock_class_key *request_class);
 static inline void
 irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
                      struct lock_class_key *request_class)
 {
-       struct irq_desc *desc = irq_to_desc(irq);
-
-       if (desc) {
-               lockdep_set_class(&desc->lock, lock_class);
-               lockdep_set_class(&desc->request_mutex, request_class);
-       }
+       if (IS_ENABLED(CONFIG_LOCKDEP))
+               __irq_set_lockdep_class(irq, lock_class, request_class);
 }

 #endif
@@ -67,7 +67,6 @@ static inline unsigned int kstat_softirqs_cpu(unsigned int irq, int cpu)
 /*
  * Number of interrupts per specific IRQ source, since bootup
  */
-extern unsigned int kstat_irqs(unsigned int irq);
 extern unsigned int kstat_irqs_usr(unsigned int irq);

 /*
...
@@ -147,12 +147,12 @@ static ssize_t per_cpu_count_show(struct kobject *kobj,
                                  struct kobj_attribute *attr, char *buf)
 {
        struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
-       int cpu, irq = desc->irq_data.irq;
        ssize_t ret = 0;
        char *p = "";
+       int cpu;

        for_each_possible_cpu(cpu) {
-               unsigned int c = kstat_irqs_cpu(irq, cpu);
+               unsigned int c = irq_desc_kstat_cpu(desc, cpu);

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
                p = ",";
@@ -352,7 +352,9 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 {
        return radix_tree_lookup(&irq_desc_tree, irq);
 }
-EXPORT_SYMBOL(irq_to_desc);
+#ifdef CONFIG_KVM_BOOK3S_64_HV
+EXPORT_SYMBOL_GPL(irq_to_desc);
+#endif

 static void delete_irq_desc(unsigned int irq)
 {
@@ -924,15 +926,7 @@ static bool irq_is_nmi(struct irq_desc *desc)
        return desc->istate & IRQS_NMI;
 }

-/**
- * kstat_irqs - Get the statistics for an interrupt
- * @irq:       The interrupt number
- *
- * Returns the sum of interrupt counts on all cpus since boot for
- * @irq. The caller must ensure that the interrupt is not removed
- * concurrently.
- */
-unsigned int kstat_irqs(unsigned int irq)
+static unsigned int kstat_irqs(unsigned int irq)
 {
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int sum = 0;
@@ -943,21 +937,22 @@ unsigned int kstat_irqs(unsigned int irq)
        if (!irq_settings_is_per_cpu_devid(desc) &&
            !irq_settings_is_per_cpu(desc) &&
            !irq_is_nmi(desc))
-               return desc->tot_count;
+               return data_race(desc->tot_count);

        for_each_possible_cpu(cpu)
-               sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
+               sum += data_race(*per_cpu_ptr(desc->kstat_irqs, cpu));
        return sum;
 }

 /**
- * kstat_irqs_usr - Get the statistics for an interrupt
+ * kstat_irqs_usr - Get the statistics for an interrupt from thread context
  * @irq:       The interrupt number
  *
  * Returns the sum of interrupt counts on all cpus since boot for @irq.
- * Contrary to kstat_irqs() this can be called from any context.
- * It uses rcu since a concurrent removal of an interrupt descriptor is
- * observing an rcu grace period before delayed_free_desc()/irq_kobj_release().
+ *
+ * It uses rcu to protect the access since a concurrent removal of an
+ * interrupt descriptor is observing an rcu grace period before
+ * delayed_free_desc()/irq_kobj_release().
  */
 unsigned int kstat_irqs_usr(unsigned int irq)
 {
@@ -968,3 +963,17 @@ unsigned int kstat_irqs_usr(unsigned int irq)
        rcu_read_unlock();
        return sum;
 }
+
+#ifdef CONFIG_LOCKDEP
+void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
+                            struct lock_class_key *request_class)
+{
+       struct irq_desc *desc = irq_to_desc(irq);
+
+       if (desc) {
+               lockdep_set_class(&desc->lock, lock_class);
+               lockdep_set_class(&desc->request_mutex, request_class);
+       }
+}
+EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
+#endif
@@ -2822,3 +2822,40 @@ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
        return err;
 }
 EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
+
+/**
+ * irq_has_action - Check whether an interrupt is requested
+ * @irq:       The linux irq number
+ *
+ * Returns: A snapshot of the current state
+ */
+bool irq_has_action(unsigned int irq)
+{
+       bool res;
+
+       rcu_read_lock();
+       res = irq_desc_has_action(irq_to_desc(irq));
+       rcu_read_unlock();
+       return res;
+}
+EXPORT_SYMBOL_GPL(irq_has_action);
+
+/**
+ * irq_check_status_bit - Check whether bits in the irq descriptor status are set
+ * @irq:       The linux irq number
+ * @bitmask:   The bitmask to evaluate
+ *
+ * Returns: True if one of the bits in @bitmask is set
+ */
+bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
+{
+       struct irq_desc *desc;
+       bool res = false;
+
+       rcu_read_lock();
+       desc = irq_to_desc(irq);
+       if (desc)
+               res = !!(desc->status_use_accessors & bitmask);
+       rcu_read_unlock();
+       return res;
+}
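
For driver authors, the practical effect of irq_has_action() and irq_check_status_bit() is replacing direct descriptor pokes with RCU-protected snapshots. A hedged before/after sketch, illustrative only and not taken from this series:

#include <linux/interrupt.h>
#include <linux/irq.h>

static void old_way(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);  /* export now restricted */

        if (desc && desc->action)                  /* racy against free */
                pr_info("irq %u in use\n", irq);
        if (desc && (desc->status_use_accessors & IRQ_PER_CPU))
                pr_info("irq %u is per-CPU\n", irq);
}

static void new_way(unsigned int irq)
{
        if (irq_has_action(irq))        /* RCU-protected snapshot */
                pr_info("irq %u in use\n", irq);
        if (irq_is_percpu(irq))         /* wraps irq_check_status_bit() */
                pr_info("irq %u is per-CPU\n", irq);
}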
@@ -488,9 +488,10 @@ int show_interrupts(struct seq_file *p, void *v)
        if (!desc || irq_settings_is_hidden(desc))
                goto outsparse;

-       if (desc->kstat_irqs)
+       if (desc->kstat_irqs) {
                for_each_online_cpu(j)
-                       any_count |= *per_cpu_ptr(desc->kstat_irqs, j);
+                       any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
+       }

        if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
                goto outsparse;
...