Commit 5931e4eb authored by Thomas Gleixner

Merge branch 'irq/core' into irq/urgent

Pull in the upstream changes so a fix for them can be applied.
parents ce7980ae 6f3ee0e2
@@ -1277,11 +1277,11 @@ Manfred Spraul points out that you can still do this, even if the data
 is very occasionally accessed in user context or softirqs/tasklets. The
 irq handler doesn't use a lock, and all other accesses are done as so::
-        spin_lock(&lock);
+        mutex_lock(&lock);
         disable_irq(irq);
         ...
         enable_irq(irq);
-        spin_unlock(&lock);
+        mutex_unlock(&lock);
 The disable_irq() prevents the irq handler from running
 (and waits for it to finish if it's currently running on other CPUs).
......
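For readers unfamiliar with the pattern the documentation describes, here is a minimal hedged sketch of a driver using it; all names (demo_lock, demo_counter, demo_irq, demo_handler) are hypothetical and only illustrate the mutex-plus-disable_irq scheme with a lock-free interrupt handler.

    #include <linux/interrupt.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(demo_lock);   /* hypothetical lock for sleepable contexts */
    static int demo_counter;          /* data shared with the irq handler */
    static unsigned int demo_irq;     /* irq number obtained elsewhere */

    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            demo_counter++;           /* no lock taken in the handler */
            return IRQ_HANDLED;
    }

    static void demo_update_from_task_context(int value)
    {
            mutex_lock(&demo_lock);   /* serialize against other sleepable users */
            disable_irq(demo_irq);    /* handler cannot run concurrently now */
            demo_counter = value;
            enable_irq(demo_irq);
            mutex_unlock(&demo_lock);
    }

The sleeping mutex is why the documentation hunk above replaces spin_lock() with mutex_lock(): disable_irq() may itself sleep, so the outer lock must be a sleepable one.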
@@ -1307,11 +1307,11 @@ se i dati vengono occasionalmente utilizzati da un contesto utente o
 da un'interruzione software. Il gestore d'interruzione non utilizza alcun
 *lock*, e tutti gli altri accessi verranno fatti così::
-        spin_lock(&lock);
+        mutex_lock(&lock);
         disable_irq(irq);
         ...
         enable_irq(irq);
-        spin_unlock(&lock);
+        mutex_unlock(&lock);
 La funzione disable_irq() impedisce al gestore d'interruzioni
 d'essere eseguito (e aspetta che finisca nel caso fosse in esecuzione su
......
@@ -10942,6 +10942,8 @@ L:	linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
 F:	kernel/irq/
+F:	include/linux/group_cpus.h
+F:	lib/group_cpus.c
 IRQCHIP DRIVERS
 M:	Thomas Gleixner <tglx@linutronix.de>
......
@@ -2364,9 +2364,8 @@ static int mp_irqdomain_create(int ioapic)
         return -ENODEV;
     }
-    ip->irqdomain = irq_domain_create_linear(fn, hwirqs, cfg->ops,
-                                             (void *)(long)ioapic);
+    ip->irqdomain = irq_domain_create_hierarchy(parent, 0, hwirqs, fn, cfg->ops,
+                                                (void *)(long)ioapic);
     if (!ip->irqdomain) {
         /* Release fw handle if it was allocated above */
         if (!cfg->dev)
@@ -2374,8 +2373,6 @@ static int mp_irqdomain_create(int ioapic)
         return -ENOMEM;
     }
-    ip->irqdomain->parent = parent;
-
     if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
         cfg->type == IOAPIC_DOMAIN_STRICT)
         ioapic_dynirq_base = max(ioapic_dynirq_base,
......
@@ -166,10 +166,9 @@ static struct irq_domain *uv_get_irq_domain(void)
     if (!fn)
         goto out;
-    uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
-    if (uv_domain)
-        uv_domain->parent = x86_vector_domain;
-    else
+    uv_domain = irq_domain_create_hierarchy(x86_vector_domain, 0, 0, fn,
+                                            &uv_domain_ops, NULL);
+    if (!uv_domain)
         irq_domain_free_fwnode(fn);
 out:
     mutex_unlock(&uv_lock);
......
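The same conversion recurs throughout this merge: instead of creating a tree or linear domain and then reaching into the structure to set ->parent by hand, call sites now pass the parent to irq_domain_create_hierarchy() up front. A hedged before/after sketch of the pattern, with hypothetical names (demo_domain_ops, demo_init_domain_*), is shown here; the argument order is taken directly from the hunks in this series.

    #include <linux/irqdomain.h>

    /* Old style: create the domain, then open-code the parent link. */
    static int demo_init_domain_old(struct irq_domain *parent,
                                    struct fwnode_handle *fwnode,
                                    const struct irq_domain_ops *ops, void *priv)
    {
            struct irq_domain *d;

            d = irq_domain_create_tree(fwnode, ops, priv);
            if (!d)
                    return -ENOMEM;
            d->parent = parent;      /* manual parent assignment */
            return 0;
    }

    /* New style: parent, flags and size are handed to the core in one call. */
    static int demo_init_domain_new(struct irq_domain *parent,
                                    struct fwnode_handle *fwnode,
                                    const struct irq_domain_ops *ops, void *priv)
    {
            struct irq_domain *d;

            /* flags = 0; size = 0 keeps the tree (radix) revmap, as before */
            d = irq_domain_create_hierarchy(parent, 0, 0, fwnode, ops, priv);
            if (!d)
                    return -ENOMEM;
            return 0;
    }

Letting the core set up the hierarchy in one step matters because the irqdomain changes later in this merge move locking onto the root domain, which the core can only do if it knows the parent at creation time.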
@@ -10,66 +10,29 @@
 #include <linux/mm.h>
 #include <linux/smp.h>
 #include <linux/cpu.h>
+#include <linux/group_cpus.h>
 #include <linux/blk-mq.h>
 #include "blk.h"
 #include "blk-mq.h"
-static int queue_index(struct blk_mq_queue_map *qmap,
-                       unsigned int nr_queues, const int q)
-{
-    return qmap->queue_offset + (q % nr_queues);
-}
-
-static int get_first_sibling(unsigned int cpu)
-{
-    unsigned int ret;
-
-    ret = cpumask_first(topology_sibling_cpumask(cpu));
-    if (ret < nr_cpu_ids)
-        return ret;
-
-    return cpu;
-}
-
 void blk_mq_map_queues(struct blk_mq_queue_map *qmap)
 {
-    unsigned int *map = qmap->mq_map;
-    unsigned int nr_queues = qmap->nr_queues;
-    unsigned int cpu, first_sibling, q = 0;
+    const struct cpumask *masks;
+    unsigned int queue, cpu;
-    for_each_possible_cpu(cpu)
-        map[cpu] = -1;
-
-    /*
-     * Spread queues among present CPUs first for minimizing
-     * count of dead queues which are mapped by all un-present CPUs
-     */
-    for_each_present_cpu(cpu) {
-        if (q >= nr_queues)
-            break;
-        map[cpu] = queue_index(qmap, nr_queues, q++);
+    masks = group_cpus_evenly(qmap->nr_queues);
+    if (!masks) {
+        for_each_possible_cpu(cpu)
+            qmap->mq_map[cpu] = qmap->queue_offset;
+        return;
     }
-    for_each_possible_cpu(cpu) {
-        if (map[cpu] != -1)
-            continue;
-        /*
-         * First do sequential mapping between CPUs and queues.
-         * In case we still have CPUs to map, and we have some number of
-         * threads per cores then map sibling threads to the same queue
-         * for performance optimizations.
-         */
-        if (q < nr_queues) {
-            map[cpu] = queue_index(qmap, nr_queues, q++);
-        } else {
-            first_sibling = get_first_sibling(cpu);
-            if (first_sibling == cpu)
-                map[cpu] = queue_index(qmap, nr_queues, q++);
-            else
-                map[cpu] = map[first_sibling];
-        }
+    for (queue = 0; queue < qmap->nr_queues; queue++) {
+        for_each_cpu(cpu, &masks[queue])
+            qmap->mq_map[cpu] = qmap->queue_offset + queue;
     }
+    kfree(masks);
 }
 EXPORT_SYMBOL_GPL(blk_mq_map_queues);
......
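For reference, a small hedged sketch of how a caller outside blk-mq might consume group_cpus_evenly(): the function returns a kcalloc'd array of nr_groups cpumasks that the caller owns and must kfree(). The helper name demo_spread and the pr_info() reporting are illustrative only.

    #include <linux/group_cpus.h>
    #include <linux/printk.h>
    #include <linux/slab.h>

    static int demo_spread(unsigned int nr_groups)
    {
            struct cpumask *masks;
            unsigned int grp, cpu;

            masks = group_cpus_evenly(nr_groups);  /* one mask per group */
            if (!masks)
                    return -ENOMEM;

            for (grp = 0; grp < nr_groups; grp++)
                    for_each_cpu(cpu, &masks[grp])
                            pr_info("cpu %u -> group %u\n", cpu, grp);

            kfree(masks);                          /* caller owns the array */
            return 0;
    }

This is exactly the shape of the new blk_mq_map_queues() above: iterate the groups, assign each CPU in a group to the corresponding queue, then free the mask array.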
@@ -389,7 +389,7 @@ config LS_EXTIRQ
 config LS_SCFG_MSI
     def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
-    depends on PCI && PCI_MSI
+    depends on PCI_MSI
 config PARTITION_PERCPU
     bool
@@ -658,6 +658,7 @@ config APPLE_AIC
     bool "Apple Interrupt Controller (AIC)"
     depends on ARM64
     depends on ARCH_APPLE || COMPILE_TEST
+    select GENERIC_IRQ_IPI_MUX
     help
       Support for the Apple Interrupt Controller found on Apple Silicon SoCs,
       such as the M1.
......
@@ -199,21 +199,20 @@ static int alpine_msix_init_domains(struct alpine_msix_data *priv,
     }
     gic_domain = irq_find_host(gic_node);
+    of_node_put(gic_node);
     if (!gic_domain) {
         pr_err("Failed to find the GIC domain\n");
         return -ENXIO;
     }
-    middle_domain = irq_domain_add_tree(NULL,
-                                        &alpine_msix_middle_domain_ops,
-                                        priv);
+    middle_domain = irq_domain_add_hierarchy(gic_domain, 0, 0, NULL,
+                                             &alpine_msix_middle_domain_ops,
+                                             priv);
     if (!middle_domain) {
         pr_err("Failed to create the MSIX middle domain\n");
         return -ENOMEM;
     }
-    middle_domain->parent = gic_domain;
-
     msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node),
                                            &alpine_msix_domain_info,
                                            middle_domain);
......
@@ -292,7 +292,6 @@ struct aic_irq_chip {
     void __iomem *base;
     void __iomem *event;
     struct irq_domain *hw_domain;
-    struct irq_domain *ipi_domain;
     struct {
         cpumask_t aff;
     } *fiq_aff[AIC_NR_FIQ];
@@ -307,9 +306,6 @@ struct aic_irq_chip {
 static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
-static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);
-
 static struct aic_irq_chip *aic_irqc;
 static void aic_handle_ipi(struct pt_regs *regs);
@@ -751,98 +747,8 @@ static void aic_ipi_send_fast(int cpu)
     isb();
 }
-static void aic_ipi_mask(struct irq_data *d)
-{
-    u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-    /* No specific ordering requirements needed here. */
-    atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-}
-
-static void aic_ipi_unmask(struct irq_data *d)
-{
-    struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-    u32 irq_bit = BIT(irqd_to_hwirq(d));
-
-    atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));
-
-    /*
-     * The atomic_or() above must complete before the atomic_read()
-     * below to avoid racing aic_ipi_send_mask().
-     */
-    smp_mb__after_atomic();
-
-    /*
-     * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
-     * No barriers needed here since this is a self-IPI.
-     */
-    if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
-        if (static_branch_likely(&use_fast_ipi))
-            aic_ipi_send_fast(smp_processor_id());
-        else
-            aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
-    }
-}
-
-static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
-{
-    struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
-    u32 irq_bit = BIT(irqd_to_hwirq(d));
-    u32 send = 0;
-    int cpu;
-    unsigned long pending;
-
-    for_each_cpu(cpu, mask) {
-        /*
-         * This sequence is the mirror of the one in aic_ipi_unmask();
-         * see the comment there. Additionally, release semantics
-         * ensure that the vIPI flag set is ordered after any shared
-         * memory accesses that precede it. This therefore also pairs
-         * with the atomic_fetch_andnot in aic_handle_ipi().
-         */
-        pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));
-
-        /*
-         * The atomic_fetch_or_release() above must complete before the
-         * atomic_read() below to avoid racing aic_ipi_unmask().
-         */
-        smp_mb__after_atomic();
-
-        if (!(pending & irq_bit) &&
-            (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
-            if (static_branch_likely(&use_fast_ipi))
-                aic_ipi_send_fast(cpu);
-            else
-                send |= AIC_IPI_SEND_CPU(cpu);
-        }
-    }
-
-    /*
-     * The flag writes must complete before the physical IPI is issued
-     * to another CPU. This is implied by the control dependency on
-     * the result of atomic_read_acquire() above, which is itself
-     * already ordered after the vIPI flag write.
-     */
-    if (send)
-        aic_ic_write(ic, AIC_IPI_SEND, send);
-}
-
-static struct irq_chip ipi_chip = {
-    .name = "AIC-IPI",
-    .irq_mask = aic_ipi_mask,
-    .irq_unmask = aic_ipi_unmask,
-    .ipi_send_mask = aic_ipi_send_mask,
-};
-
-/*
- * IPI IRQ domain
- */
-
 static void aic_handle_ipi(struct pt_regs *regs)
 {
-    int i;
-    unsigned long enabled, firing;
-
     /*
      * Ack the IPI. We need to order this after the AIC event read, but
      * that is enforced by normal MMIO ordering guarantees.
@@ -857,27 +763,7 @@ static void aic_handle_ipi(struct pt_regs *regs)
         aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
     }
-    /*
-     * The mask read does not need to be ordered. Only we can change
-     * our own mask anyway, so no races are possible here, as long as
-     * we are properly in the interrupt handler (which is covered by
-     * the barrier that is part of the top-level AIC handler's readl()).
-     */
-    enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));
-
-    /*
-     * Clear the IPIs we are about to handle. This pairs with the
-     * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
-     * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
-     * before IPI handling code (to avoid races handling vIPIs before they
-     * are signaled). The former is taken care of by the release semantics
-     * of the write portion, while the latter is taken care of by the
-     * acquire semantics of the read portion.
-     */
-    firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;
-
-    for_each_set_bit(i, &firing, AIC_NR_SWIPI)
-        generic_handle_domain_irq(aic_irqc->ipi_domain, i);
+    ipi_mux_process();
     /*
      * No ordering needed here; at worst this just changes the timing of
@@ -887,55 +773,24 @@ static void aic_handle_ipi(struct pt_regs *regs)
     aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
 }
-static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
-                         unsigned int nr_irqs, void *args)
+static void aic_ipi_send_single(unsigned int cpu)
 {
-    int i;
-
-    for (i = 0; i < nr_irqs; i++) {
-        irq_set_percpu_devid(virq + i);
-        irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
-                            handle_percpu_devid_irq, NULL, NULL);
-    }
-
-    return 0;
-}
-
-static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
-{
-    /* Not freeing IPIs */
+    if (static_branch_likely(&use_fast_ipi))
+        aic_ipi_send_fast(cpu);
+    else
+        aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
 }
-static const struct irq_domain_ops aic_ipi_domain_ops = {
-    .alloc = aic_ipi_alloc,
-    .free = aic_ipi_free,
-};
-
 static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
 {
-    struct irq_domain *ipi_domain;
     int base_ipi;
-    ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
-                                          &aic_ipi_domain_ops, irqc);
-    if (WARN_ON(!ipi_domain))
-        return -ENODEV;
-
-    ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
-    irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-
-    base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
-                                       NUMA_NO_NODE, NULL, false, NULL);
-    if (WARN_ON(!base_ipi)) {
-        irq_domain_remove(ipi_domain);
+    base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
+    if (WARN_ON(base_ipi <= 0))
         return -ENODEV;
-    }
     set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
-    irqc->ipi_domain = ipi_domain;
-
     return 0;
 }
......
@@ -454,8 +454,7 @@ static __init void armada_xp_ipi_init(struct device_node *node)
         return;
     irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-    base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, IPI_DOORBELL_END,
-                                       NUMA_NO_NODE, NULL, false, NULL);
+    base_ipi = irq_domain_alloc_irqs(ipi_domain, IPI_DOORBELL_END, NUMA_NO_NODE, NULL);
     if (WARN_ON(!base_ipi))
         return;
......
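This call-site conversion (dropping the explicit -1 irq_base, false realloc and NULL affinity arguments) repeats across the GIC, BCM2836 and AIC hunks below. Based purely on the arguments removed at these call sites, the irq_domain_alloc_irqs() helper behaves roughly like the following sketch; the demo_ prefix marks it as an approximation, not the verbatim kernel definition.

    /* Approximation derived from the before/after call sites in this series. */
    static inline int demo_irq_domain_alloc_irqs(struct irq_domain *domain,
                                                 unsigned int nr_irqs,
                                                 int node, void *arg)
    {
            /* irq_base = -1 (core picks), realloc = false, no affinity hint */
            return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg,
                                           false, NULL);
    }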
@@ -17,8 +17,9 @@
 #define ASPEED_SCU_IC_REG           0x018
 #define ASPEED_SCU_IC_SHIFT         0
-#define ASPEED_SCU_IC_ENABLE        GENMASK(6, ASPEED_SCU_IC_SHIFT)
+#define ASPEED_SCU_IC_ENABLE        GENMASK(15, ASPEED_SCU_IC_SHIFT)
 #define ASPEED_SCU_IC_NUM_IRQS      7
+#define ASPEED_SCU_IC_STATUS        GENMASK(28, 16)
 #define ASPEED_SCU_IC_STATUS_SHIFT  16
 #define ASPEED_AST2600_SCU_IC0_REG  0x560
@@ -155,6 +156,8 @@ static int aspeed_scu_ic_of_init_common(struct aspeed_scu_ic *scu_ic,
         rc = PTR_ERR(scu_ic->scu);
         goto err;
     }
+    regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_STATUS, ASPEED_SCU_IC_STATUS);
+    regmap_write_bits(scu_ic->scu, scu_ic->reg, ASPEED_SCU_IC_ENABLE, 0);
     irq = irq_of_parse_and_map(node, 0);
     if (!irq) {
......
@@ -268,10 +268,7 @@ static void __init bcm2836_arm_irqchip_smp_init(void)
     ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
     irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
-    base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, BITS_PER_MBOX,
-                                       NUMA_NO_NODE, NULL,
-                                       false, NULL);
+    base_ipi = irq_domain_alloc_irqs(ipi_domain, BITS_PER_MBOX, NUMA_NO_NODE, NULL);
     if (WARN_ON(!base_ipi))
         return;
......
@@ -279,7 +279,8 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
         flags |= IRQ_GC_BE_IO;
     ret = irq_alloc_domain_generic_chips(data->domain, IRQS_PER_WORD, 1,
-                dn->full_name, handle_level_irq, clr, 0, flags);
+                dn->full_name, handle_level_irq, clr,
+                IRQ_LEVEL, flags);
     if (ret) {
         pr_err("failed to allocate generic irq chip\n");
         goto out_free_domain;
......
@@ -161,6 +161,7 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
                                           *init_params)
 {
     unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+    unsigned int set = 0;
     struct brcmstb_l2_intc_data *data;
     struct irq_chip_type *ct;
     int ret;
@@ -208,9 +209,12 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
     if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
         flags |= IRQ_GC_BE_IO;
+    if (init_params->handler == handle_level_irq)
+        set |= IRQ_LEVEL;
+
     /* Allocate a single Generic IRQ chip for this node */
     ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
-            np->full_name, init_params->handler, clr, 0, flags);
+            np->full_name, init_params->handler, clr, set, flags);
     if (ret) {
         pr_err("failed to allocate generic irq chip\n");
         goto out_free_domain;
......
@@ -287,15 +287,14 @@ static __init int gicv2m_allocate_domains(struct irq_domain *parent)
     if (!v2m)
         return 0;
-    inner_domain = irq_domain_create_tree(v2m->fwnode,
-                                          &gicv2m_domain_ops, v2m);
+    inner_domain = irq_domain_create_hierarchy(parent, 0, 0, v2m->fwnode,
+                                               &gicv2m_domain_ops, v2m);
     if (!inner_domain) {
         pr_err("Failed to create GICv2m domain\n");
         return -ENOMEM;
     }
     irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
-    inner_domain->parent = parent;
     pci_domain = pci_msi_create_irq_domain(v2m->fwnode,
                                            &gicv2m_msi_domain_info,
                                            inner_domain);
......
@@ -4909,18 +4909,19 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
     if (!info)
         return -ENOMEM;
-    inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
+    info->ops = &its_msi_domain_ops;
+    info->data = its;
+
+    inner_domain = irq_domain_create_hierarchy(its_parent,
+                                               its->msi_domain_flags, 0,
+                                               handle, &its_domain_ops,
+                                               info);
     if (!inner_domain) {
         kfree(info);
         return -ENOMEM;
     }
-    inner_domain->parent = its_parent;
     irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
-    inner_domain->flags |= its->msi_domain_flags;
-    info->ops = &its_msi_domain_ops;
-    info->data = its;
-    inner_domain->host_data = info;
     return 0;
 }
......
@@ -233,13 +233,12 @@ static int mbi_allocate_domains(struct irq_domain *parent)
     struct irq_domain *nexus_domain, *pci_domain, *plat_domain;
     int err;
-    nexus_domain = irq_domain_create_tree(parent->fwnode,
-                                          &mbi_domain_ops, NULL);
+    nexus_domain = irq_domain_create_hierarchy(parent, 0, 0, parent->fwnode,
+                                               &mbi_domain_ops, NULL);
     if (!nexus_domain)
         return -ENOMEM;
     irq_domain_update_bus_token(nexus_domain, DOMAIN_BUS_NEXUS);
-    nexus_domain->parent = parent;
     err = mbi_allocate_pci_domain(nexus_domain, &pci_domain);
......
@@ -1310,9 +1310,7 @@ static void __init gic_smp_init(void)
                       gic_starting_cpu, NULL);
     /* Register all 8 non-secure SGIs */
-    base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8,
-                                       NUMA_NO_NODE, &sgi_fwspec,
-                                       false, NULL);
+    base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
     if (WARN_ON(base_sgi <= 0))
         return;
......
@@ -139,9 +139,7 @@ static int its_alloc_vcpu_sgis(struct its_vpe *vpe, int idx)
     if (!vpe->sgi_domain)
         goto err;
-    sgi_base = __irq_domain_alloc_irqs(vpe->sgi_domain, -1, 16,
-                                       NUMA_NO_NODE, vpe,
-                                       false, NULL);
+    sgi_base = irq_domain_alloc_irqs(vpe->sgi_domain, 16, NUMA_NO_NODE, vpe);
     if (sgi_base <= 0)
         goto err;
@@ -176,9 +174,8 @@ int its_alloc_vcpu_irqs(struct its_vm *vm)
         vm->vpes[i]->idai = true;
     }
-    vpe_base_irq = __irq_domain_alloc_irqs(vm->domain, -1, vm->nr_vpes,
-                                           NUMA_NO_NODE, vm,
-                                           false, NULL);
+    vpe_base_irq = irq_domain_alloc_irqs(vm->domain, vm->nr_vpes,
+                                         NUMA_NO_NODE, vm);
     if (vpe_base_irq <= 0)
         goto err;
......
@@ -868,9 +868,7 @@ static __init void gic_smp_init(void)
                       "irqchip/arm/gic:starting",
                       gic_starting_cpu, NULL);
-    base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
-                                       NUMA_NO_NODE, &sgi_fwspec,
-                                       false, NULL);
+    base_sgi = irq_domain_alloc_irqs(gic_data[0].domain, 8, NUMA_NO_NODE, &sgi_fwspec);
     if (WARN_ON(base_sgi <= 0))
         return;
......
@@ -55,6 +55,8 @@ struct liointc_priv {
     struct liointc_handler_data handler[LIOINTC_NUM_PARENT];
     void __iomem               *core_isr[LIOINTC_NUM_CORES];
     u8                         map_cache[LIOINTC_CHIP_IRQ];
+    u32                        int_pol;
+    u32                        int_edge;
     bool                       has_lpc_irq_errata;
 };
@@ -138,6 +140,14 @@ static int liointc_set_type(struct irq_data *data, unsigned int type)
     return 0;
 }
+static void liointc_suspend(struct irq_chip_generic *gc)
+{
+    struct liointc_priv *priv = gc->private;
+
+    priv->int_pol = readl(gc->reg_base + LIOINTC_REG_INTC_POL);
+    priv->int_edge = readl(gc->reg_base + LIOINTC_REG_INTC_EDGE);
+}
+
 static void liointc_resume(struct irq_chip_generic *gc)
 {
     struct liointc_priv *priv = gc->private;
@@ -150,6 +160,8 @@ static void liointc_resume(struct irq_chip_generic *gc)
     /* Restore map cache */
     for (i = 0; i < LIOINTC_CHIP_IRQ; i++)
         writeb(priv->map_cache[i], gc->reg_base + i);
+    writel(priv->int_pol, gc->reg_base + LIOINTC_REG_INTC_POL);
+    writel(priv->int_edge, gc->reg_base + LIOINTC_REG_INTC_EDGE);
     /* Restore mask cache */
     writel(gc->mask_cache, gc->reg_base + LIOINTC_REG_INTC_ENABLE);
     irq_gc_unlock_irqrestore(gc, flags);
@@ -269,6 +281,7 @@ static int liointc_init(phys_addr_t addr, unsigned long size, int revision,
     gc->private = priv;
     gc->reg_base = base;
     gc->domain = domain;
+    gc->suspend = liointc_suspend;
     gc->resume = liointc_resume;
     ct = gc->chip_types;
......
@@ -163,16 +163,15 @@ static int pch_msi_init_domains(struct pch_msi_data *priv,
 {
     struct irq_domain *middle_domain, *msi_domain;
-    middle_domain = irq_domain_create_linear(domain_handle,
-                                             priv->num_irqs,
-                                             &pch_msi_middle_domain_ops,
-                                             priv);
+    middle_domain = irq_domain_create_hierarchy(parent, 0, priv->num_irqs,
+                                                domain_handle,
+                                                &pch_msi_middle_domain_ops,
+                                                priv);
     if (!middle_domain) {
         pr_err("Failed to create the MSI middle domain\n");
         return -ENOMEM;
     }
-    middle_domain->parent = parent;
     irq_domain_update_bus_token(middle_domain, DOMAIN_BUS_NEXUS);
     msi_domain = pci_msi_create_irq_domain(domain_handle,
......
@@ -221,6 +221,7 @@ static int mvebu_gicp_probe(struct platform_device *pdev)
     }
     parent_domain = irq_find_host(irq_parent_dn);
+    of_node_put(irq_parent_dn);
     if (!parent_domain) {
         dev_err(&pdev->dev, "failed to find parent IRQ domain\n");
         return -ENODEV;
......
@@ -161,7 +161,7 @@ static struct msi_domain_info odmi_msi_domain_info = {
 static int __init mvebu_odmi_init(struct device_node *node,
                                   struct device_node *parent)
 {
-    struct irq_domain *inner_domain, *plat_domain;
+    struct irq_domain *parent_domain, *inner_domain, *plat_domain;
     int ret, i;
     if (of_property_read_u32(node, "marvell,odmi-frames", &odmis_count))
@@ -197,16 +197,17 @@ static int __init mvebu_odmi_init(struct device_node *node,
         }
     }
-    inner_domain = irq_domain_create_linear(of_node_to_fwnode(node),
-                                            odmis_count * NODMIS_PER_FRAME,
-                                            &odmi_domain_ops, NULL);
+    parent_domain = irq_find_host(parent);
+
+    inner_domain = irq_domain_create_hierarchy(parent_domain, 0,
+                                               odmis_count * NODMIS_PER_FRAME,
+                                               of_node_to_fwnode(node),
+                                               &odmi_domain_ops, NULL);
     if (!inner_domain) {
         ret = -ENOMEM;
         goto err_unmap;
     }
-    inner_domain->parent = irq_find_host(parent);
-
     plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(node),
                                                  &odmi_msi_domain_info,
                                                  inner_domain);
......
@@ -236,6 +236,7 @@ static int ti_sci_intr_irq_domain_probe(struct platform_device *pdev)
     }
     parent_domain = irq_find_host(parent_node);
+    of_node_put(parent_node);
     if (!parent_domain) {
         dev_err(dev, "Failed to find IRQ parent domain\n");
         return -ENODEV;
......
@@ -38,8 +38,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
     struct device_node *par_np = of_irq_find_parent(np);
     of_irq_init_cb_t irq_init_cb = of_device_get_match_data(&pdev->dev);
-    if (!irq_init_cb)
+    if (!irq_init_cb) {
+        of_node_put(par_np);
         return -EINVAL;
+    }
     if (par_np == np)
         par_np = NULL;
@@ -52,8 +54,10 @@ int platform_irqchip_probe(struct platform_device *pdev)
      * interrupt controller. The actual initialization callback of this
      * interrupt controller can check for specific domains as necessary.
      */
-    if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY))
+    if (par_np && !irq_find_matching_host(par_np, DOMAIN_BUS_ANY)) {
+        of_node_put(par_np);
         return -EPROBE_DEFER;
+    }
     return irq_init_cb(np, par_np);
 }
......
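Several hunks in this merge (alpine-msix, mvebu-gicp, ti-sci-intr and the platform probe above) apply the same reference-counting rule: a device_node returned by of_irq_find_parent() or looked up from the tree holds a reference that must be dropped on every exit path. A hedged sketch of the pattern, with a hypothetical demo_probe(), is shown below.

    #include <linux/of.h>
    #include <linux/of_irq.h>
    #include <linux/irqdomain.h>

    static int demo_probe(struct device_node *np)
    {
            struct device_node *parent = of_irq_find_parent(np);
            struct irq_domain *domain;

            if (!parent)
                    return -ENODEV;

            domain = irq_find_host(parent);
            of_node_put(parent);      /* drop the reference whether or not it matched */
            if (!domain)
                    return -EPROBE_DEFER;

            return 0;
    }

Dropping the reference right after the lookup, rather than only on the success path, is what the fixes above add: previously the error returns leaked the node reference.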
/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright (C) 2016 Thomas Gleixner.
* Copyright (C) 2016-2017 Christoph Hellwig.
*/
#ifndef __LINUX_GROUP_CPUS_H
#define __LINUX_GROUP_CPUS_H
#include <linux/kernel.h>
#include <linux/cpu.h>
struct cpumask *group_cpus_evenly(unsigned int numgrps);
#endif
@@ -1266,6 +1266,9 @@ int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest);
 int ipi_send_single(unsigned int virq, unsigned int cpu);
 int ipi_send_mask(unsigned int virq, const struct cpumask *dest);
+void ipi_mux_process(void);
+int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu));
+
 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
 /*
  * Registers a generic IRQ handling function as the top-level IRQ handler in
......
@@ -125,6 +125,8 @@ struct irq_domain_chip_generic;
  *              core code.
  * @flags:      Per irq_domain flags
  * @mapcount:   The number of mapped interrupts
+ * @mutex:      Domain lock, hierarchical domains use root domain's lock
+ * @root:       Pointer to root domain, or containing structure if non-hierarchical
  *
  * Optional elements:
  * @fwnode:     Pointer to firmware node associated with the irq_domain. Pretty easy
@@ -143,7 +145,6 @@ struct irq_domain_chip_generic;
  * Revmap data, used internally by the irq domain code:
  * @revmap_size:        Size of the linear map table @revmap[]
  * @revmap_tree:        Radix map tree for hwirqs that don't fit in the linear map
- * @revmap_mutex:       Lock for the revmap
 * @revmap:             Linear table of irq_data pointers
  */
 struct irq_domain {
@@ -153,6 +154,8 @@ struct irq_domain {
     void *host_data;
     unsigned int flags;
     unsigned int mapcount;
+    struct mutex mutex;
+    struct irq_domain *root;
     /* Optional data */
     struct fwnode_handle *fwnode;
@@ -171,7 +174,6 @@ struct irq_domain {
     irq_hw_number_t hwirq_max;
     unsigned int revmap_size;
     struct radix_tree_root revmap_tree;
-    struct mutex revmap_mutex;
     struct irq_data __rcu *revmap[];
 };
......
@@ -86,6 +86,11 @@ config GENERIC_IRQ_IPI
     depends on SMP
     select IRQ_DOMAIN_HIERARCHY
+# Generic IRQ IPI Mux support
+config GENERIC_IRQ_IPI_MUX
+    bool
+    depends on SMP
+
 # Generic MSI hierarchical interrupt domain support
 config GENERIC_MSI_IRQ
     bool
......
@@ -15,6 +15,7 @@ obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
 obj-$(CONFIG_PM_SLEEP) += pm.o
 obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o
 obj-$(CONFIG_GENERIC_IRQ_IPI) += ipi.o
+obj-$(CONFIG_GENERIC_IRQ_IPI_MUX) += ipi-mux.o
 obj-$(CONFIG_SMP) += affinity.o
 obj-$(CONFIG_GENERIC_IRQ_DEBUGFS) += debugfs.o
 obj-$(CONFIG_GENERIC_IRQ_MATRIX_ALLOCATOR) += matrix.o
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* Multiplex several virtual IPIs over a single HW IPI.
*
* Copyright The Asahi Linux Contributors
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
#define pr_fmt(fmt) "ipi-mux: " fmt
#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/percpu.h>
#include <linux/smp.h>
struct ipi_mux_cpu {
atomic_t enable;
atomic_t bits;
};
static struct ipi_mux_cpu __percpu *ipi_mux_pcpu;
static struct irq_domain *ipi_mux_domain;
static void (*ipi_mux_send)(unsigned int cpu);
static void ipi_mux_mask(struct irq_data *d)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
atomic_andnot(BIT(irqd_to_hwirq(d)), &icpu->enable);
}
static void ipi_mux_unmask(struct irq_data *d)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
u32 ibit = BIT(irqd_to_hwirq(d));
atomic_or(ibit, &icpu->enable);
/*
* The atomic_or() above must complete before the atomic_read()
* below to avoid racing ipi_mux_send_mask().
*/
smp_mb__after_atomic();
/* If a pending IPI was unmasked, raise a parent IPI immediately. */
if (atomic_read(&icpu->bits) & ibit)
ipi_mux_send(smp_processor_id());
}
static void ipi_mux_send_mask(struct irq_data *d, const struct cpumask *mask)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
u32 ibit = BIT(irqd_to_hwirq(d));
unsigned long pending;
int cpu;
for_each_cpu(cpu, mask) {
icpu = per_cpu_ptr(ipi_mux_pcpu, cpu);
/*
* This sequence is the mirror of the one in ipi_mux_unmask();
* see the comment there. Additionally, release semantics
* ensure that the vIPI flag set is ordered after any shared
* memory accesses that precede it. This therefore also pairs
* with the atomic_fetch_andnot in ipi_mux_process().
*/
pending = atomic_fetch_or_release(ibit, &icpu->bits);
/*
* The atomic_fetch_or_release() above must complete
* before the atomic_read() below to avoid racing with
* ipi_mux_unmask().
*/
smp_mb__after_atomic();
/*
* The flag writes must complete before the physical IPI is
* issued to another CPU. This is implied by the control
* dependency on the result of atomic_read() below, which is
* itself already ordered after the vIPI flag write.
*/
if (!(pending & ibit) && (atomic_read(&icpu->enable) & ibit))
ipi_mux_send(cpu);
}
}
static const struct irq_chip ipi_mux_chip = {
.name = "IPI Mux",
.irq_mask = ipi_mux_mask,
.irq_unmask = ipi_mux_unmask,
.ipi_send_mask = ipi_mux_send_mask,
};
static int ipi_mux_domain_alloc(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
int i;
for (i = 0; i < nr_irqs; i++) {
irq_set_percpu_devid(virq + i);
irq_domain_set_info(d, virq + i, i, &ipi_mux_chip, NULL,
handle_percpu_devid_irq, NULL, NULL);
}
return 0;
}
static const struct irq_domain_ops ipi_mux_domain_ops = {
.alloc = ipi_mux_domain_alloc,
.free = irq_domain_free_irqs_top,
};
/**
* ipi_mux_process - Process multiplexed virtual IPIs
*/
void ipi_mux_process(void)
{
struct ipi_mux_cpu *icpu = this_cpu_ptr(ipi_mux_pcpu);
irq_hw_number_t hwirq;
unsigned long ipis;
unsigned int en;
/*
* Reading the enable mask does not need to be ordered as long as
* this function is called from an interrupt handler, because only
* the CPU itself can change its own enable mask.
*/
en = atomic_read(&icpu->enable);
/*
* Clear the IPIs we are about to handle. This pairs with the
* atomic_fetch_or_release() in ipi_mux_send_mask().
*/
ipis = atomic_fetch_andnot(en, &icpu->bits) & en;
for_each_set_bit(hwirq, &ipis, BITS_PER_TYPE(int))
generic_handle_domain_irq(ipi_mux_domain, hwirq);
}
/**
* ipi_mux_create - Create virtual IPIs multiplexed on top of a single
* parent IPI.
* @nr_ipi: number of virtual IPIs to create. This should
* be <= BITS_PER_TYPE(int)
* @mux_send: callback to trigger parent IPI for a particular CPU
*
* Returns first virq of the newly created virtual IPIs upon success
* or <=0 upon failure
*/
int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu))
{
struct fwnode_handle *fwnode;
struct irq_domain *domain;
int rc;
if (ipi_mux_domain)
return -EEXIST;
if (BITS_PER_TYPE(int) < nr_ipi || !mux_send)
return -EINVAL;
ipi_mux_pcpu = alloc_percpu(typeof(*ipi_mux_pcpu));
if (!ipi_mux_pcpu)
return -ENOMEM;
fwnode = irq_domain_alloc_named_fwnode("IPI-Mux");
if (!fwnode) {
pr_err("unable to create IPI Mux fwnode\n");
rc = -ENOMEM;
goto fail_free_cpu;
}
domain = irq_domain_create_linear(fwnode, nr_ipi,
&ipi_mux_domain_ops, NULL);
if (!domain) {
pr_err("unable to add IPI Mux domain\n");
rc = -ENOMEM;
goto fail_free_fwnode;
}
domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
irq_domain_update_bus_token(domain, DOMAIN_BUS_IPI);
rc = irq_domain_alloc_irqs(domain, nr_ipi, NUMA_NO_NODE, NULL);
if (rc <= 0) {
pr_err("unable to alloc IRQs from IPI Mux domain\n");
goto fail_free_domain;
}
ipi_mux_domain = domain;
ipi_mux_send = mux_send;
return rc;
fail_free_domain:
irq_domain_remove(domain);
fail_free_fwnode:
irq_domain_free_fwnode(fwnode);
fail_free_cpu:
free_percpu(ipi_mux_pcpu);
return rc;
}
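The AIC conversion earlier in this merge is the first user of this API. As a hedged sketch of how an irqchip driver wires the mux up, consider the following; DEMO_NR_SWIPI, demo_hw_send_ipi() and the demo_* functions are hypothetical, and set_smp_ipi_range() is the arm64-style handover used by AIC rather than a generic requirement.

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    #define DEMO_NR_SWIPI 8           /* hypothetical number of virtual IPIs */

    /* Hypothetical hook that raises this controller's single hardware IPI. */
    static void demo_hw_send_ipi(unsigned int cpu);

    static void demo_ipi_send_single(unsigned int cpu)
    {
            demo_hw_send_ipi(cpu);
    }

    /* Called from the controller's top-level handler when the HW IPI fires. */
    static void demo_handle_hw_ipi(void)
    {
            ipi_mux_process();        /* demultiplex and handle pending virtual IPIs */
    }

    static int __init demo_init_smp(void)
    {
            int base_ipi;

            base_ipi = ipi_mux_create(DEMO_NR_SWIPI, demo_ipi_send_single);
            if (base_ipi <= 0)
                    return -ENODEV;

            /* Hand the virq range to the architecture SMP code, as AIC does. */
            set_smp_ipi_range(base_ipi, DEMO_NR_SWIPI);
            return 0;
    }

The driver keeps only two responsibilities: sending its one hardware IPI to a target CPU, and calling ipi_mux_process() when that IPI arrives; the mask/unmask/pending bookkeeping that AIC used to open-code now lives in ipi-mux.c.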
@@ -723,10 +723,13 @@ EXPORT_SYMBOL(disable_irq_nosync);
  *      to complete before returning. If you use this function while
  *      holding a resource the IRQ handler may need you will deadlock.
  *
- *      This function may be called - with care - from IRQ context.
+ *      Can only be called from preemptible code as it might sleep when
+ *      an interrupt thread is associated to @irq.
+ *
  */
 void disable_irq(unsigned int irq)
 {
+    might_sleep();
     if (!__disable_irq_nosync(irq))
         synchronize_irq(irq);
 }
......
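The kerneldoc change above tightens the allowed calling context for disable_irq(). A short hedged illustration of what is and is not allowed after this change; the demo_* functions are hypothetical.

    #include <linux/interrupt.h>

    /* Task (preemptible) context: disable_irq() may sleep while waiting for a
     * threaded handler, and the new might_sleep() catches misuse early.
     */
    static void demo_reconfigure(unsigned int irq)
    {
            disable_irq(irq);
            /* ... touch hardware while the handler is guaranteed idle ... */
            enable_irq(irq);
    }

    /* Atomic or hard-irq context has to use the non-waiting variant. */
    static void demo_kick_from_atomic(unsigned int irq)
    {
            disable_irq_nosync(irq);
    }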
@@ -353,6 +353,8 @@ obj-$(CONFIG_SBITMAP) += sbitmap.o
 obj-$(CONFIG_PARMAN) += parman.o
+obj-y += group_cpus.o
+
 # GCC library routines
 obj-$(CONFIG_GENERIC_LIB_ASHLDI3) += ashldi3.o
 obj-$(CONFIG_GENERIC_LIB_ASHRDI3) += ashrdi3.o
......