Commit 64a267e9 authored by Marc Zyngier

irqchip/gic: Configure SGIs as standard interrupts

Change the way we deal with GIC SGIs by turning them into proper
IRQs, and calling into the arch code to register the interrupt range
instead of a callback.
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
parent 7ec46b51
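Note: the "arch code" mentioned above is the other half of this change and does not appear in the diff below. The following is a rough sketch of what an arch-side set_smp_ipi_range() implementation is expected to do with the SGI range that gic_smp_init() now allocates, and how an IPI is then raised through the new .ipi_send_mask callback. set_smp_ipi_range(), request_percpu_irq(), irq_to_desc() and __ipi_send_mask() are existing kernel interfaces; MAX_IPI, nr_ipi, ipi_desc[], ipi_dummy_dev and ipi_handler() are illustrative names only, not code from this patch.

/* Sketch only: simplified arch-side consumer of the SGI IRQ range. */
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/smp.h>

#define MAX_IPI         8       /* assumed number of IPI types */

static struct irq_desc *ipi_desc[MAX_IPI];
static int ipi_irq_base __read_mostly;
static int nr_ipi __read_mostly;
static DEFINE_PER_CPU(int, ipi_dummy_dev);  /* per-CPU cookie for request_percpu_irq() */

static irqreturn_t ipi_handler(int irq, void *data)
{
        /* irq - ipi_irq_base identifies the IPI type; dispatch it here. */
        return IRQ_HANDLED;
}

/* Called by the irqchip (see gic_smp_init() below) with the first SGI's Linux IRQ. */
void __init set_smp_ipi_range(int ipi_base, int n)
{
        int i;

        nr_ipi = min(n, MAX_IPI);

        for (i = 0; i < nr_ipi; i++) {
                /* Each SGI is now an ordinary per-CPU IRQ with a regular handler. */
                WARN_ON(request_percpu_irq(ipi_base + i, ipi_handler,
                                           "IPI", &ipi_dummy_dev));
                ipi_desc[i] = irq_to_desc(ipi_base + i);
        }

        ipi_irq_base = ipi_base;
}

/* Raising an IPI now goes through the chip's .ipi_send_mask (gic_ipi_send_mask). */
static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
{
        __ipi_send_mask(ipi_desc[ipinr], target);
}

This mirrors the overall shape of the arm/arm64 SMP code that consumes this interface; the real implementations add IPI accounting, tracing and a proper demultiplexer in ipi_handler().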
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -124,6 +124,8 @@ static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
 
 static struct gic_kvm_info gic_v2_kvm_info;
 
+static DEFINE_PER_CPU(u32, sgi_intid);
+
 #ifdef CONFIG_GIC_NON_BANKED
 static void __iomem *gic_get_percpu_base(union gic_base *base)
 {
@@ -226,16 +228,26 @@ static void gic_unmask_irq(struct irq_data *d)
 
 static void gic_eoi_irq(struct irq_data *d)
 {
-        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+        u32 hwirq = gic_irq(d);
+
+        if (hwirq < 16)
+                hwirq = this_cpu_read(sgi_intid);
+
+        writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
 static void gic_eoimode1_eoi_irq(struct irq_data *d)
 {
+        u32 hwirq = gic_irq(d);
+
         /* Do not deactivate an IRQ forwarded to a vcpu. */
         if (irqd_is_forwarded_to_vcpu(d))
                 return;
 
-        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+        if (hwirq < 16)
+                hwirq = this_cpu_read(sgi_intid);
+
+        writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
 }
 
 static int gic_irq_set_irqchip_state(struct irq_data *d,
@@ -295,7 +307,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 
         /* Interrupt configuration for SGIs can't be changed */
         if (gicirq < 16)
-                return -EINVAL;
+                return type == IRQ_TYPE_EDGE_RISING ? 0 : -EINVAL;
 
         /* SPIs have restrictions on the supported types */
         if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
@@ -315,7 +327,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
 {
         /* Only interrupts on the primary GIC can be forwarded to a vcpu. */
-        if (cascading_gic_irq(d))
+        if (cascading_gic_irq(d) || gic_irq(d) < 16)
                 return -EINVAL;
 
         if (vcpu)
@@ -335,31 +347,33 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
                 irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
                 irqnr = irqstat & GICC_IAR_INT_ID_MASK;
 
-                if (likely(irqnr > 15 && irqnr < 1020)) {
-                        if (static_branch_likely(&supports_deactivate_key))
-                                writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-                        isb();
-                        handle_domain_irq(gic->domain, irqnr, regs);
-                        continue;
-                }
-                if (irqnr < 16) {
-                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
-                        if (static_branch_likely(&supports_deactivate_key))
-                                writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
-#ifdef CONFIG_SMP
-                        /*
-                         * Ensure any shared data written by the CPU sending
-                         * the IPI is read after we've read the ACK register
-                         * on the GIC.
-                         *
-                         * Pairs with the write barrier in gic_raise_softirq
-                         */
+                if (unlikely(irqnr >= 1020))
+                        break;
+
+                if (static_branch_likely(&supports_deactivate_key))
+                        writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
+                isb();
+
+                /*
+                 * Ensure any shared data written by the CPU sending the IPI
+                 * is read after we've read the ACK register on the GIC.
+                 *
+                 * Pairs with the write barrier in gic_ipi_send_mask
+                 */
+                if (irqnr <= 15) {
                         smp_rmb();
-                        handle_IPI(irqnr, regs);
-#endif
-                        continue;
+
+                        /*
+                         * The GIC encodes the source CPU in GICC_IAR,
+                         * leading to the deactivation to fail if not
+                         * written back as is to GICC_EOI. Stash the INTID
+                         * away for gic_eoi_irq() to write back. This only
+                         * works because we don't nest SGIs...
+                         */
+                        this_cpu_write(sgi_intid, irqstat);
                 }
-                break;
+
+                handle_domain_irq(gic->domain, irqnr, regs);
         } while (1);
 }
@@ -793,14 +807,14 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
         return IRQ_SET_MASK_OK_DONE;
 }
 
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
 {
         int cpu;
         unsigned long flags, map = 0;
 
         if (unlikely(nr_cpu_ids == 1)) {
                 /* Only one CPU? let's do a self-IPI... */
-                writel_relaxed(2 << 24 | irq,
+                writel_relaxed(2 << 24 | d->hwirq,
                                gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
                 return;
         }
@@ -818,7 +832,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
         dmb(ishst);
 
         /* this always happens on GIC0 */
-        writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+        writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 
         gic_unlock_irqrestore(flags);
 }
@@ -831,14 +845,28 @@ static int gic_starting_cpu(unsigned int cpu)
 static __init void gic_smp_init(void)
 {
-        set_smp_cross_call(gic_raise_softirq);
+        struct irq_fwspec sgi_fwspec = {
+                .fwnode = gic_data[0].domain->fwnode,
+                .param_count = 1,
+        };
+        int base_sgi;
+
         cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                                   "irqchip/arm/gic:starting",
                                   gic_starting_cpu, NULL);
+
+        base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
+                                           NUMA_NO_NODE, &sgi_fwspec,
+                                           false, NULL);
+        if (WARN_ON(base_sgi <= 0))
+                return;
+
+        set_smp_ipi_range(base_sgi, 8);
 }
 
 #else
 #define gic_smp_init()          do { } while(0)
 #define gic_set_affinity        NULL
+#define gic_ipi_send_mask       NULL
 #endif
 
 #ifdef CONFIG_BL_SWITCHER
@@ -985,15 +1013,24 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 {
         struct gic_chip_data *gic = d->host_data;
 
-        if (hw < 32) {
+        switch (hw) {
+        case 0 ... 15:
+                irq_set_percpu_devid(irq);
+                irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+                                    handle_percpu_devid_fasteoi_ipi,
+                                    NULL, NULL);
+                break;
+        case 16 ... 31:
                 irq_set_percpu_devid(irq);
                 irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
                                     handle_percpu_devid_irq, NULL, NULL);
-        } else {
+                break;
+        default:
                 irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
                                     handle_fasteoi_irq, NULL, NULL);
                 irq_set_probe(irq);
                 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+                break;
         }
         return 0;
 }
@@ -1007,19 +1044,26 @@ static int gic_irq_domain_translate(struct irq_domain *d,
                                     unsigned long *hwirq,
                                     unsigned int *type)
 {
+        if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+                *hwirq = fwspec->param[0];
+                *type = IRQ_TYPE_EDGE_RISING;
+                return 0;
+        }
+
         if (is_of_node(fwspec->fwnode)) {
                 if (fwspec->param_count < 3)
                         return -EINVAL;
 
-                /* Get the interrupt number and add 16 to skip over SGIs */
-                *hwirq = fwspec->param[1] + 16;
-
-                /*
-                 * For SPIs, we need to add 16 more to get the GIC irq
-                 * ID number
-                 */
-                if (!fwspec->param[0])
-                        *hwirq += 16;
+                switch (fwspec->param[0]) {
+                case 0:         /* SPI */
+                        *hwirq = fwspec->param[1] + 32;
+                        break;
+                case 1:         /* PPI */
+                        *hwirq = fwspec->param[1] + 16;
+                        break;
+                default:
+                        return -EINVAL;
+                }
 
                 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
@@ -1088,8 +1132,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
                 gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
         }
 
-        if (gic == &gic_data[0])
+        if (gic == &gic_data[0]) {
                 gic->chip.irq_set_affinity = gic_set_affinity;
+                gic->chip.ipi_send_mask = gic_ipi_send_mask;
+        }
 }
 
 static int gic_init_bases(struct gic_chip_data *gic,
......