Commit b8cb642a authored by Linus Torvalds

Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull more irq updates from Thomas Gleixner:
 "The second part of irq related updates:

   - Provide EOImode for GIC[V3] irq chips, which is a prerequisite for
     direct interrupt handling in [KVM] guests"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/GIC: Fix EOImode setting for non-DT/ACPI systems
  irqchip/GIC: Don't deactivate interrupts forwarded to a guest
  irqchip/GIC: Convert to EOImode == 1
  irqchip/GICv3: Don't deactivate interrupts forwarded to a guest
  irqchip/GICv3: Convert to EOImode == 1
parents 1c8cc72d 4a6ac304
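The gist of the series, for readers unfamiliar with the two GIC EOI modes: in EOImode 0 a single EOI write both drops the running priority and deactivates the interrupt, while in EOImode 1 the EOI write only drops priority and deactivation is a separate write to the DIR register, which the host deliberately skips for interrupts forwarded to a guest so that the guest deactivates them itself. Below is a minimal stand-alone sketch of that flow; it is a toy model, not the driver code from the diff, and write_eoir()/write_dir() are hypothetical stubs standing in for the real GICC_EOIR/GICC_DIR (or ICC_EOIR1_EL1/ICC_DIR_EL1) accesses.

/*
 * Toy model of the two GIC EOI modes (hypothetical stubs, not the driver
 * code from the diff). write_eoir()/write_dir() merely print what the real
 * GICC_EOIR/GICC_DIR (or ICC_EOIR1_EL1/ICC_DIR_EL1) accesses would do.
 */
#include <stdbool.h>
#include <stdio.h>

static void write_eoir(unsigned int irq) { printf("EOIR <- %u\n", irq); }
static void write_dir(unsigned int irq)  { printf("DIR  <- %u\n", irq); }

/* EOImode == 0: a single EOI drops priority and deactivates the IRQ. */
static void handle_irq_eoimode0(unsigned int irq)
{
	/* ... run the handler ... */
	write_eoir(irq);	/* priority drop + deactivate in one write */
}

/* EOImode == 1: EOI only drops priority; deactivation is a separate step. */
static void handle_irq_eoimode1(unsigned int irq, bool forwarded_to_vcpu)
{
	write_eoir(irq);	/* priority drop only; the IRQ stays active */
	/* ... run the handler, or inject the IRQ into the guest ... */
	if (!forwarded_to_vcpu)
		write_dir(irq);	/* host deactivates its own interrupts */
	/* else: the guest deactivates it via the virtual CPU interface */
}

int main(void)
{
	handle_irq_eoimode0(42);
	handle_irq_eoimode1(42, false);	/* host-owned interrupt */
	handle_irq_eoimode1(42, true);	/* interrupt forwarded to a vcpu */
	return 0;
}

Running it prints the register writes each mode would issue; the diff below wires this split into gic_handle_irq(), the gic_eoimode1_* callbacks, and the EOImode bit in the CPU interface setup.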
@@ -31,6 +31,7 @@
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include "irq-gic-common.h"
@@ -50,6 +51,7 @@ struct gic_chip_data {
};
static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
@@ -68,6 +70,11 @@ static inline int gic_irq_in_rdist(struct irq_data *d)
return gic_irq(d) < 32;
}
static inline bool forwarded_irq(struct irq_data *d)
{
return d->handler_data != NULL;
}
static inline void __iomem *gic_dist_base(struct irq_data *d)
{
if (gic_irq_in_rdist(d)) /* SGI+PPI -> SGI_base for this CPU */
@@ -231,6 +238,21 @@ static void gic_mask_irq(struct irq_data *d)
gic_poke_irq(d, GICD_ICENABLER);
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
gic_mask_irq(d);
/*
* When masking a forwarded interrupt, make sure it is
* deactivated as well.
*
* This ensures that an interrupt that is getting
* disabled/masked will not get "stuck", because there is
* no one to deactivate it (guest is being terminated).
*/
if (forwarded_irq(d))
gic_poke_irq(d, GICD_ICACTIVER);
}
static void gic_unmask_irq(struct irq_data *d)
{
gic_poke_irq(d, GICD_ISENABLER);
@@ -296,6 +318,17 @@ static void gic_eoi_irq(struct irq_data *d)
gic_write_eoir(gic_irq(d));
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
/*
* No need to deactivate an LPI, or an interrupt that
* is getting forwarded to a vcpu.
*/
if (gic_irq(d) >= 8192 || forwarded_irq(d))
return;
gic_write_dir(gic_irq(d));
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
unsigned int irq = gic_irq(d);
@@ -322,6 +355,12 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
return gic_configure_irq(irq, type, base, rwp_wait);
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
d->handler_data = vcpu;
return 0;
}
static u64 gic_mpidr_to_affinity(u64 mpidr)
{
u64 aff;
@@ -343,15 +382,26 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) {
int err;
if (static_key_true(&supports_deactivate))
gic_write_eoir(irqnr);
err = handle_domain_irq(gic_data.domain, irqnr, regs);
if (err) {
WARN_ONCE(true, "Unexpected interrupt received!\n");
gic_write_eoir(irqnr);
if (static_key_true(&supports_deactivate)) {
if (irqnr < 8192)
gic_write_dir(irqnr);
} else {
gic_write_eoir(irqnr);
}
}
continue;
}
if (irqnr < 16) {
gic_write_eoir(irqnr);
if (static_key_true(&supports_deactivate))
gic_write_dir(irqnr);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
#else
@@ -451,8 +501,13 @@ static void gic_cpu_sys_reg_init(void)
/* Set priority mask register */
gic_write_pmr(DEFAULT_PMR_VALUE);
/* EOI deactivates interrupt too (mode 0) */
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
if (static_key_true(&supports_deactivate)) {
/* EOI drops priority only (mode 1) */
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
} else {
/* EOI deactivates interrupt too (mode 0) */
gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
}
/* ... and let's hit the road... */
gic_write_grpen1(1);
@@ -661,11 +716,29 @@ static struct irq_chip gic_chip = {
.flags = IRQCHIP_SET_TYPE_MASKED,
};
static struct irq_chip gic_eoimode1_chip = {
.name = "GICv3",
.irq_mask = gic_eoimode1_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoimode1_eoi_irq,
.irq_set_type = gic_set_type,
.irq_set_affinity = gic_set_affinity,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
.flags = IRQCHIP_SET_TYPE_MASKED,
};
#define GIC_ID_NR (1U << gic_data.rdists.id_bits)
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct irq_chip *chip = &gic_chip;
if (static_key_true(&supports_deactivate))
chip = &gic_eoimode1_chip;
/* SGIs are private to the core kernel */
if (hw < 16)
return -EPERM;
@@ -679,13 +752,13 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
/* PPIs */
if (hw < 32) {
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
}
/* SPIs */
if (hw >= 32 && hw < gic_data.irq_nr) {
irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
@@ -693,7 +766,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
if (hw >= 8192 && hw < GIC_ID_NR) {
if (!gic_dist_supports_lpis())
return -EPERM;
irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID);
}
@@ -820,6 +893,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
redist_stride = 0;
if (!is_hyp_mode_available())
static_key_slow_dec(&supports_deactivate);
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
gic_data.dist_base = dist_base;
gic_data.redist_regions = rdist_regs;
gic_data.nr_redist_regions = nr_redist_regions;
@@ -47,6 +47,7 @@
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>
#include "irq-gic-common.h"
@@ -82,6 +83,8 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
@@ -137,6 +140,36 @@ static inline unsigned int gic_irq(struct irq_data *d)
return d->hwirq;
}
static inline bool cascading_gic_irq(struct irq_data *d)
{
void *data = irq_data_get_irq_handler_data(d);
/*
* If handler_data is pointing to one of the secondary GICs, then
* this is a cascading interrupt, and it cannot possibly be
* forwarded.
*/
if (data >= (void *)(gic_data + 1) &&
data < (void *)(gic_data + MAX_GIC_NR))
return true;
return false;
}
static inline bool forwarded_irq(struct irq_data *d)
{
/*
* A forwarded interrupt:
* - is on the primary GIC
* - has its handler_data set to a value that isn't a secondary GIC
*/
if (d->handler_data && !cascading_gic_irq(d))
return true;
return false;
}
/*
* Routines to acknowledge, disable and enable interrupts
*/
@@ -157,6 +190,21 @@ static void gic_mask_irq(struct irq_data *d)
gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}
static void gic_eoimode1_mask_irq(struct irq_data *d)
{
gic_mask_irq(d);
/*
* When masking a forwarded interrupt, make sure it is
* deactivated as well.
*
* This ensures that an interrupt that is getting
* disabled/masked will not get "stuck", because there is
* no one to deactivate it (guest is being terminated).
*/
if (forwarded_irq(d))
gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}
static void gic_unmask_irq(struct irq_data *d)
{
gic_poke_irq(d, GIC_DIST_ENABLE_SET);
@@ -167,6 +215,15 @@ static void gic_eoi_irq(struct irq_data *d)
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
/* Do not deactivate an IRQ forwarded to a vcpu. */
if (forwarded_irq(d))
return;
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
enum irqchip_irq_state which, bool val)
{
@@ -233,6 +290,16 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
return gic_configure_irq(gicirq, type, base, NULL);
}
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
if (cascading_gic_irq(d))
return -EINVAL;
d->handler_data = vcpu;
return 0;
}
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
@@ -272,11 +339,15 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
if (likely(irqnr > 15 && irqnr < 1021)) {
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
handle_domain_irq(gic->domain, irqnr, regs);
continue;
}
if (irqnr < 16) {
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
if (static_key_true(&supports_deactivate))
writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
handle_IPI(irqnr, regs);
#endif
@@ -329,6 +400,23 @@ static struct irq_chip gic_chip = {
IRQCHIP_MASK_ON_SUSPEND,
};
static struct irq_chip gic_eoimode1_chip = {
.name = "GICv2",
.irq_mask = gic_eoimode1_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoimode1_eoi_irq,
.irq_set_type = gic_set_type,
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
#endif
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
.flags = IRQCHIP_SET_TYPE_MASKED |
IRQCHIP_SKIP_SET_WAKE |
IRQCHIP_MASK_ON_SUSPEND,
};
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
if (gic_nr >= MAX_GIC_NR)
@@ -360,6 +448,10 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
{
void __iomem *cpu_base = gic_data_cpu_base(gic);
u32 bypass = 0;
u32 mode = 0;
if (static_key_true(&supports_deactivate))
mode = GIC_CPU_CTRL_EOImodeNS;
/*
* Preserve bypass disable bits to be written back later
@@ -367,7 +459,7 @@ static void gic_cpu_if_up(struct gic_chip_data *gic)
bypass = readl(cpu_base + GIC_CPU_CTRL);
bypass &= GICC_DIS_BYPASS_MASK;
writel_relaxed(bypass | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}
@@ -803,13 +895,20 @@ void __init gic_init_physaddr(struct device_node *node)
static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct irq_chip *chip = &gic_chip;
if (static_key_true(&supports_deactivate)) {
if (d->host_data == (void *)&gic_data[0])
chip = &gic_eoimode1_chip;
}
if (hw < 32) {
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
} else {
irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
irq_domain_set_info(d, irq, hw, chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
}
@@ -894,7 +993,7 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
.xlate = gic_irq_domain_xlate,
};
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
{
@@ -995,6 +1094,8 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
register_cpu_notifier(&gic_cpu_notifier);
#endif
set_handle_irq(gic_handle_irq);
if (static_key_true(&supports_deactivate))
pr_info("GIC: Using split EOI/Deactivate mode\n");
}
gic_dist_init(gic);
@@ -1002,6 +1103,19 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
gic_pm_init(gic);
}
void __init gic_init_bases(unsigned int gic_nr, int irq_start,
void __iomem *dist_base, void __iomem *cpu_base,
u32 percpu_offset, struct device_node *node)
{
/*
* Non-DT/ACPI systems won't run a hypervisor, so let's not
* bother with these...
*/
static_key_slow_dec(&supports_deactivate);
__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base,
percpu_offset, node);
}
#ifdef CONFIG_OF
static int gic_cnt __initdata;
@@ -1010,6 +1124,7 @@ gic_of_init(struct device_node *node, struct device_node *parent)
{
void __iomem *cpu_base;
void __iomem *dist_base;
struct resource cpu_res;
u32 percpu_offset;
int irq;
@@ -1022,10 +1137,20 @@ gic_of_init(struct device_node *node, struct device_node *parent)
cpu_base = of_iomap(node, 1);
WARN(!cpu_base, "unable to map gic cpu registers\n");
of_address_to_resource(node, 1, &cpu_res);
/*
* Disable split EOI/Deactivate if either HYP is not available
* or the CPU interface is too small.
*/
if (gic_cnt == 0 && (!is_hyp_mode_available() ||
resource_size(&cpu_res) < SZ_8K))
static_key_slow_dec(&supports_deactivate);
if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
percpu_offset = 0;
gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
if (!gic_cnt)
gic_init_physaddr(node);
@@ -1140,12 +1265,20 @@ gic_v2_acpi_init(struct acpi_table_header *table)
return -ENOMEM;
}
/*
* Disable split EOI/Deactivate if HYP is not available. ACPI
* guarantees that we'll always have a GICv2, so the CPU
* interface will always be the right size.
*/
if (!is_hyp_mode_available())
static_key_slow_dec(&supports_deactivate);
/*
* Initialize zero GIC instance (no multi-GIC support). Also, set GIC
* as default IRQ domain to allow for GSI registration and GSI to IRQ
* number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
*/
gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
__gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
irq_set_default_host(gic_data[0].domain);
acpi_irq_model = ACPI_IRQ_MODEL_GIC;
@@ -104,6 +104,8 @@
#define GICR_SYNCR 0x00C0
#define GICR_MOVLPIR 0x0100
#define GICR_MOVALLR 0x0110
#define GICR_ISACTIVER GICD_ISACTIVER
#define GICR_ICACTIVER GICD_ICACTIVER
#define GICR_IDREGS GICD_IDREGS
#define GICR_PIDR2 GICD_PIDR2
@@ -288,6 +290,7 @@
#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT)
#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1)
#define ICC_DIR_EL1 sys_reg(3, 0, 12, 11, 1)
#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0)
#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5)
#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0)
@@ -385,6 +388,12 @@ static inline void gic_write_eoir(u64 irq)
isb();
}
static inline void gic_write_dir(u64 irq)
{
asm volatile("msr_s " __stringify(ICC_DIR_EL1) ", %0" : : "r" (irq));
isb();
}
struct irq_domain;
int its_cpu_init(void);
int its_init(struct device_node *node, struct rdists *rdists,
@@ -20,9 +20,13 @@
#define GIC_CPU_ALIAS_BINPOINT 0x1c
#define GIC_CPU_ACTIVEPRIO 0xd0
#define GIC_CPU_IDENT 0xfc
#define GIC_CPU_DEACTIVATE 0x1000
#define GICC_ENABLE 0x1
#define GICC_INT_PRI_THRESHOLD 0xf0
#define GIC_CPU_CTRL_EOImodeNS (1 << 9)
#define GICC_IAR_INT_ID_MASK 0x3ff
#define GICC_INT_SPURIOUS 1023
#define GICC_DIS_BYPASS_MASK 0x1e0