Commit 7cb7c32d authored by Linus Torvalds

Merge tag 'irq_urgent_for_v6.8_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Borislav Petkov:

 - Fix GICv4.1 affinity update

 - Restore a quirk for ACPI-based GICv4 systems

 - Handle non-coherent GICv4 redistributors properly

 - Prevent spurious interrupts on Broadcom devices using GIC v3
   architecture

 - Other minor fixes

* tag 'irq_urgent_for_v6.8_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/gic-v3-its: Fix GICv4.1 VPE affinity update
  irqchip/gic-v3-its: Restore quirk probing for ACPI-based systems
  irqchip/gic-v3-its: Handle non-coherent GICv4 redistributors
  irqchip/qcom-mpm: Fix IS_ERR() vs NULL check in qcom_mpm_init()
  irqchip/loongson-eiointc: Use correct struct type in eiointc_domain_alloc()
  irqchip/irq-brcmstb-l2: Add write memory barrier before exit
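For context on the qcom-mpm fix listed above: devm_ioremap() signals failure by returning NULL rather than an ERR_PTR(), so a check with IS_ERR() can never catch a failed mapping; the correct check is for NULL. A minimal, hypothetical sketch of that convention (the helper name and parameters are illustrative, not part of the patch):

#include <linux/device.h>
#include <linux/io.h>
#include <linux/ioport.h>

/*
 * Illustrative helper only: devm_ioremap() reports failure with NULL,
 * so the error path must test for NULL and choose an errno itself.
 * devm_ioremap_resource(), by contrast, returns an ERR_PTR() and is
 * checked with IS_ERR()/PTR_ERR().
 */
static int map_shared_region(struct device *dev, const struct resource *res,
			     void __iomem **base)
{
	*base = devm_ioremap(dev, res->start, resource_size(res));
	if (!*base)
		return -ENOMEM;

	return 0;
}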
parents 626721ed af9acbfc
drivers/irqchip/irq-brcmstb-l2.c
@@ -2,7 +2,7 @@
 /*
  * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
  *
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2024 Broadcom
  */
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
@@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
 		generic_handle_domain_irq(b->domain, irq);
 	} while (status);
 out:
+	/* Don't ack parent before all device writes are done */
+	wmb();
+
 	chained_irq_exit(chip, desc);
 }
drivers/irqchip/irq-gic-v3-its.c
@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
 	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
 }
 
+static bool rdists_support_shareable(void)
+{
+	return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
+}
+
 static u16 get_its_list(struct its_vm *vm)
 {
 	struct its_node *its;
@@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
 		break;
 	}
 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
-	val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
-			  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
-	val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
-			  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+	if (rdists_support_shareable()) {
+		val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+				  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+		val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+				  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+	}
 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
 
 	return val;
@@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void)
 	WARN_ON(!IS_ALIGNED(pa, psz));
 
 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
-	val |= GICR_VPROPBASER_RaWb;
-	val |= GICR_VPROPBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPROPBASER_RaWb;
+		val |= GICR_VPROPBASER_InnerShareable;
+	}
 	val |= GICR_VPROPBASER_4_1_Z;
 	val |= GICR_VPROPBASER_4_1_VALID;
@@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void)
 	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
 	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
 
-	if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+	if (!rdists_support_shareable())
 		tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;
 
 	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
@@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void)
 	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
 	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
 
-	if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+	if (!rdists_support_shareable())
 		tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;
 
 	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
@@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
 				    bool force)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-	int from, cpu = cpumask_first(mask_val);
+	struct cpumask common, *table_mask;
 	unsigned long flags;
+	int from, cpu;
 
 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
@@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
 	from = vpe_to_cpuid_lock(vpe, &flags);
-	if (from == cpu)
-		goto out;
-
-	vpe->col_idx = cpu;
+	table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;
 
 	/*
-	 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
-	 * is sharing its VPE table with the current one.
+	 * If we are offered another CPU in the same GICv4.1 ITS
+	 * affinity, pick this one. Otherwise, any CPU will do.
 	 */
-	if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
-	    cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
+	if (table_mask && cpumask_and(&common, mask_val, table_mask))
+		cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (from == cpu)
 		goto out;
 
+	vpe->col_idx = cpu;
 	its_send_vmovp(vpe);
 	its_vpe_db_proxy_move(vpe, from, cpu);
@@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
 		GENMASK_ULL(51, 12);
 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
-	val |= GICR_VPROPBASER_RaWb;
-	val |= GICR_VPROPBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPROPBASER_RaWb;
+		val |= GICR_VPROPBASER_InnerShareable;
+	}
 	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
 
 	val = virt_to_phys(page_address(vpe->vpt_page)) &
 		GENMASK_ULL(51, 16);
-	val |= GICR_VPENDBASER_RaWaWb;
-	val |= GICR_VPENDBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPENDBASER_RaWaWb;
+		val |= GICR_VPENDBASER_InnerShareable;
+	}
 	/*
 	 * There is no good way of finding out if the pending table is
 	 * empty as we can race against the doorbell interrupt very
@@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its)
 	u32 ctlr;
 	int err;
 
+	its_enable_quirks(its);
+
 	if (is_v4(its)) {
 		if (!(its->typer & GITS_TYPER_VMOVP)) {
 			err = its_compute_its_list_map(its);
@@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node)
 		if (!its)
 			return -ENOMEM;
 
-		its_enable_quirks(its);
 		err = its_probe_one(its);
 		if (err) {
 			its_node_destroy(its);
drivers/irqchip/irq-loongson-eiointc.c
@@ -241,7 +241,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	int ret;
 	unsigned int i, type;
 	unsigned long hwirq = 0;
-	struct eiointc *priv = domain->host_data;
+	struct eiointc_priv *priv = domain->host_data;
 
 	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
 	if (ret)
drivers/irqchip/irq-qcom-mpm.c
@@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
 		/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
 		priv->base = devm_ioremap(dev, res.start, resource_size(&res));
 		of_node_put(msgram_np);
-		if (IS_ERR(priv->base))
-			return PTR_ERR(priv->base);
+		if (!priv->base)
+			return -ENOMEM;
 	} else {
 		/* Otherwise, fall back to simple MMIO. */
 		priv->base = devm_platform_ioremap_resource(pdev, 0);