Commit 7cb7c32d authored by Linus Torvalds

Merge tag 'irq_urgent_for_v6.8_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Borislav Petkov:

 - Fix GICv4.1 affinity update

 - Restore a quirk for ACPI-based GICv4 systems

 - Handle non-coherent GICv4 redistributors properly

 - Prevent spurious interrupts on Broadcom devices using GIC v3
   architecture

 - Other minor fixes

* tag 'irq_urgent_for_v6.8_rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/gic-v3-its: Fix GICv4.1 VPE affinity update
  irqchip/gic-v3-its: Restore quirk probing for ACPI-based systems
  irqchip/gic-v3-its: Handle non-coherent GICv4 redistributors
  irqchip/qcom-mpm: Fix IS_ERR() vs NULL check in qcom_mpm_init()
  irqchip/loongson-eiointc: Use correct struct type in eiointc_domain_alloc()
  irqchip/irq-brcmstb-l2: Add write memory barrier before exit
parents 626721ed af9acbfc
drivers/irqchip/irq-brcmstb-l2.c
@@ -2,7 +2,7 @@
 /*
  * Generic Broadcom Set Top Box Level 2 Interrupt controller driver
  *
- * Copyright (C) 2014-2017 Broadcom
+ * Copyright (C) 2014-2024 Broadcom
  */

 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -112,6 +112,9 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
 		generic_handle_domain_irq(b->domain, irq);
 	} while (status);
 out:
+	/* Don't ack parent before all device writes are done */
+	wmb();
+
 	chained_irq_exit(chip, desc);
 }
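The wmb() added above makes the handler's device register writes visible before the parent-level interrupt is acked via chained_irq_exit(), which is how the spurious interrupts mentioned in the merge message are avoided. As a rough illustration of that ordering requirement, here is a hypothetical user-space model using C11 atomics; device_reg, parent_ack and handle_child_irq() are invented names, and the release fence only stands in for what wmb() guarantees for MMIO in the kernel.

#include <stdatomic.h>
#include <stdio.h>

/* Invented stand-ins for a device register write and the parent-level ack. */
static _Atomic unsigned int device_reg;
static _Atomic unsigned int parent_ack;

static void handle_child_irq(void)
{
	/* The device write that must become visible first. */
	atomic_store_explicit(&device_reg, 1, memory_order_relaxed);

	/*
	 * Release fence: stores issued before it cannot be reordered
	 * after the store that follows, playing the role of the wmb()
	 * before chained_irq_exit() in the driver above.
	 */
	atomic_thread_fence(memory_order_release);

	/* Only now signal ("ack") the parent. */
	atomic_store_explicit(&parent_ack, 1, memory_order_relaxed);
}

int main(void)
{
	handle_child_irq();
	printf("device_reg=%u parent_ack=%u\n",
	       atomic_load(&device_reg), atomic_load(&parent_ack));
	return 0;
}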
drivers/irqchip/irq-gic-v3-its.c
@@ -207,6 +207,11 @@ static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
 	return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
 }

+static bool rdists_support_shareable(void)
+{
+	return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE);
+}
+
 static u16 get_its_list(struct its_vm *vm)
 {
 	struct its_node *its;
@@ -2710,10 +2715,12 @@ static u64 inherit_vpe_l1_table_from_its(void)
 			break;
 		}
 		val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
-		val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
-				  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
-		val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
-				  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+		if (rdists_support_shareable()) {
+			val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
+					  FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
+			val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
+					  FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
+		}
 		val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);

 		return val;
@@ -2936,8 +2943,10 @@ static int allocate_vpe_l1_table(void)
 	WARN_ON(!IS_ALIGNED(pa, psz));

 	val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
-	val |= GICR_VPROPBASER_RaWb;
-	val |= GICR_VPROPBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPROPBASER_RaWb;
+		val |= GICR_VPROPBASER_InnerShareable;
+	}
 	val |= GICR_VPROPBASER_4_1_Z;
 	val |= GICR_VPROPBASER_4_1_VALID;
@@ -3126,7 +3135,7 @@ static void its_cpu_init_lpis(void)
 	gicr_write_propbaser(val, rbase + GICR_PROPBASER);
 	tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);

-	if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+	if (!rdists_support_shareable())
 		tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK;

 	if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
@@ -3153,7 +3162,7 @@ static void its_cpu_init_lpis(void)
 	gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
 	tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);

-	if (gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE)
+	if (!rdists_support_shareable())
 		tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK;

 	if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
@@ -3817,8 +3826,9 @@ static int its_vpe_set_affinity(struct irq_data *d,
 				bool force)
 {
 	struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
-	int from, cpu = cpumask_first(mask_val);
+	struct cpumask common, *table_mask;
 	unsigned long flags;
+	int from, cpu;

 	/*
 	 * Changing affinity is mega expensive, so let's be as lazy as
@@ -3834,19 +3844,22 @@ static int its_vpe_set_affinity(struct irq_data *d,
 	 * taken on any vLPI handling path that evaluates vpe->col_idx.
 	 */
 	from = vpe_to_cpuid_lock(vpe, &flags);
-	if (from == cpu)
-		goto out;
-
-	vpe->col_idx = cpu;
+	table_mask = gic_data_rdist_cpu(from)->vpe_table_mask;

 	/*
-	 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
-	 * is sharing its VPE table with the current one.
+	 * If we are offered another CPU in the same GICv4.1 ITS
+	 * affinity, pick this one. Otherwise, any CPU will do.
 	 */
-	if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
-	    cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
-		goto out;
+	if (table_mask && cpumask_and(&common, mask_val, table_mask))
+		cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common);
+	else
+		cpu = cpumask_first(mask_val);
+
+	if (from == cpu)
+		goto out;
+
+	vpe->col_idx = cpu;

 	its_send_vmovp(vpe);
 	its_vpe_db_proxy_move(vpe, from, cpu);
@@ -3880,14 +3893,18 @@ static void its_vpe_schedule(struct its_vpe *vpe)
 	val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
 		GENMASK_ULL(51, 12);
 	val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
-	val |= GICR_VPROPBASER_RaWb;
-	val |= GICR_VPROPBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPROPBASER_RaWb;
+		val |= GICR_VPROPBASER_InnerShareable;
+	}
 	gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);

 	val = virt_to_phys(page_address(vpe->vpt_page)) &
 		GENMASK_ULL(51, 16);
-	val |= GICR_VPENDBASER_RaWaWb;
-	val |= GICR_VPENDBASER_InnerShareable;
+	if (rdists_support_shareable()) {
+		val |= GICR_VPENDBASER_RaWaWb;
+		val |= GICR_VPENDBASER_InnerShareable;
+	}
 	/*
 	 * There is no good way of finding out if the pending table is
 	 * empty as we can race against the doorbell interrupt very
@@ -5078,6 +5095,8 @@ static int __init its_probe_one(struct its_node *its)
 	u32 ctlr;
 	int err;

+	its_enable_quirks(its);
+
 	if (is_v4(its)) {
 		if (!(its->typer & GITS_TYPER_VMOVP)) {
 			err = its_compute_its_list_map(its);
@@ -5429,7 +5448,6 @@ static int __init its_of_probe(struct device_node *node)
 	if (!its)
 		return -ENOMEM;

-	its_enable_quirks(its);
 	err = its_probe_one(its);
 	if (err) {
 		its_node_destroy(its);
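The its_vpe_set_affinity() change above picks the target CPU from the intersection of the requested affinity mask and the mask of CPUs sharing the source redistributor's VPE table, staying on the current CPU when it is part of that intersection. The hypothetical user-space sketch below models only that selection step; pick_cpu(), first_cpu(), NR_CPUS and the plain-bitmask cpumask_t are invented for illustration and are not the kernel's cpumask API.

#include <stdio.h>

#define NR_CPUS 8
typedef unsigned int cpumask_t;	/* one bit per CPU, invented stand-in */

static int first_cpu(cpumask_t m)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (m & (1u << cpu))
			return cpu;
	return -1;
}

/*
 * Model of the new CPU pick: if the requested mask intersects the set of
 * CPUs sharing the current redistributor's VPE table, prefer 'from' when
 * it is in that intersection, otherwise take the first CPU of the
 * intersection; with no intersection (or no table mask), fall back to the
 * first CPU of the requested mask.
 */
static int pick_cpu(int from, cpumask_t mask_val, cpumask_t table_mask)
{
	cpumask_t common = mask_val & table_mask;

	if (common)
		return (common & (1u << from)) ? from : first_cpu(common);
	return first_cpu(mask_val);
}

int main(void)
{
	/* from=2, affinity {1,2,3}, CPUs {2,3} share the table -> stay on 2 */
	printf("picked %d\n", pick_cpu(2, 0x0e, 0x0c));
	/* from=2, affinity {4,5}, no overlap with the table -> first allowed, 4 */
	printf("picked %d\n", pick_cpu(2, 0x30, 0x0c));
	return 0;
}

In the kernel, struct cpumask, cpumask_and() and cpumask_test_cpu() play the roles that the plain bit operations play here.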
drivers/irqchip/irq-loongson-eiointc.c
@@ -241,7 +241,7 @@ static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
 	int ret;
 	unsigned int i, type;
 	unsigned long hwirq = 0;
-	struct eiointc *priv = domain->host_data;
+	struct eiointc_priv *priv = domain->host_data;

 	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
 	if (ret)
drivers/irqchip/irq-qcom-mpm.c
@@ -389,8 +389,8 @@ static int qcom_mpm_init(struct device_node *np, struct device_node *parent)
 		/* Don't use devm_ioremap_resource, as we're accessing a shared region. */
 		priv->base = devm_ioremap(dev, res.start, resource_size(&res));
 		of_node_put(msgram_np);
-		if (IS_ERR(priv->base))
-			return PTR_ERR(priv->base);
+		if (!priv->base)
+			return -ENOMEM;
 	} else {
 		/* Otherwise, fall back to simple MMIO. */
 		priv->base = devm_platform_ioremap_resource(pdev, 0);
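The qcom-mpm fix above follows from the different failure conventions of the two mapping helpers: devm_ioremap() returns NULL on failure, while devm_ioremap_resource() and devm_platform_ioremap_resource() return an ERR_PTR()-encoded error, so an IS_ERR() check on devm_ioremap()'s result can never fire. A hypothetical user-space sketch of that distinction follows; is_err() is a simplified model of the kernel's IS_ERR() idea, not the real header.

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* same bound the kernel uses for ERR_PTR values */

/* Simplified model of IS_ERR(): only the top MAX_ERRNO addresses are errors. */
static int is_err(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *null_style = NULL;			/* devm_ioremap() failure          */
	void *errptr_style = (void *)(long)-ENOMEM;	/* devm_ioremap_resource() failure */

	printf("NULL:             is_err=%d  !ptr=%d\n", is_err(null_style), !null_style);
	printf("ERR_PTR(-ENOMEM): is_err=%d  !ptr=%d\n", is_err(errptr_style), !errptr_style);
	return 0;
}

A NULL return sails past an IS_ERR()-style test, which is why the patch switches to checking for NULL and returning -ENOMEM.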