Commit eed7d30e authored by Linus Torvalds

Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Ingo Molnar:
 "Diverse irqchip driver fixes"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/gic-v3-its: Fix command queue pointer comparison bug
  irqchip/mips-gic: Use the correct local interrupt map registers
  irqchip/ti-sci-inta: Fix kernel crash if irq_create_fwspec_mapping fail
  irqchip/irq-csky-mpintc: Support auto irq deliver to all cpus
parents a7211bc9 a52548dd
@@ -310,6 +310,36 @@ static inline bool mips_gic_present(void)
return IS_ENABLED(CONFIG_MIPS_GIC) && mips_gic_base;
}
/**
* mips_gic_vx_map_reg() - Return GIC_Vx_<intr>_MAP register offset
* @intr: A GIC local interrupt
*
* Determine the index of the GIC_VL_<intr>_MAP or GIC_VO_<intr>_MAP register
* within the block of GIC map registers. This is almost the same as the order
* of interrupts in the pending & mask registers, as used by enum
* mips_gic_local_interrupt, but moves the FDC interrupt & thus offsets the
* interrupts after it...
*
* Return: The map register index corresponding to @intr.
*
* The return value is suitable for use with the (read|write)_gic_v[lo]_map
* accessor functions.
*/
static inline unsigned int
mips_gic_vx_map_reg(enum mips_gic_local_interrupt intr)
{
/* WD, Compare & Timer are 1:1 */
if (intr <= GIC_LOCAL_INT_TIMER)
return intr;
/* FDC moves to after Timer... */
if (intr == GIC_LOCAL_INT_FDC)
return GIC_LOCAL_INT_TIMER + 1;
/* As a result everything else is offset by 1 */
return intr + 1;
}
/**
* gic_get_c0_compare_int() - Return cp0 count/compare interrupt virq
*
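To make the new helper's translation concrete, here is a minimal standalone sketch (illustration only, not part of the patch). The enum ordering and the hardware MAP-register order are assumptions taken from the comment above: FDC is declared last in the enum, but its MAP register sits directly after Timer's.

#include <stdio.h>

/* Standalone mirror of mips_gic_vx_map_reg() for illustration; the enum
 * values below assume the usual ordering, with FDC declared last. */
enum gic_local_int { WD, COMPARE, TIMER, PERFCTR, SWINT0, SWINT1, FDC, NUM_INTS };

static unsigned int vx_map_reg(unsigned int intr)
{
	if (intr <= TIMER)	/* WD, Compare & Timer are 1:1 */
		return intr;
	if (intr == FDC)	/* FDC's MAP register follows Timer's */
		return TIMER + 1;
	return intr + 1;	/* PerfCount & SWInt0/1 shift up by one */
}

int main(void)
{
	for (unsigned int i = WD; i < NUM_INTS; i++)
		printf("local intr %u -> MAP register index %u\n", i, vx_map_reg(i));
	return 0;
}

Running it prints WD/Compare/Timer unchanged (0-2), FDC at index 3, and PerfCount/SWInt0/SWInt1 shifted to 4-6, which is the offset the driver hunks below rely on.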
@@ -89,7 +89,18 @@ static int csky_irq_set_affinity(struct irq_data *d,
if (cpu >= nr_cpu_ids)
return -EINVAL;
/* Enable interrupt destination */
/*
 * The csky,mpintc supports auto irq delivery, but it can only
 * deliver an external irq either to one cpu or to all cpus; it
 * cannot deliver an external irq to an arbitrary group of cpus
 * described by cpu_mask. So auto delivery mode is used only when
 * the affinity mask_val is equal to cpu_present_mask.
 */
if (cpumask_equal(mask_val, cpu_present_mask))
cpu = 0;
else
cpu |= BIT(31);
writel_relaxed(cpu, INTCG_base + INTCG_CIDSTR + offset);
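For a quick sanity check of the destination encoding used here, the sketch below (illustration only; the INTCG_CIDSTR register write is stubbed out, and all_cpus stands in for the cpumask_equal() test) shows the two values the driver ends up writing.

#include <stdio.h>

/* Illustration of the destination-value selection in csky_irq_set_affinity() */
static unsigned int cidstr_value(unsigned int cpu, int all_cpus)
{
	if (all_cpus)
		return 0;		/* auto-deliver to every cpu */
	return (1u << 31) | cpu;	/* BIT(31): pin delivery to one cpu */
}

int main(void)
{
	printf("all cpus: 0x%08x\n", cidstr_value(2, 1));	/* 0x00000000 */
	printf("cpu 2:    0x%08x\n", cidstr_value(2, 0));	/* 0x80000002 */
	return 0;
}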
@@ -733,32 +733,43 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
}
static int its_wait_for_range_completion(struct its_node *its,
struct its_cmd_block *from,
u64 prev_idx,
struct its_cmd_block *to)
{
u64 rd_idx, from_idx, to_idx;
u64 rd_idx, to_idx, linear_idx;
u32 count = 1000000; /* 1s! */
from_idx = its_cmd_ptr_to_offset(its, from);
/* Linearize to_idx if the command set has wrapped around */
to_idx = its_cmd_ptr_to_offset(its, to);
if (to_idx < prev_idx)
to_idx += ITS_CMD_QUEUE_SZ;
linear_idx = prev_idx;
while (1) {
s64 delta;
rd_idx = readl_relaxed(its->base + GITS_CREADR);
/* Direct case */
if (from_idx < to_idx && rd_idx >= to_idx)
break;
/*
* Compute the read pointer progress, taking the
* potential wrap-around into account.
*/
delta = rd_idx - prev_idx;
if (rd_idx < prev_idx)
delta += ITS_CMD_QUEUE_SZ;
/* Wrapped case */
if (from_idx >= to_idx && rd_idx >= to_idx && rd_idx < from_idx)
linear_idx += delta;
if (linear_idx >= to_idx)
break;
count--;
if (!count) {
pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
from_idx, to_idx, rd_idx);
pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
to_idx, linear_idx);
return -1;
}
prev_idx = rd_idx;
cpu_relax();
udelay(1);
}
@@ -775,6 +786,7 @@ void name(struct its_node *its, \
struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
synctype *sync_obj; \
unsigned long flags; \
u64 rd_idx; \
\
raw_spin_lock_irqsave(&its->lock, flags); \
\
@@ -796,10 +808,11 @@ void name(struct its_node *its, \
} \
\
post: \
rd_idx = readl_relaxed(its->base + GITS_CREADR); \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
if (its_wait_for_range_completion(its, cmd, next_cmd)) \
if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
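Taken together, the two hunks sample GITS_CREADR (rd_idx) while the lock is still held and let its_wait_for_range_completion() track progress in a linearized index space. The standalone sketch below (illustration only; the queue size and the sample read-pointer values are made up) walks the same arithmetic to show why a wrapped read pointer no longer ends the wait early.

#include <stdint.h>
#include <stdio.h>

#define CMD_QUEUE_SZ 65536	/* stand-in for ITS_CMD_QUEUE_SZ */

int main(void)
{
	/* prev_idx: read pointer sampled under the lock before posting.
	 * to_idx:   offset of the last command, linearized the same way
	 *           (it wrapped to 64, so it becomes 64 + CMD_QUEUE_SZ). */
	uint64_t prev_idx = 65472, to_idx = 64 + CMD_QUEUE_SZ;
	uint64_t linear_idx = prev_idx;
	uint64_t samples[] = { 65504, 32, 80 };	/* successive GITS_CREADR reads */

	for (unsigned int i = 0; i < 3; i++) {
		uint64_t rd_idx = samples[i];
		int64_t delta = (int64_t)rd_idx - (int64_t)prev_idx;

		if (rd_idx < prev_idx)		/* the read pointer wrapped */
			delta += CMD_QUEUE_SZ;
		linear_idx += delta;
		prev_idx = rd_idx;

		printf("rd_idx=%5llu -> linear_idx=%llu (%s)\n",
		       (unsigned long long)rd_idx,
		       (unsigned long long)linear_idx,
		       linear_idx >= to_idx ? "done" : "keep waiting");
	}
	return 0;
}

With these numbers the loop keeps waiting across the wrap (linear_idx 65504, then 65568) and only reports completion once linear_idx reaches 65616, past the linearized to_idx of 65600.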
@@ -388,7 +388,7 @@ static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
cd = irq_data_get_irq_chip_data(d);
write_gic_vl_map(intr, cd->map);
write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
if (cd->mask)
write_gic_vl_smask(BIT(intr));
}
@@ -517,7 +517,7 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
spin_lock_irqsave(&gic_lock, flags);
for_each_online_cpu(cpu) {
write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_map(intr, map);
write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -159,9 +159,9 @@ static struct ti_sci_inta_vint_desc *ti_sci_inta_alloc_parent_irq(struct irq_dom
parent_fwspec.param[1] = vint_desc->vint_id;
parent_virq = irq_create_fwspec_mapping(&parent_fwspec);
if (parent_virq <= 0) {
if (parent_virq == 0) {
kfree(vint_desc);
return ERR_PTR(parent_virq);
return ERR_PTR(-EINVAL);
}
vint_desc->parent_virq = parent_virq;
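For context on why the old check could crash: irq_create_fwspec_mapping() returns an unsigned virq number and reports failure as 0, so parent_virq <= 0 only matches the failure case, and ERR_PTR(0) is simply NULL, which IS_ERR() in the caller never flags. The sketch below is a userspace mock of ERR_PTR()/IS_ERR() (illustration only, not the kernel headers) showing that only a genuine negative errno survives the IS_ERR() test.

#include <errno.h>
#include <stdio.h>

/* Userspace mock of the kernel's ERR_PTR()/IS_ERR() helpers, for
 * illustration only: error pointers occupy the last MAX_ERRNO values
 * of the address space. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	unsigned int parent_virq = 0;	/* irq_create_fwspec_mapping() failure */

	/* Old code: ERR_PTR(0) is NULL, so IS_ERR() reports no error. */
	printf("ERR_PTR(parent_virq): IS_ERR = %d\n",
	       IS_ERR(ERR_PTR(parent_virq)));

	/* Fixed code: return a genuine error pointer instead. */
	printf("ERR_PTR(-EINVAL):     IS_ERR = %d\n",
	       IS_ERR(ERR_PTR(-EINVAL)));
	return 0;
}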