Commit 04c8b0f8 authored by Marc Zyngier

irqchip/gic: Make locking a BL_SWITCHER only feature

The BL switcher code manipulates the logical/physical CPU mapping,
forcing a lock to be taken on the IPI path. With an IPI heavy load,
this single lock becomes contended.

But when CONFIG_BL_SWITCHER is not enabled, there is no reason
to take this lock at all since the CPU mapping is immutable.

This patch allows the lock to be entirely removed when BL_SWITCHER
is not enabled (which is the case in most configurations), leading
to a small improvement in "perf bench sched pipe" (measured on
an 8-core AMD Seattle system):

Before: 101370 ops/sec
After:  103680 ops/sec

Take this opportunity to also remove a useless lock that was taken
when handling an interrupt on a secondary GIC.
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
parent 9395452b
@@ -91,7 +91,27 @@ struct gic_chip_data {
 #endif
 };
 
-static DEFINE_RAW_SPINLOCK(irq_controller_lock);
+#ifdef CONFIG_BL_SWITCHER
+
+static DEFINE_RAW_SPINLOCK(cpu_map_lock);
+
+#define gic_lock_irqsave(f)		\
+	raw_spin_lock_irqsave(&cpu_map_lock, (f))
+#define gic_unlock_irqrestore(f)	\
+	raw_spin_unlock_irqrestore(&cpu_map_lock, (f))
+
+#define gic_lock()			raw_spin_lock(&cpu_map_lock)
+#define gic_unlock()			raw_spin_unlock(&cpu_map_lock)
+
+#else
+
+#define gic_lock_irqsave(f)		do { (void)(f); } while(0)
+#define gic_unlock_irqrestore(f)	do { (void)(f); } while(0)
+
+#define gic_lock()			do { } while(0)
+#define gic_unlock()			do { } while(0)
+
+#endif
 
 /*
  * The GIC mapping of CPU interfaces does not necessarily match
@@ -317,12 +337,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	gic_lock_irqsave(flags);
 	mask = 0xff << shift;
 	bit = gic_cpu_map[cpu] << shift;
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+	gic_unlock_irqrestore(flags);
 
 	return IRQ_SET_MASK_OK_DONE;
 }
@@ -374,9 +394,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
 
 	chained_irq_enter(chip, desc);
 
-	raw_spin_lock(&irq_controller_lock);
 	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
-	raw_spin_unlock(&irq_controller_lock);
 
 	gic_irq = (status & GICC_IAR_INT_ID_MASK);
 	if (gic_irq == GICC_INT_SPURIOUS)
@@ -776,7 +794,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 		return;
 	}
 
-	raw_spin_lock_irqsave(&irq_controller_lock, flags);
+	gic_lock_irqsave(flags);
 
 	/* Convert our logical CPU mask into a physical one. */
 	for_each_cpu(cpu, mask)
@@ -791,7 +809,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
 	/* this always happens on GIC0 */
 	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
 
-	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
+	gic_unlock_irqrestore(flags);
 }
 #endif
@@ -859,7 +877,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 	cur_target_mask = 0x01010101 << cur_cpu_id;
 	ror_val = (cur_cpu_id - new_cpu_id) & 31;
 
-	raw_spin_lock(&irq_controller_lock);
+	gic_lock();
 
 	/* Update the target interface for this logical CPU */
 	gic_cpu_map[cpu] = 1 << new_cpu_id;
@@ -879,7 +897,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
 		}
 	}
 
-	raw_spin_unlock(&irq_controller_lock);
+	gic_unlock();
 
 	/*
 	 * Now let's migrate and clear any potential SGIs that might be
...
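For reference, the do { (void)(f); } while(0) form used in the !CONFIG_BL_SWITCHER case is what lets the lock vanish completely while still "using" the irq flags variable, so no unused-variable warning is produced. Below is a minimal user-space sketch of the same pattern, not the kernel code itself; USE_LOCKING stands in for CONFIG_BL_SWITCHER and map_lock()/map_unlock() are made-up names standing in for the gic_lock helpers.

/* Conditional no-op lock wrappers: build with -DUSE_LOCKING to get real
 * locking, without it the macros expand to nothing but still consume the
 * flags argument via (void)(f). */
#include <stdio.h>

#ifdef USE_LOCKING
#include <pthread.h>
static pthread_mutex_t map_mutex = PTHREAD_MUTEX_INITIALIZER;
#define map_lock(f)	do { (void)(f); pthread_mutex_lock(&map_mutex); } while (0)
#define map_unlock(f)	do { (void)(f); pthread_mutex_unlock(&map_mutex); } while (0)
#else
#define map_lock(f)	do { (void)(f); } while (0)	/* compiles away */
#define map_unlock(f)	do { (void)(f); } while (0)
#endif

int main(void)
{
	unsigned long flags = 0;

	map_lock(flags);	/* no code emitted when USE_LOCKING is off */
	printf("critical section\n");
	map_unlock(flags);
	return 0;
}

Built without -DUSE_LOCKING, the callers keep their shape but the lock disappears entirely, which mirrors how the GIC IPI fast path avoids the contended lock when the switcher is compiled out.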