Commit 7688c539 authored by Matt Redfearn's avatar Matt Redfearn Committed by Ralf Baechle

MIPS: smp.c: Introduce mechanism for freeing and allocating IPIs

For the MIPS remote processor implementation, we need additional IPIs to
talk to the remote processor. Since MIPS GIC reserves exactly the right
number of IPI IRQs required by Linux for the number of VPs in the
system, this is not possible without releasing some resources.

This commit introduces mips_smp_ipi_allocate() which allocates IPIs to a
given cpumask. It is called as normal with the cpu_possible_mask at
bootup to initialise IPIs to all CPUs. mips_smp_ipi_free() may then be
used to free IPIs to a subset of those CPUs so that their hardware
resources can be reused.
Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
Cc: Ohad Ben-Cohen <ohad@wizery.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Lisa Parratt <Lisa.Parratt@imgtec.com>
Cc: James Hogan <james.hogan@imgtec.com>
Cc: Qais Yousef <qsyousef@gmail.com>
Cc: Paul Burton <paul.burton@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: linux-remoteproc@vger.kernel.org
Cc: lisa.parratt@imgtec.com
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/14285/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent e710d666
...@@ -85,6 +85,20 @@ static inline void __cpu_die(unsigned int cpu) ...@@ -85,6 +85,20 @@ static inline void __cpu_die(unsigned int cpu)
extern void play_dead(void); extern void play_dead(void);
#endif #endif
/*
* This function will set up the necessary IPIs for Linux to communicate
* with the CPUs in mask.
* Return 0 on success.
*/
int mips_smp_ipi_allocate(const struct cpumask *mask);
/*
* This function will free up IPIs allocated with mips_smp_ipi_allocate to the
* CPUs in mask, which must be a subset of the IPIs that have been configured.
* Return 0 on success.
*/
int mips_smp_ipi_free(const struct cpumask *mask);
static inline void arch_send_call_function_single_ipi(int cpu) static inline void arch_send_call_function_single_ipi(int cpu)
{ {
extern struct plat_smp_ops *mp_ops; /* private */ extern struct plat_smp_ops *mp_ops; /* private */
......
...@@ -231,7 +231,7 @@ static struct irqaction irq_call = { ...@@ -231,7 +231,7 @@ static struct irqaction irq_call = {
.name = "IPI call" .name = "IPI call"
}; };
static __init void smp_ipi_init_one(unsigned int virq, static void smp_ipi_init_one(unsigned int virq,
struct irqaction *action) struct irqaction *action)
{ {
int ret; int ret;
...@@ -241,9 +241,11 @@ static __init void smp_ipi_init_one(unsigned int virq, ...@@ -241,9 +241,11 @@ static __init void smp_ipi_init_one(unsigned int virq,
BUG_ON(ret); BUG_ON(ret);
} }
static int __init mips_smp_ipi_init(void) static unsigned int call_virq, sched_virq;
int mips_smp_ipi_allocate(const struct cpumask *mask)
{ {
unsigned int call_virq, sched_virq; int virq;
struct irq_domain *ipidomain; struct irq_domain *ipidomain;
struct device_node *node; struct device_node *node;
...@@ -270,16 +272,20 @@ static int __init mips_smp_ipi_init(void) ...@@ -270,16 +272,20 @@ static int __init mips_smp_ipi_init(void)
if (!ipidomain) if (!ipidomain)
return 0; return 0;
call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask); virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!call_virq); BUG_ON(!virq);
if (!call_virq)
call_virq = virq;
sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask); virq = irq_reserve_ipi(ipidomain, mask);
BUG_ON(!sched_virq); BUG_ON(!virq);
if (!sched_virq)
sched_virq = virq;
if (irq_domain_is_ipi_per_cpu(ipidomain)) { if (irq_domain_is_ipi_per_cpu(ipidomain)) {
int cpu; int cpu;
for_each_cpu(cpu, cpu_possible_mask) { for_each_cpu(cpu, mask) {
smp_ipi_init_one(call_virq + cpu, &irq_call); smp_ipi_init_one(call_virq + cpu, &irq_call);
smp_ipi_init_one(sched_virq + cpu, &irq_resched); smp_ipi_init_one(sched_virq + cpu, &irq_resched);
} }
...@@ -288,6 +294,45 @@ static int __init mips_smp_ipi_init(void) ...@@ -288,6 +294,45 @@ static int __init mips_smp_ipi_init(void)
smp_ipi_init_one(sched_virq, &irq_resched); smp_ipi_init_one(sched_virq, &irq_resched);
} }
return 0;
}
/*
 * Free the call-function and reschedule IPIs previously set up by
 * mips_smp_ipi_allocate() for the CPUs in @mask, so their hardware IPI
 * resources can be reused (e.g. by a remote processor).
 *
 * @mask must be a subset of the CPUs for which IPIs were allocated.
 * Returns 0 on success.
 */
int mips_smp_ipi_free(const struct cpumask *mask)
{
struct irq_domain *ipidomain;
struct device_node *node;
/* Look up the IPI IRQ domain via the root DT interrupt parent. */
node = of_irq_find_parent(of_root);
ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
/*
 * Some platforms have half DT setup. So if we found irq node but
 * didn't find an ipidomain, try to search for one that is not in the
 * DT.
 */
if (node && !ipidomain)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
/* An IPI domain must exist if IPIs were allocated in the first place. */
BUG_ON(!ipidomain);
if (irq_domain_is_ipi_per_cpu(ipidomain)) {
int cpu;
/*
 * Per-CPU IPI domains use one virq per CPU (base virq + cpu),
 * so detach the call/resched irqactions from each CPU's virq
 * before destroying the reservation.
 */
for_each_cpu(cpu, mask) {
remove_irq(call_virq + cpu, &irq_call);
remove_irq(sched_virq + cpu, &irq_resched);
}
}
/* Release the IPI reservations for the CPUs in @mask. */
irq_destroy_ipi(call_virq, mask);
irq_destroy_ipi(sched_virq, mask);
return 0;
}
static int __init mips_smp_ipi_init(void)
{
mips_smp_ipi_allocate(cpu_possible_mask);
call_desc = irq_to_desc(call_virq); call_desc = irq_to_desc(call_virq);
sched_desc = irq_to_desc(sched_virq); sched_desc = irq_to_desc(sched_virq);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment