Commit bb9b428a authored by Thomas Gleixner

genirq/irqdomain: Allow irq_domain_activate_irq() to fail

Allow irq_domain_activate_irq() to fail. This is required to support a
reservation and late vector assignment scheme.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Juergen Gross <jgross@suse.com>
Tested-by: Yu Chen <yu.c.chen@intel.com>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Alok Kataria <akataria@vmware.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Rui Zhang <rui.zhang@intel.com>
Cc: "K. Y. Srinivasan" <kys@microsoft.com>
Cc: Arjan van de Ven <arjan@linux.intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Len Brown <lenb@kernel.org>
Link: https://lkml.kernel.org/r/20170913213152.933882227@linutronix.de
parent 72491643
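Together with its parent commit, which changed domain_ops->activate() to take an extra argument and return an error code, this lets a hierarchical irqdomain refuse activation, e.g. when no vector can be assigned until the interrupt is actually started up. A minimal sketch of such a callback under the new contract; the demo_* names are invented for illustration and are not part of this commit:

/*
 * Illustrative sketch only: a hypothetical ->activate callback for a
 * hierarchical irqdomain that assigns its hardware vector late and
 * may therefore fail at activation time.
 */
static int demo_domain_activate(struct irq_domain *domain,
				struct irq_data *irqd, bool early)
{
	int vec = demo_assign_vector(irqd);	/* hypothetical helper */

	if (vec < 0)
		return vec;	/* propagated via irq_domain_activate_irq() */

	demo_program_vector(irqd, vec);		/* hypothetical helper */
	return 0;
}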
include/linux/irqdomain.h
@@ -441,7 +441,7 @@ extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 				   unsigned int nr_irqs, int node, void *arg,
 				   bool realloc, const struct cpumask *affinity);
 extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs);
-extern void irq_domain_activate_irq(struct irq_data *irq_data);
+extern int irq_domain_activate_irq(struct irq_data *irq_data);
 extern void irq_domain_deactivate_irq(struct irq_data *irq_data);
 
 static inline int irq_domain_alloc_irqs(struct irq_domain *domain,
kernel/irq/chip.c
@@ -219,7 +219,12 @@ __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
 		 */
 		return IRQ_STARTUP_ABORT;
 	}
-	irq_domain_activate_irq(d);
+	/*
+	 * Managed interrupts have reserved resources, so this should not
+	 * happen.
+	 */
+	if (WARN_ON(irq_domain_activate_irq(d)))
+		return IRQ_STARTUP_ABORT;
 	return IRQ_STARTUP_MANAGED;
 }
 #else
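Note that the kernel's WARN_ON(cond) evaluates to the truth value of cond, so the managed-interrupt path above emits a diagnostic and aborts the startup in a single conditional. As the added comment states, managed interrupts have their resources reserved up front, so a failure here indicates a bug rather than an expected out-of-resources condition.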
@@ -285,7 +290,7 @@ int irq_activate(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 
 	if (!irqd_affinity_is_managed(d))
-		irq_domain_activate_irq(d);
+		return irq_domain_activate_irq(d);
 	return 0;
 }
kernel/irq/internals.h
@@ -439,9 +439,10 @@ static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
 #endif /* !CONFIG_GENERIC_PENDING_IRQ */
 
 #if !defined(CONFIG_IRQ_DOMAIN) || !defined(CONFIG_IRQ_DOMAIN_HIERARCHY)
-static inline void irq_domain_activate_irq(struct irq_data *data)
+static inline int irq_domain_activate_irq(struct irq_data *data)
 {
 	irqd_set_activated(data);
+	return 0;
 }
 
 static inline void irq_domain_deactivate_irq(struct irq_data *data)
 {
kernel/irq/irqdomain.c
@@ -1682,28 +1682,35 @@ void irq_domain_free_irqs_parent(struct irq_domain *domain,
 }
 EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
 
-static void __irq_domain_activate_irq(struct irq_data *irq_data)
+static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
 {
 	if (irq_data && irq_data->domain) {
 		struct irq_domain *domain = irq_data->domain;
 
+		if (domain->ops->deactivate)
+			domain->ops->deactivate(domain, irq_data);
 		if (irq_data->parent_data)
-			__irq_domain_activate_irq(irq_data->parent_data);
-		if (domain->ops->activate)
-			domain->ops->activate(domain, irq_data, false);
+			__irq_domain_deactivate_irq(irq_data->parent_data);
 	}
 }
 
-static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
+static int __irq_domain_activate_irq(struct irq_data *irqd)
 {
-	if (irq_data && irq_data->domain) {
-		struct irq_domain *domain = irq_data->domain;
+	int ret = 0;
 
-		if (domain->ops->deactivate)
-			domain->ops->deactivate(domain, irq_data);
-		if (irq_data->parent_data)
-			__irq_domain_deactivate_irq(irq_data->parent_data);
+	if (irqd && irqd->domain) {
+		struct irq_domain *domain = irqd->domain;
+
+		if (irqd->parent_data)
+			ret = __irq_domain_activate_irq(irqd->parent_data);
+		if (!ret && domain->ops->activate) {
+			ret = domain->ops->activate(domain, irqd, false);
+			/* Rollback in case of error */
+			if (ret && irqd->parent_data)
+				__irq_domain_deactivate_irq(irqd->parent_data);
+		}
 	}
+	return ret;
 }
 
 /**
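The reworked helpers activate the hierarchy parent-first and, if the current level's ->activate() then fails, unwind the already-activated parent chain again. A stand-alone model of that walk (not kernel code; the names and the plain parent pointer are invented for illustration):

/* Self-contained model of the activate-with-rollback recursion. */
struct level {
	struct level *parent;
	int (*activate)(struct level *l);
	void (*deactivate)(struct level *l);
};

static void model_deactivate(struct level *l)
{
	/* Deactivate this level first, then walk up the parents. */
	for (; l; l = l->parent)
		if (l->deactivate)
			l->deactivate(l);
}

static int model_activate(struct level *l)
{
	int ret = 0;

	if (!l)
		return 0;

	ret = model_activate(l->parent);	/* parents first */
	if (!ret && l->activate) {
		ret = l->activate(l);
		if (ret)			/* roll the parents back */
			model_deactivate(l->parent);
	}
	return ret;
}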
@@ -1714,12 +1721,15 @@ static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
  * This is the second step to call domain_ops->activate to program interrupt
  * controllers, so the interrupt could actually get delivered.
  */
-void irq_domain_activate_irq(struct irq_data *irq_data)
+int irq_domain_activate_irq(struct irq_data *irq_data)
 {
-	if (!irqd_is_activated(irq_data)) {
-		__irq_domain_activate_irq(irq_data);
+	int ret = 0;
+
+	if (!irqd_is_activated(irq_data))
+		ret = __irq_domain_activate_irq(irq_data);
+	if (!ret)
 		irqd_set_activated(irq_data);
-	}
+	return ret;
 }
 
 /**
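Note that irqd_set_activated() is now reached only when the whole hierarchy activated successfully, so IRQD_ACTIVATED can no longer be left set on a half-activated interrupt, and a caller that sees irq_domain_activate_irq() fail can simply retry the activation later.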
kernel/irq/msi.c
@@ -401,11 +401,26 @@ int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
 			struct irq_data *irq_data;
 
 			irq_data = irq_domain_get_irq_data(domain, desc->irq);
-			irq_domain_activate_irq(irq_data);
+			ret = irq_domain_activate_irq(irq_data);
+			if (ret)
+				goto cleanup;
 		}
 	}
-
 	return 0;
+
+cleanup:
+	for_each_msi_entry(desc, dev) {
+		struct irq_data *irqd;
+
+		if (desc->irq == virq)
+			break;
+
+		irqd = irq_domain_get_irq_data(domain, desc->irq);
+		if (irqd_is_activated(irqd))
+			irq_domain_deactivate_irq(irqd);
+	}
+	msi_domain_free_irqs(domain, dev);
+	return ret;
 }
 
 /**
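The new cleanup label is a standard partial-failure unwind: re-walk the descriptor list, stop at the entry whose activation failed (virq holds its Linux irq number at that point), deactivate only the entries that were actually activated, then free everything. The same shape reduced to an array walk, with invented names (not kernel code):

#include <stdbool.h>

struct item {
	bool active;
};

/* Hypothetical hooks standing in for irq_domain_activate_irq() and
 * irq_domain_deactivate_irq(); a real hook could fail, e.g. -ENOSPC. */
static int item_activate(struct item *it)
{
	it->active = true;
	return 0;
}

static void item_deactivate(struct item *it)
{
	it->active = false;
}

/* Activate items in order; on failure, deactivate exactly the items
 * that were activated before the failing one, then report the error. */
static int activate_all(struct item *items, int n)
{
	int i, ret = 0;

	for (i = 0; i < n; i++) {
		ret = item_activate(&items[i]);
		if (ret)
			goto cleanup;
	}
	return 0;

cleanup:
	while (--i >= 0)
		item_deactivate(&items[i]);
	return ret;
}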