Commit f3877047 authored by Paul Mundt

Merge branch 'common/irqdomain' into sh-latest

parents 1ca8fe38 1d6a21b0
@@ -93,6 +93,7 @@ Linux IRQ number into the hardware.
Most drivers cannot use this mapping.

==== Legacy ====
+irq_domain_add_simple()
irq_domain_add_legacy()
irq_domain_add_legacy_isa()
@@ -115,3 +116,7 @@ The legacy map should only be used if fixed IRQ mappings must be
supported. For example, ISA controllers would use the legacy map for
mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
numbers.
+
+Most users of legacy mappings should use irq_domain_add_simple() which
+will use a legacy domain only if an IRQ range is supplied by the
+system and will otherwise use a linear domain mapping.
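
For orientation, a minimal sketch of a hypothetical driver picking up irq_domain_add_simple(); the foo_* names and FOO_NR_IRQS are invented for illustration and are not part of this commit:

#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_NR_IRQS	32

static const struct irq_domain_ops foo_irq_domain_ops = {
	.xlate = irq_domain_xlate_onecell,
};

static int foo_intc_probe(struct device_node *np, unsigned int irq_base)
{
	struct irq_domain *domain;

	/*
	 * irq_base > 0: Linux IRQ numbers are pre-assigned, so a legacy
	 * domain is created and every mapping is set up immediately.
	 * irq_base == 0: a linear domain is created and mappings are
	 * established lazily through irq_create_mapping().
	 */
	domain = irq_domain_add_simple(np, FOO_NR_IRQS, irq_base,
				       &foo_irq_domain_ops, NULL);
	if (!domain)
		return -ENOMEM;

	return 0;
}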
@@ -111,7 +111,7 @@ static unsigned int icp_hv_get_irq(void)
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;

-irq = irq_radix_revmap_lookup(xics_host, vec);
+irq = irq_find_mapping(xics_host, vec);
if (likely(irq != NO_IRQ)) {
xics_push_cppr(vec);
return irq;
......
@@ -119,7 +119,7 @@ static unsigned int icp_native_get_irq(void)
if (vec == XICS_IRQ_SPURIOUS)
return NO_IRQ;

-irq = irq_radix_revmap_lookup(xics_host, vec);
+irq = irq_find_mapping(xics_host, vec);
if (likely(irq != NO_IRQ)) {
xics_push_cppr(vec);
return irq;
......
...@@ -329,9 +329,6 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq, ...@@ -329,9 +329,6 @@ static int xics_host_map(struct irq_domain *h, unsigned int virq,
pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw);
/* Insert the interrupt mapping into the radix tree for fast lookup */
irq_radix_revmap_insert(xics_host, virq, hw);
/* They aren't all level sensitive but we just don't really know */ /* They aren't all level sensitive but we just don't really know */
irq_set_status_flags(virq, IRQ_LEVEL); irq_set_status_flags(virq, IRQ_LEVEL);
......
+config SH_INTC
+def_bool y
+select IRQ_DOMAIN
+
comment "Interrupt controller options"

config INTC_USERIMASK
......
-obj-y := access.o chip.o core.o handle.o virq.o
+obj-y := access.o chip.o core.o handle.o irqdomain.o virq.o
obj-$(CONFIG_INTC_BALANCING) += balancing.o
obj-$(CONFIG_INTC_USERIMASK) += userimask.o
......
@@ -25,6 +25,7 @@
#include <linux/stat.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
+#include <linux/irqdomain.h>
#include <linux/device.h>
#include <linux/syscore_ops.h>
#include <linux/list.h>
@@ -310,6 +311,8 @@ int __init register_intc_controller(struct intc_desc *desc)
BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

+intc_irq_domain_init(d, hw);
+
/* register the vectors one by one */
for (i = 0; i < hw->nr_vectors; i++) {
struct intc_vect *vect = hw->vectors + i;
@@ -319,8 +322,8 @@ int __init register_intc_controller(struct intc_desc *desc)
if (!vect->enum_id)
continue;

-res = irq_alloc_desc_at(irq, numa_node_id());
-if (res != irq && res != -EEXIST) {
+res = irq_create_identity_mapping(d->domain, irq);
+if (unlikely(res)) {
pr_err("can't get irq_desc for %d\n", irq);
continue;
}
@@ -340,8 +343,8 @@ int __init register_intc_controller(struct intc_desc *desc)
* IRQ support, each vector still needs to have
* its own backing irq_desc.
*/
-res = irq_alloc_desc_at(irq2, numa_node_id());
-if (res != irq2 && res != -EEXIST) {
+res = irq_create_identity_mapping(d->domain, irq2);
+if (unlikely(res)) {
pr_err("can't get irq_desc for %d\n", irq2);
continue;
}
......
#include <linux/sh_intc.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -66,6 +67,7 @@ struct intc_desc_int {
unsigned int nr_sense;
struct intc_window *window;
unsigned int nr_windows;
+struct irq_domain *domain;
struct irq_chip chip;
bool skip_suspend;
};
@@ -187,6 +189,9 @@ unsigned long intc_get_ack_handle(unsigned int irq);
void intc_enable_disable_enum(struct intc_desc *desc, struct intc_desc_int *d,
intc_enum enum_id, int enable);

+/* irqdomain.c */
+void intc_irq_domain_init(struct intc_desc_int *d, struct intc_hw_desc *hw);
+
/* virq.c */
void intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d);
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d);
......
/*
* IRQ domain support for SH INTC subsystem
*
* Copyright (C) 2012 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#define pr_fmt(fmt) "intc: " fmt
#include <linux/irqdomain.h>
#include <linux/sh_intc.h>
#include <linux/export.h>
#include "internals.h"
/**
* intc_irq_domain_evt_xlate() - Generic xlate for vectored IRQs.
*
* This takes care of exception vector to hwirq translation through
* by way of evt2irq() translation.
*
* Note: For platforms that use a flat vector space without INTEVT this
* basically just mimics irq_domain_xlate_onecell() by way of a nopped
* out evt2irq() implementation.
*/
static int intc_evt_xlate(struct irq_domain *d, struct device_node *ctrlr,
const u32 *intspec, unsigned int intsize,
unsigned long *out_hwirq, unsigned int *out_type)
{
if (WARN_ON(intsize < 1))
return -EINVAL;
*out_hwirq = evt2irq(intspec[0]);
*out_type = IRQ_TYPE_NONE;
return 0;
}
static const struct irq_domain_ops intc_evt_ops = {
.xlate = intc_evt_xlate,
};
void __init intc_irq_domain_init(struct intc_desc_int *d,
struct intc_hw_desc *hw)
{
unsigned int irq_base, irq_end;
/*
* Quick linear revmap check
*/
irq_base = evt2irq(hw->vectors[0].vect);
irq_end = evt2irq(hw->vectors[hw->nr_vectors - 1].vect);
/*
* Linear domains have a hard-wired assertion that IRQs start at
* 0 in order to make some performance optimizations. Lamely
* restrict the linear case to these conditions here, taking the
* tree penalty for linear cases with non-zero hwirq bases.
*/
if (irq_base == 0 && irq_end == (irq_base + hw->nr_vectors - 1))
d->domain = irq_domain_add_linear(NULL, hw->nr_vectors,
&intc_evt_ops, NULL);
else
d->domain = irq_domain_add_tree(NULL, &intc_evt_ops, NULL);
BUG_ON(!d->domain);
}
@@ -112,6 +112,11 @@
};

#ifdef CONFIG_IRQ_DOMAIN
+struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
+unsigned int size,
+unsigned int first_irq,
+const struct irq_domain_ops *ops,
+void *host_data);
struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
@@ -144,16 +149,31 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
extern void irq_domain_remove(struct irq_domain *host);

+extern int irq_domain_associate_many(struct irq_domain *domain,
+unsigned int irq_base,
+irq_hw_number_t hwirq_base, int count);
+static inline int irq_domain_associate(struct irq_domain *domain, unsigned int irq,
+irq_hw_number_t hwirq)
+{
+return irq_domain_associate_many(domain, irq, hwirq, 1);
+}
+
extern unsigned int irq_create_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern void irq_dispose_mapping(unsigned int virq);
extern unsigned int irq_find_mapping(struct irq_domain *host,
irq_hw_number_t hwirq);
extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
-extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
-irq_hw_number_t hwirq);
-extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
-irq_hw_number_t hwirq);
+extern int irq_create_strict_mappings(struct irq_domain *domain,
+unsigned int irq_base,
+irq_hw_number_t hwirq_base, int count);
+
+static inline int irq_create_identity_mapping(struct irq_domain *host,
+irq_hw_number_t hwirq)
+{
+return irq_create_strict_mappings(host, hwirq, hwirq, 1);
+}
+
extern unsigned int irq_linear_revmap(struct irq_domain *host,
irq_hw_number_t hwirq);
......
@@ -21,6 +21,7 @@
#include <linux/kref.h>
#include <linux/mod_devicetable.h>
#include <linux/spinlock.h>
+#include <linux/topology.h>
#include <asm/byteorder.h>
#include <asm/errno.h>
@@ -158,11 +159,6 @@ static inline unsigned long of_read_ulong(const __be32 *cell, int size)

#define OF_BAD_ADDR ((u64)-1)

-#ifndef of_node_to_nid
-static inline int of_node_to_nid(struct device_node *np) { return -1; }
-#define of_node_to_nid of_node_to_nid
-#endif
-
static inline const char* of_node_full_name(struct device_node *np)
{
return np ? np->full_name : "<no-node>";
@@ -427,6 +423,15 @@ static inline int of_machine_is_compatible(const char *compat)
while (0)
#endif /* CONFIG_OF */

+#ifndef of_node_to_nid
+static inline int of_node_to_nid(struct device_node *np)
+{
+return numa_node_id();
+}
+#define of_node_to_nid of_node_to_nid
+#endif
+
/**
* of_property_read_bool - Findfrom a property
* @np: device node from which the property value is to be read.
......
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/smp.h>
@@ -45,7 +46,8 @@ static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
{
struct irq_domain *domain;

-domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+domain = kzalloc_node(sizeof(*domain), GFP_KERNEL,
+of_node_to_nid(of_node));
if (WARN_ON(!domain))
return NULL;
@@ -137,6 +139,36 @@ static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
}
/**
* irq_domain_add_simple() - Allocate and register a simple irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: total number of irqs in mapping
* @first_irq: first number of irq block assigned to the domain
* @ops: map/unmap domain callbacks
* @host_data: Controller private data pointer
*
* Allocates a legacy irq_domain if irq_base is positive or a linear
* domain otherwise.
*
* This is intended to implement the expected behaviour for most
* interrupt controllers which is that a linear mapping should
* normally be used unless the system requires a legacy mapping in
* order to support supplying interrupt numbers during non-DT
* registration of devices.
*/
struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
unsigned int size,
unsigned int first_irq,
const struct irq_domain_ops *ops,
void *host_data)
{
if (first_irq > 0)
return irq_domain_add_legacy(of_node, size, first_irq, 0,
ops, host_data);
else
return irq_domain_add_linear(of_node, size, ops, host_data);
}
/**
* irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
@@ -203,6 +235,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
* one can then use irq_create_mapping() to
* explicitly change them
*/
+if (ops->map)
ops->map(domain, irq, hwirq);

/* Clear norequest flags */
@@ -215,7 +248,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);

/**
-* irq_domain_add_linear() - Allocate and register a legacy revmap irq_domain.
+* irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
* @of_node: pointer to interrupt controller's device tree node.
* @size: Number of interrupts in the domain.
* @ops: map/unmap domain callbacks
@@ -229,7 +262,8 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
struct irq_domain *domain;
unsigned int *revmap;

-revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
+revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL,
+of_node_to_nid(of_node));
if (WARN_ON(!revmap))
return NULL;
@@ -330,24 +364,112 @@ void irq_set_default_host(struct irq_domain *domain)
}
EXPORT_SYMBOL_GPL(irq_set_default_host);

-static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
-irq_hw_number_t hwirq)
-{
-struct irq_data *irq_data = irq_get_irq_data(virq);
-
-irq_data->hwirq = hwirq;
-irq_data->domain = domain;
-if (domain->ops->map(domain, virq, hwirq)) {
-pr_debug("irq-%i==>hwirq-0x%lx mapping failed\n", virq, hwirq);
-irq_data->domain = NULL;
-irq_data->hwirq = 0;
-return -1;
-}
-
-irq_clear_status_flags(virq, IRQ_NOREQUEST);
-
-return 0;
-}
+static void irq_domain_disassociate_many(struct irq_domain *domain,
+unsigned int irq_base, int count)
+{
+/*
+ * disassociate in reverse order;
+ * not strictly necessary, but nice for unwinding
+ */
+while (count--) {
+int irq = irq_base + count;
+struct irq_data *irq_data = irq_get_irq_data(irq);
+irq_hw_number_t hwirq = irq_data->hwirq;
+
+if (WARN_ON(!irq_data || irq_data->domain != domain))
+continue;
+
+irq_set_status_flags(irq, IRQ_NOREQUEST);
+
+/* remove chip and handler */
+irq_set_chip_and_handler(irq, NULL, NULL);
+
+/* Make sure it's completed */
+synchronize_irq(irq);
+
+/* Tell the PIC about it */
+if (domain->ops->unmap)
+domain->ops->unmap(domain, irq);
+
+smp_mb();
+
+irq_data->domain = NULL;
+irq_data->hwirq = 0;
+
+/* Clear reverse map */
+switch(domain->revmap_type) {
+case IRQ_DOMAIN_MAP_LINEAR:
+if (hwirq < domain->revmap_data.linear.size)
+domain->revmap_data.linear.revmap[hwirq] = 0;
+break;
+case IRQ_DOMAIN_MAP_TREE:
+mutex_lock(&revmap_trees_mutex);
+radix_tree_delete(&domain->revmap_data.tree, hwirq);
+mutex_unlock(&revmap_trees_mutex);
+break;
+}
+}
+}
+
+int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
+irq_hw_number_t hwirq_base, int count)
+{
+unsigned int virq = irq_base;
+irq_hw_number_t hwirq = hwirq_base;
+int i, ret;
+
+pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
+of_node_full_name(domain->of_node), irq_base, (int)hwirq_base, count);
+
+for (i = 0; i < count; i++) {
+struct irq_data *irq_data = irq_get_irq_data(virq + i);
+
+if (WARN(!irq_data, "error: irq_desc not allocated; "
+"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
+return -EINVAL;
+if (WARN(irq_data->domain, "error: irq_desc already associated; "
+"irq=%i hwirq=0x%x\n", virq + i, (int)hwirq + i))
+return -EINVAL;
+};
+
+for (i = 0; i < count; i++, virq++, hwirq++) {
+struct irq_data *irq_data = irq_get_irq_data(virq);
+
+irq_data->hwirq = hwirq;
+irq_data->domain = domain;
+if (domain->ops->map) {
+ret = domain->ops->map(domain, virq, hwirq);
+if (ret != 0) {
+pr_err("irq-%i==>hwirq-0x%lx mapping failed: %d\n",
+virq, hwirq, ret);
+WARN_ON(1);
+irq_data->domain = NULL;
+irq_data->hwirq = 0;
+goto err_unmap;
+}
+}
+
+switch (domain->revmap_type) {
+case IRQ_DOMAIN_MAP_LINEAR:
+if (hwirq < domain->revmap_data.linear.size)
+domain->revmap_data.linear.revmap[hwirq] = virq;
+break;
+case IRQ_DOMAIN_MAP_TREE:
+mutex_lock(&revmap_trees_mutex);
+radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
+mutex_unlock(&revmap_trees_mutex);
+break;
+}
+
+irq_clear_status_flags(virq, IRQ_NOREQUEST);
+}
+
+return 0;
+
+ err_unmap:
+irq_domain_disassociate_many(domain, irq_base, i);
+return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(irq_domain_associate_many);
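
For reference, the ops->map()/ops->unmap() callbacks invoked by the associate/disassociate paths above are optional; a rough sketch of a controller that provides them (all sample_* identifiers are invented, not taken from this commit):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip sample_irq_chip = {
	.name = "sample",
	/* real .irq_mask/.irq_unmask/.irq_ack hooks omitted from this sketch */
};

static int sample_domain_map(struct irq_domain *d, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	/* called for each new association; a failure here unwinds via err_unmap */
	irq_set_chip_and_handler(virq, &sample_irq_chip, handle_level_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}

static void sample_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	/* called from the disassociate path before the reverse map is cleared */
	irq_set_chip_data(virq, NULL);
}

static const struct irq_domain_ops sample_domain_ops = {
	.map	= sample_domain_map,
	.unmap	= sample_domain_unmap,
	.xlate	= irq_domain_xlate_onecell,
};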
/**
* irq_create_direct_mapping() - Allocate an irq for direct mapping
@@ -364,10 +486,10 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
if (domain == NULL)
domain = irq_default_domain;

-BUG_ON(domain == NULL);
-WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
+if (WARN_ON(!domain || domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP))
+return 0;

-virq = irq_alloc_desc_from(1, 0);
+virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (!virq) {
pr_debug("create_direct virq allocation failed\n");
return 0;
@@ -380,7 +502,7 @@ unsigned int irq_create_direct_mapping(struct irq_domain *domain)
}
pr_debug("create_direct obtained virq %d\n", virq);

-if (irq_setup_virq(domain, virq, virq)) {
+if (irq_domain_associate(domain, virq, virq)) {
irq_free_desc(virq);
return 0;
}
@@ -433,16 +555,15 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
hint = hwirq % nr_irqs;
if (hint == 0)
hint++;
-virq = irq_alloc_desc_from(hint, 0);
+virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node));
if (virq <= 0)
-virq = irq_alloc_desc_from(1, 0);
+virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node));
if (virq <= 0) {
pr_debug("-> virq allocation failed\n");
return 0;
}

-if (irq_setup_virq(domain, virq, hwirq)) {
-if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
+if (irq_domain_associate(domain, virq, hwirq)) {
irq_free_desc(virq);
return 0;
}
@@ -454,6 +575,44 @@ unsigned int irq_create_mapping(struct irq_domain *domain,
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
/**
* irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
* @domain: domain owning the interrupt range
* @irq_base: beginning of linux IRQ range
* @hwirq_base: beginning of hardware IRQ range
* @count: Number of interrupts to map
*
* This routine is used for allocating and mapping a range of hardware
* irqs to linux irqs where the linux irq numbers are at pre-defined
* locations. For use by controllers that already have static mappings
* to insert in to the domain.
*
* Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
* domain insertion.
*
* 0 is returned upon success, while any failure to establish a static
* mapping is treated as an error.
*/
int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
irq_hw_number_t hwirq_base, int count)
{
int ret;
ret = irq_alloc_descs(irq_base, irq_base, count,
of_node_to_nid(domain->of_node));
if (unlikely(ret < 0))
return ret;
ret = irq_domain_associate_many(domain, irq_base, hwirq_base, count);
if (unlikely(ret < 0)) {
irq_free_descs(irq_base, count);
return ret;
}
return 0;
}
EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
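
For orientation only, a sketch of how a controller with a fixed, pre-assigned Linux IRQ block might use the new helper; BAR_IRQ_BASE, BAR_NR_IRQS and bar_setup_static_irqs() are invented for this example, not taken from the commit:

#include <linux/irqdomain.h>

#define BAR_IRQ_BASE	64
#define BAR_NR_IRQS	32

static int bar_setup_static_irqs(struct irq_domain *bar_domain)
{
	int ret;

	/* map hwirqs 0..BAR_NR_IRQS-1 onto the fixed range starting at BAR_IRQ_BASE */
	ret = irq_create_strict_mappings(bar_domain, BAR_IRQ_BASE,
					 0, BAR_NR_IRQS);
	if (ret)
		pr_err("bar: failed to map fixed IRQ range: %d\n", ret);

	return ret;
}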
unsigned int irq_create_of_mapping(struct device_node *controller,
const u32 *intspec, unsigned int intsize)
{
@@ -511,7 +670,6 @@ void irq_dispose_mapping(unsigned int virq)
{
struct irq_data *irq_data = irq_get_irq_data(virq);
struct irq_domain *domain;
-irq_hw_number_t hwirq;

if (!virq || !irq_data)
return;
@@ -524,33 +682,7 @@ void irq_dispose_mapping(unsigned int virq)
if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
return;

-irq_set_status_flags(virq, IRQ_NOREQUEST);
-
-/* remove chip and handler */
-irq_set_chip_and_handler(virq, NULL, NULL);
-
-/* Make sure it's completed */
-synchronize_irq(virq);
-
-/* Tell the PIC about it */
-if (domain->ops->unmap)
-domain->ops->unmap(domain, virq);
-smp_mb();
-
-/* Clear reverse map */
-hwirq = irq_data->hwirq;
-switch(domain->revmap_type) {
-case IRQ_DOMAIN_MAP_LINEAR:
-if (hwirq < domain->revmap_data.linear.size)
-domain->revmap_data.linear.revmap[hwirq] = 0;
-break;
-case IRQ_DOMAIN_MAP_TREE:
-mutex_lock(&revmap_trees_mutex);
-radix_tree_delete(&domain->revmap_data.tree, hwirq);
-mutex_unlock(&revmap_trees_mutex);
-break;
-}
+irq_domain_disassociate_many(domain, virq, 1);

irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
@@ -559,16 +691,11 @@ EXPORT_SYMBOL_GPL(irq_dispose_mapping);
* irq_find_mapping() - Find a linux irq from an hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
-*
-* This is a slow path, for use by generic code. It's expected that an
-* irq controller implementation directly calls the appropriate low level
-* mapping function.
*/
unsigned int irq_find_mapping(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
-unsigned int i;
-unsigned int hint = hwirq % nr_irqs;
+struct irq_data *data;

/* Look for default domain if nececssary */
if (domain == NULL)
@@ -576,115 +703,47 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
if (domain == NULL)
return 0;

-/* legacy -> bail early */
-if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
-return irq_domain_legacy_revmap(domain, hwirq);
-
-/* Slow path does a linear search of the map */
-if (hint == 0)
-hint = 1;
-i = hint;
-do {
-struct irq_data *data = irq_get_irq_data(i);
-if (data && (data->domain == domain) && (data->hwirq == hwirq))
-return i;
-i++;
-if (i >= nr_irqs)
-i = 1;
-} while(i != hint);
-return 0;
-}
-EXPORT_SYMBOL_GPL(irq_find_mapping);
-
-/**
-* irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
-* @domain: domain owning this hardware interrupt
-* @hwirq: hardware irq number in that domain space
-*
-* This is a fast path, for use by irq controller code that uses radix tree
-* revmaps
-*/
-unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
-irq_hw_number_t hwirq)
-{
-struct irq_data *irq_data;
-
-if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
-return irq_find_mapping(domain, hwirq);
-
-/*
- * Freeing an irq can delete nodes along the path to
- * do the lookup via call_rcu.
- */
-rcu_read_lock();
-irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
-rcu_read_unlock();
-
-/*
- * If found in radix tree, then fine.
- * Else fallback to linear lookup - this should not happen in practice
- * as it means that we failed to insert the node in the radix tree.
- */
-return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
-}
-EXPORT_SYMBOL_GPL(irq_radix_revmap_lookup);
-
-/**
-* irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
-* @domain: domain owning this hardware interrupt
-* @virq: linux irq number
-* @hwirq: hardware irq number in that domain space
-*
-* This is for use by irq controllers that use a radix tree reverse
-* mapping for fast lookup.
-*/
-void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
-irq_hw_number_t hwirq)
-{
-struct irq_data *irq_data = irq_get_irq_data(virq);
-
-if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
-return;
-
-if (virq) {
-mutex_lock(&revmap_trees_mutex);
-radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
-mutex_unlock(&revmap_trees_mutex);
-}
-}
-EXPORT_SYMBOL_GPL(irq_radix_revmap_insert);
+switch (domain->revmap_type) {
+case IRQ_DOMAIN_MAP_LEGACY:
+return irq_domain_legacy_revmap(domain, hwirq);
+case IRQ_DOMAIN_MAP_LINEAR:
+return irq_linear_revmap(domain, hwirq);
+case IRQ_DOMAIN_MAP_TREE:
+rcu_read_lock();
+data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
+rcu_read_unlock();
+if (data)
+return data->irq;
+break;
+case IRQ_DOMAIN_MAP_NOMAP:
+data = irq_get_irq_data(hwirq);
+if (data && (data->domain == domain) && (data->hwirq == hwirq))
+return hwirq;
+break;
+}
+
+return 0;
+}
+EXPORT_SYMBOL_GPL(irq_find_mapping);
/**
* irq_linear_revmap() - Find a linux irq from a hw irq number.
* @domain: domain owning this hardware interrupt
* @hwirq: hardware irq number in that domain space
*
-* This is a fast path, for use by irq controller code that uses linear
-* revmaps. It does fallback to the slow path if the revmap doesn't exist
-* yet and will create the revmap entry with appropriate locking
+* This is a fast path that can be called directly by irq controller code to
+* save a handful of instructions.
*/
unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq)
{
-unsigned int *revmap;
-
-if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
-return irq_find_mapping(domain, hwirq);
-
-/* Check revmap bounds */
-if (unlikely(hwirq >= domain->revmap_data.linear.size))
-return irq_find_mapping(domain, hwirq);
-
-/* Check if revmap was allocated */
-revmap = domain->revmap_data.linear.revmap;
-if (unlikely(revmap == NULL))
-return irq_find_mapping(domain, hwirq);
-
-/* Fill up revmap with slow path if no mapping found */
-if (unlikely(!revmap[hwirq]))
-revmap[hwirq] = irq_find_mapping(domain, hwirq);
-
-return revmap[hwirq];
+BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
+
+/* Check revmap bounds; complain if exceeded */
+if (WARN_ON(hwirq >= domain->revmap_data.linear.size))
+return 0;
+
+return domain->revmap_data.linear.revmap[hwirq];
}
EXPORT_SYMBOL_GPL(irq_linear_revmap);
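
Since irq_linear_revmap() is now a plain bounds-checked array lookup, a chained handler that owns a linear domain can call it directly on the dispatch path. A rough sketch of that pattern follows; the baz_* names and register layout are invented, not part of this commit:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

struct baz_intc {
	void __iomem *base;
	struct irq_domain *domain;	/* created with irq_domain_add_linear() */
};

#define BAZ_PENDING	0x00		/* one bit per hwirq, hypothetical layout */

static void baz_demux(unsigned int irq, struct irq_desc *desc)
{
	struct baz_intc *intc = irq_desc_get_handler_data(desc);
	unsigned long pending = readl(intc->base + BAZ_PENDING);
	unsigned int hwirq;

	/* linear domain: the revmap lookup is a bounds check plus array read */
	for_each_set_bit(hwirq, &pending, 32)
		generic_handle_irq(irq_linear_revmap(intc->domain, hwirq));
}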
@@ -761,12 +820,6 @@ static int __init irq_debugfs_init(void)
__initcall(irq_debugfs_init);
#endif /* CONFIG_IRQ_DOMAIN_DEBUG */

-static int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
-irq_hw_number_t hwirq)
-{
-return 0;
-}
-
/**
* irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
*
@@ -829,7 +882,6 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d,
EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);

const struct irq_domain_ops irq_domain_simple_ops = {
-.map = irq_domain_simple_map,
.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
......