irqdomain: merge linear and tree reverse mappings.

Keeping them separate makes irq_domain more complex and adds a lot of
code (as proven by the diffstat).  Merging them simplifies the whole
scheme.  This change makes it so both the tree and linear methods can be
used by the same irq_domain instance.  If the hwirq is less than the
->linear_size, then the linear map is used to reverse map the hwirq.
Otherwise the radix tree is used.  The test for which map to use is no
more expensive than the existing code, so the performance of the fast path
is preserved.

It also means that complex interrupt controllers can use both the
linear map and a tree in the same domain.  This may be useful for an
interrupt controller with a base set of core irqs and a large number
of GPIOs which might be used as irqs.  The linear map could cover the
core irqs, and the tree used for the gpios.

v2: Drop reorganization of revmap data

Signed-off-by: Grant Likely <grant.likely@secretlab.ca>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Rob Herring <rob.herring@calxeda.com>
This commit is contained in:
Grant Likely 2012-07-11 17:24:31 +01:00 committed by Grant Likely
parent 0bb4afb45d
commit cef5075c8c
2 changed files with 39 additions and 86 deletions

View File

@ -75,7 +75,6 @@ struct irq_domain_chip_generic;
* @link: Element in global irq_domain list. * @link: Element in global irq_domain list.
* @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
* will be one of the IRQ_DOMAIN_MAP_* values. * will be one of the IRQ_DOMAIN_MAP_* values.
* @revmap_data: Revmap method specific data.
* @ops: pointer to irq_domain methods * @ops: pointer to irq_domain methods
* @host_data: private data pointer for use by owner. Not touched by irq_domain * @host_data: private data pointer for use by owner. Not touched by irq_domain
* core code. * core code.
@ -93,10 +92,9 @@ struct irq_domain {
/* type of reverse mapping_technique */ /* type of reverse mapping_technique */
unsigned int revmap_type; unsigned int revmap_type;
union { struct {
struct { struct {
unsigned int size; unsigned int size;
unsigned int *revmap;
} linear; } linear;
struct { struct {
unsigned int max_irq; unsigned int max_irq;
@ -111,11 +109,13 @@ struct irq_domain {
struct device_node *of_node; struct device_node *of_node;
/* Optional pointer to generic interrupt chips */ /* Optional pointer to generic interrupt chips */
struct irq_domain_chip_generic *gc; struct irq_domain_chip_generic *gc;
/* Linear reverse map */
unsigned int linear_revmap[];
}; };
#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */ #define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */ #define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
#ifdef CONFIG_IRQ_DOMAIN #ifdef CONFIG_IRQ_DOMAIN
struct irq_domain *irq_domain_add_simple(struct device_node *of_node, struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
@ -137,10 +137,6 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
unsigned int max_irq, unsigned int max_irq,
const struct irq_domain_ops *ops, const struct irq_domain_ops *ops,
void *host_data); void *host_data);
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data);
extern struct irq_domain *irq_find_host(struct device_node *node); extern struct irq_domain *irq_find_host(struct device_node *node);
extern void irq_set_default_host(struct irq_domain *host); extern void irq_set_default_host(struct irq_domain *host);
@ -152,6 +148,12 @@ static inline struct irq_domain *irq_domain_add_legacy_isa(
return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops, return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
host_data); host_data);
} }
static inline struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
return irq_domain_add_linear(of_node, 0, ops, host_data);
}
extern void irq_domain_remove(struct irq_domain *host); extern void irq_domain_remove(struct irq_domain *host);

View File

@ -34,22 +34,24 @@ static struct irq_domain *irq_default_domain;
* to IRQ domain, or NULL on failure. * to IRQ domain, or NULL on failure.
*/ */
static struct irq_domain *irq_domain_alloc(struct device_node *of_node, static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
unsigned int revmap_type, unsigned int revmap_type, int size,
const struct irq_domain_ops *ops, const struct irq_domain_ops *ops,
void *host_data) void *host_data)
{ {
struct irq_domain *domain; struct irq_domain *domain;
domain = kzalloc_node(sizeof(*domain), GFP_KERNEL, domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
of_node_to_nid(of_node)); GFP_KERNEL, of_node_to_nid(of_node));
if (WARN_ON(!domain)) if (WARN_ON(!domain))
return NULL; return NULL;
/* Fill structure */ /* Fill structure */
INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
domain->revmap_type = revmap_type; domain->revmap_type = revmap_type;
domain->ops = ops; domain->ops = ops;
domain->host_data = host_data; domain->host_data = host_data;
domain->of_node = of_node_get(of_node); domain->of_node = of_node_get(of_node);
domain->revmap_data.linear.size = size;
return domain; return domain;
} }
@ -81,22 +83,12 @@ void irq_domain_remove(struct irq_domain *domain)
{ {
mutex_lock(&irq_domain_mutex); mutex_lock(&irq_domain_mutex);
switch (domain->revmap_type) { /*
case IRQ_DOMAIN_MAP_TREE: * radix_tree_delete() takes care of destroying the root
/* * node when all entries are removed. Shout if there are
* radix_tree_delete() takes care of destroying the root * any mappings left.
* node when all entries are removed. Shout if there are */
* any mappings left. WARN_ON(domain->revmap_data.tree.height);
*/
WARN_ON(domain->revmap_data.tree.height);
break;
case IRQ_DOMAIN_MAP_LINEAR:
kfree(domain->revmap_data.linear.revmap);
domain->revmap_data.linear.size = 0;
break;
case IRQ_DOMAIN_MAP_NOMAP:
break;
}
list_del(&domain->link); list_del(&domain->link);
@ -223,20 +215,11 @@ struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
void *host_data) void *host_data)
{ {
struct irq_domain *domain; struct irq_domain *domain;
unsigned int *revmap;
revmap = kzalloc_node(sizeof(*revmap) * size, GFP_KERNEL, domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, size, ops, host_data);
of_node_to_nid(of_node)); if (!domain)
if (WARN_ON(!revmap))
return NULL; return NULL;
domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
if (!domain) {
kfree(revmap);
return NULL;
}
domain->revmap_data.linear.size = size;
domain->revmap_data.linear.revmap = revmap;
irq_domain_add(domain); irq_domain_add(domain);
return domain; return domain;
} }
@ -248,7 +231,7 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
void *host_data) void *host_data)
{ {
struct irq_domain *domain = irq_domain_alloc(of_node, struct irq_domain *domain = irq_domain_alloc(of_node,
IRQ_DOMAIN_MAP_NOMAP, ops, host_data); IRQ_DOMAIN_MAP_NOMAP, 0, ops, host_data);
if (domain) { if (domain) {
domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0; domain->revmap_data.nomap.max_irq = max_irq ? max_irq : ~0;
irq_domain_add(domain); irq_domain_add(domain);
@ -257,28 +240,6 @@ struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
} }
EXPORT_SYMBOL_GPL(irq_domain_add_nomap); EXPORT_SYMBOL_GPL(irq_domain_add_nomap);
/**
* irq_domain_add_tree()
* @of_node: pointer to interrupt controller's device tree node.
* @ops: map/unmap domain callbacks
*
* Note: The radix tree will be allocated later during boot automatically
* (the reverse mapping will use the slow path until that happens).
*/
struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
const struct irq_domain_ops *ops,
void *host_data)
{
struct irq_domain *domain = irq_domain_alloc(of_node,
IRQ_DOMAIN_MAP_TREE, ops, host_data);
if (domain) {
INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
irq_domain_add(domain);
}
return domain;
}
EXPORT_SYMBOL_GPL(irq_domain_add_tree);
/** /**
* irq_find_host() - Locates a domain for a given device node * irq_find_host() - Locates a domain for a given device node
* @node: device-tree node of the interrupt controller * @node: device-tree node of the interrupt controller
@ -359,17 +320,13 @@ static void irq_domain_disassociate_many(struct irq_domain *domain,
irq_data->domain = NULL; irq_data->domain = NULL;
irq_data->hwirq = 0; irq_data->hwirq = 0;
/* Clear reverse map */ /* Clear reverse map for this hwirq */
switch(domain->revmap_type) { if (hwirq < domain->revmap_data.linear.size) {
case IRQ_DOMAIN_MAP_LINEAR: domain->linear_revmap[hwirq] = 0;
if (hwirq < domain->revmap_data.linear.size) } else {
domain->revmap_data.linear.revmap[hwirq] = 0;
break;
case IRQ_DOMAIN_MAP_TREE:
mutex_lock(&revmap_trees_mutex); mutex_lock(&revmap_trees_mutex);
radix_tree_delete(&domain->revmap_data.tree, hwirq); radix_tree_delete(&domain->revmap_data.tree, hwirq);
mutex_unlock(&revmap_trees_mutex); mutex_unlock(&revmap_trees_mutex);
break;
} }
} }
} }
@ -421,16 +378,12 @@ int irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
domain->name = irq_data->chip->name; domain->name = irq_data->chip->name;
} }
switch (domain->revmap_type) { if (hwirq < domain->revmap_data.linear.size) {
case IRQ_DOMAIN_MAP_LINEAR: domain->linear_revmap[hwirq] = virq;
if (hwirq < domain->revmap_data.linear.size) } else {
domain->revmap_data.linear.revmap[hwirq] = virq;
break;
case IRQ_DOMAIN_MAP_TREE:
mutex_lock(&revmap_trees_mutex); mutex_lock(&revmap_trees_mutex);
radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data); radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
mutex_unlock(&revmap_trees_mutex); mutex_unlock(&revmap_trees_mutex);
break;
} }
irq_clear_status_flags(virq, IRQ_NOREQUEST); irq_clear_status_flags(virq, IRQ_NOREQUEST);
@ -667,13 +620,6 @@ unsigned int irq_find_mapping(struct irq_domain *domain,
switch (domain->revmap_type) { switch (domain->revmap_type) {
case IRQ_DOMAIN_MAP_LINEAR: case IRQ_DOMAIN_MAP_LINEAR:
return irq_linear_revmap(domain, hwirq); return irq_linear_revmap(domain, hwirq);
case IRQ_DOMAIN_MAP_TREE:
rcu_read_lock();
data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
rcu_read_unlock();
if (data)
return data->irq;
break;
case IRQ_DOMAIN_MAP_NOMAP: case IRQ_DOMAIN_MAP_NOMAP:
data = irq_get_irq_data(hwirq); data = irq_get_irq_data(hwirq);
if (data && (data->domain == domain) && (data->hwirq == hwirq)) if (data && (data->domain == domain) && (data->hwirq == hwirq))
@ -696,13 +642,18 @@ EXPORT_SYMBOL_GPL(irq_find_mapping);
unsigned int irq_linear_revmap(struct irq_domain *domain, unsigned int irq_linear_revmap(struct irq_domain *domain,
irq_hw_number_t hwirq) irq_hw_number_t hwirq)
{ {
struct irq_data *data;
BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR); BUG_ON(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR);
/* Check revmap bounds; complain if exceeded */ /* Check revmap bounds; complain if exceeded */
if (WARN_ON(hwirq >= domain->revmap_data.linear.size)) if (hwirq >= domain->revmap_data.linear.size) {
return 0; rcu_read_lock();
data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
rcu_read_unlock();
return data ? data->irq : 0;
}
return domain->revmap_data.linear.revmap[hwirq]; return domain->linear_revmap[hwirq];
} }
EXPORT_SYMBOL_GPL(irq_linear_revmap); EXPORT_SYMBOL_GPL(irq_linear_revmap);