Merge branch 'irq/for-gpio' into irq/core

Merge the flow handlers and irq domain extensions which are in a separate
branch so they can be consumed by the gpio folks.
Author: Thomas Gleixner
Date:   2017-08-18 11:22:27 +02:00
Commit: 6629695465

5 changed files with 318 additions and 30 deletions

diff --git a/include/linux/irq.h b/include/linux/irq.h

@@ -568,6 +568,8 @@ extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
 extern int irq_chip_pm_get(struct irq_data *data);
 extern int irq_chip_pm_put(struct irq_data *data);
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
+extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
 extern void irq_chip_enable_parent(struct irq_data *data);
 extern void irq_chip_disable_parent(struct irq_data *data);
 extern void irq_chip_ack_parent(struct irq_data *data);
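
Editor's note (not part of the diff): these declarations are the flow handlers a stacked GPIO driver installs when its irqchip sits on top of a transparent parent controller. A minimal sketch of how such a driver might select one from its hierarchy ->alloc() callback; every my_gpio_* name is a hypothetical stand-in, and the driver's Kconfig entry would need to select IRQ_FASTEOI_HIERARCHY_HANDLERS (see the kernel/irq/Kconfig hunk below) for these symbols to exist:

#include <linux/irq.h>
#include <linux/irqdomain.h>

/* Hypothetical stacked irqchip; all my_gpio_* names are assumptions. */
static struct irq_chip my_gpio_irq_chip;

static int my_gpio_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq = fwspec->param[0];

        /*
         * Edge interrupts need ->irq_ack() on the chip, so install
         * handle_fasteoi_ack_irq; a level-triggered line would use
         * handle_fasteoi_mask_irq (which calls ->irq_mask_ack()).
         */
        irq_domain_set_info(domain, virq, hwirq, &my_gpio_irq_chip,
                            NULL, handle_fasteoi_ack_irq, NULL, NULL);

        /* Forward the allocation to the transparent parent domain. */
        return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, fwspec);
}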

diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h

@@ -460,6 +460,9 @@ extern void irq_domain_free_irqs_common(struct irq_domain *domain,
 extern void irq_domain_free_irqs_top(struct irq_domain *domain,
                                      unsigned int virq, unsigned int nr_irqs);
+extern int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg);
+extern int irq_domain_pop_irq(struct irq_domain *domain, int virq);
+
 extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
                                         unsigned int irq_base,
                                         unsigned int nr_irqs, void *arg);

diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig

@@ -73,6 +73,10 @@ config IRQ_DOMAIN_HIERARCHY
 	bool
 	select IRQ_DOMAIN
 
+# Support for hierarchical fasteoi+edge and fasteoi+level handlers
+config IRQ_FASTEOI_HIERARCHY_HANDLERS
+	bool
+
 # Generic IRQ IPI support
 config GENERIC_IRQ_IPI
 	bool

diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c

@@ -1092,6 +1092,112 @@ void irq_cpu_offline(void)
 }
 
 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+
+#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
+/**
+ * handle_fasteoi_ack_irq - irq handler for edge hierarchy
+ * stacked on transparent controllers
+ *
+ * @desc: the interrupt description structure for this irq
+ *
+ * Like handle_fasteoi_irq(), but for use with hierarchy where
+ * the irq_chip also needs to have its ->irq_ack() function
+ * called.
+ */
+void handle_fasteoi_ack_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	raw_spin_lock(&desc->lock);
+
+	if (!irq_may_run(desc))
+		goto out;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	/*
+	 * If it's disabled or no action available
+	 * then mask it and get out of here:
+	 */
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		mask_irq(desc);
+		goto out;
+	}
+
+	kstat_incr_irqs_this_cpu(desc);
+	if (desc->istate & IRQS_ONESHOT)
+		mask_irq(desc);
+
+	/* Start handling the irq */
+	desc->irq_data.chip->irq_ack(&desc->irq_data);
+
+	preflow_handler(desc);
+	handle_irq_event(desc);
+
+	cond_unmask_eoi_irq(desc, chip);
+
+	raw_spin_unlock(&desc->lock);
+	return;
+out:
+	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+		chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
+
+/**
+ * handle_fasteoi_mask_irq - irq handler for level hierarchy
+ * stacked on transparent controllers
+ *
+ * @desc: the interrupt description structure for this irq
+ *
+ * Like handle_fasteoi_irq(), but for use with hierarchy where
+ * the irq_chip also needs to have its ->irq_mask_ack() function
+ * called.
+ */
+void handle_fasteoi_mask_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	raw_spin_lock(&desc->lock);
+	mask_ack_irq(desc);
+
+	if (!irq_may_run(desc))
+		goto out;
+
+	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	/*
+	 * If it's disabled or no action available
+	 * then mask it and get out of here:
+	 */
+	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
+		desc->istate |= IRQS_PENDING;
+		mask_irq(desc);
+		goto out;
+	}
+
+	kstat_incr_irqs_this_cpu(desc);
+	if (desc->istate & IRQS_ONESHOT)
+		mask_irq(desc);
+
+	preflow_handler(desc);
+	handle_irq_event(desc);
+
+	cond_unmask_eoi_irq(desc, chip);
+
+	raw_spin_unlock(&desc->lock);
+	return;
+out:
+	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
+		chip->irq_eoi(&desc->irq_data);
+	raw_spin_unlock(&desc->lock);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
+#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
+
 /**
  * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
  * NULL)
@@ -1105,6 +1211,7 @@ void irq_chip_enable_parent(struct irq_data *data)
 	else
 		data->chip->irq_unmask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
 
 /**
  * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
@@ -1119,6 +1226,7 @@ void irq_chip_disable_parent(struct irq_data *data)
 	else
 		data->chip->irq_mask(data);
 }
+EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
 
 /**
  * irq_chip_ack_parent - Acknowledge the parent interrupt
@@ -1181,6 +1289,7 @@ int irq_chip_set_affinity_parent(struct irq_data *data,
 	return -ENOSYS;
 }
+EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
 
 /**
  * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
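
Editor's note (not part of the diff): the new EXPORT_SYMBOL_GPL lines are what allow a modular GPIO driver to reuse the parent-forwarding helpers directly as irq_chip callbacks. A hedged sketch under that assumption; my_gpio_irq_chip is a hypothetical name, not something this commit defines:

#include <linux/irq.h>

/*
 * Every callback simply forwards to the parent irqchip, which is
 * exactly what the helpers exported above do, so a GPIO driver built
 * as a module can now link against them.
 */
static struct irq_chip my_gpio_irq_chip = {
        .name             = "my-gpio",
        .irq_enable       = irq_chip_enable_parent,
        .irq_disable      = irq_chip_disable_parent,
        .irq_ack          = irq_chip_ack_parent,
        .irq_mask         = irq_chip_mask_parent,
        .irq_unmask       = irq_chip_unmask_parent,
        .irq_set_affinity = irq_chip_set_affinity_parent,
};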

diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c

@@ -455,6 +455,31 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
+static void irq_domain_clear_mapping(struct irq_domain *domain,
+				     irq_hw_number_t hwirq)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = 0;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_delete(&domain->revmap_tree, hwirq);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
+static void irq_domain_set_mapping(struct irq_domain *domain,
+				   irq_hw_number_t hwirq,
+				   struct irq_data *irq_data)
+{
+	if (hwirq < domain->revmap_size) {
+		domain->linear_revmap[hwirq] = irq_data->irq;
+	} else {
+		mutex_lock(&revmap_trees_mutex);
+		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
+		mutex_unlock(&revmap_trees_mutex);
+	}
+}
+
 void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 {
 	struct irq_data *irq_data = irq_get_irq_data(irq);
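
Editor's note (not part of the diff): the two helpers above capture the revmap's two-level layout: hwirq values below revmap_size resolve through a flat array, larger ones through the radix tree. A stand-alone toy model of that split, for illustration only; a linked list stands in for the kernel's radix tree and locking and error handling are elided:

#include <stdio.h>
#include <stdlib.h>

#define REVMAP_SIZE 16   /* dense hwirqs live in a flat array */

struct sparse_entry {                  /* overflow path for large hwirqs */
        unsigned long hwirq;
        unsigned int virq;
        struct sparse_entry *next;
};

static unsigned int linear_revmap[REVMAP_SIZE];
static struct sparse_entry *sparse_head;

static void set_mapping(unsigned long hwirq, unsigned int virq)
{
        if (hwirq < REVMAP_SIZE) {
                linear_revmap[hwirq] = virq;   /* O(1) dense lookup */
        } else {
                struct sparse_entry *e = malloc(sizeof(*e));
                e->hwirq = hwirq;
                e->virq = virq;
                e->next = sparse_head;
                sparse_head = e;
        }
}

static unsigned int find_mapping(unsigned long hwirq)
{
        struct sparse_entry *e;

        if (hwirq < REVMAP_SIZE)
                return linear_revmap[hwirq];
        for (e = sparse_head; e; e = e->next)
                if (e->hwirq == hwirq)
                        return e->virq;
        return 0;   /* 0 means "no mapping", as in the kernel */
}

int main(void)
{
        set_mapping(3, 34);      /* dense: linear array */
        set_mapping(1000, 35);   /* sparse: overflow path */
        printf("hwirq 3 -> virq %u\n", find_mapping(3));
        printf("hwirq 1000 -> virq %u\n", find_mapping(1000));
        return 0;
}

The kernel's actual lookups are irq_linear_revmap() and irq_find_mapping(); the point of factoring out irq_domain_set_mapping()/irq_domain_clear_mapping() is that associate, disassociate, and the new push/pop paths below all share one copy of this logic.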
@@ -483,13 +508,7 @@ void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
 	domain->mapcount--;
 
 	/* Clear reverse map for this hwirq */
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = 0;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_delete(&domain->revmap_tree, hwirq);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_clear_mapping(domain, hwirq);
 }
 
 int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
@@ -533,13 +552,7 @@ int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
 	}
 
 	domain->mapcount++;
-	if (hwirq < domain->revmap_size) {
-		domain->linear_revmap[hwirq] = virq;
-	} else {
-		mutex_lock(&revmap_trees_mutex);
-		radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
-		mutex_unlock(&revmap_trees_mutex);
-	}
+	irq_domain_set_mapping(domain, hwirq, irq_data);
 	mutex_unlock(&irq_domain_mutex);
 
 	irq_clear_status_flags(virq, IRQ_NOREQUEST);
@@ -1138,16 +1151,9 @@ static void irq_domain_insert_irq(int virq)
 
 	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
 		struct irq_domain *domain = data->domain;
-		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount++;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = virq;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_insert(&domain->revmap_tree, hwirq, data);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_set_mapping(domain, data->hwirq, data);
 
 		/* If not already assigned, give the domain the chip's name */
 		if (!domain->name && data->chip)
@@ -1171,13 +1177,7 @@ static void irq_domain_remove_irq(int virq)
 		irq_hw_number_t hwirq = data->hwirq;
 
 		domain->mapcount--;
-		if (hwirq < domain->revmap_size) {
-			domain->linear_revmap[hwirq] = 0;
-		} else {
-			mutex_lock(&revmap_trees_mutex);
-			radix_tree_delete(&domain->revmap_tree, hwirq);
-			mutex_unlock(&revmap_trees_mutex);
-		}
+		irq_domain_clear_mapping(domain, hwirq);
 	}
 }
@@ -1362,7 +1362,8 @@ static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
 					   unsigned int irq_base,
 					   unsigned int nr_irqs)
 {
-	domain->ops->free(domain, irq_base, nr_irqs);
+	if (domain->ops->free)
+		domain->ops->free(domain, irq_base, nr_irqs);
 }
 
 int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
@@ -1448,6 +1449,175 @@ int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
 	return ret;
 }
 
+/* The irq_data was moved, fix the revmap to refer to the new location */
+static void irq_domain_fix_revmap(struct irq_data *d)
+{
+	void **slot;
+
+	if (d->hwirq < d->domain->revmap_size)
+		return; /* Not using radix tree. */
+
+	/* Fix up the revmap. */
+	mutex_lock(&revmap_trees_mutex);
+	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
+	if (slot)
+		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
+	mutex_unlock(&revmap_trees_mutex);
+}
+
+/**
+ * irq_domain_push_irq() - Push a domain into the top of a hierarchy.
+ * @domain: Domain to push.
+ * @virq: Irq to push the domain into.
+ * @arg: Passed to the irq_domain_ops alloc() function.
+ *
+ * For an already existing irqdomain hierarchy, as might be obtained
+ * via a call to pci_enable_msix(), add an additional domain to the
+ * head of the processing chain. Must be called before request_irq()
+ * has been called.
+ */
+int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
+{
+	struct irq_data *child_irq_data;
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_desc *desc;
+	int rv = 0;
+
+	/*
+	 * Check that no action has been set, which indicates the virq
+	 * is in a state where this function doesn't have to deal with
+	 * races between interrupt handling and maintaining the
+	 * hierarchy. This will catch gross misuse. Attempting to
+	 * make the check race free would require holding locks across
+	 * calls to struct irq_domain_ops->alloc(), which could lead
+	 * to deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	if (domain->parent != root_irq_data->domain)
+		return -EINVAL;
+
+	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
+				      irq_data_get_node(root_irq_data));
+	if (!child_irq_data)
+		return -ENOMEM;
+
+	mutex_lock(&irq_domain_mutex);
+
+	/* Copy the original irq_data. */
+	*child_irq_data = *root_irq_data;
+
+	/*
+	 * Overwrite the root_irq_data, which is embedded in struct
+	 * irq_desc, with values for this domain.
+	 */
+	root_irq_data->parent_data = child_irq_data;
+	root_irq_data->domain = domain;
+	root_irq_data->mask = 0;
+	root_irq_data->hwirq = 0;
+	root_irq_data->chip = NULL;
+	root_irq_data->chip_data = NULL;
+
+	/* May (probably does) set hwirq, chip, etc. */
+	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
+	if (rv) {
+		/* Restore the original irq_data. */
+		*root_irq_data = *child_irq_data;
+		goto error;
+	}
+
+	irq_domain_fix_revmap(child_irq_data);
+	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);
+
+error:
+	mutex_unlock(&irq_domain_mutex);
+
+	return rv;
+}
+EXPORT_SYMBOL_GPL(irq_domain_push_irq);
+
+/**
+ * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
+ * @domain: Domain to remove.
+ * @virq: Irq to remove the domain from.
+ *
+ * Undo the effects of a call to irq_domain_push_irq(). Must be
+ * called either before request_irq() or after free_irq().
+ */
+int irq_domain_pop_irq(struct irq_domain *domain, int virq)
+{
+	struct irq_data *root_irq_data = irq_get_irq_data(virq);
+	struct irq_data *child_irq_data;
+	struct irq_data *tmp_irq_data;
+	struct irq_desc *desc;
+
+	/*
+	 * Check that no action is set, which indicates the virq is in
+	 * a state where this function doesn't have to deal with races
+	 * between interrupt handling and maintaining the hierarchy.
+	 * This will catch gross misuse. Attempting to make the check
+	 * race free would require holding locks across calls to
+	 * struct irq_domain_ops->free(), which could lead to
+	 * deadlock, so we just do a simple check before starting.
+	 */
+	desc = irq_to_desc(virq);
+	if (!desc)
+		return -EINVAL;
+	if (WARN_ON(desc->action))
+		return -EBUSY;
+
+	if (domain == NULL)
+		return -EINVAL;
+
+	if (!root_irq_data)
+		return -EINVAL;
+
+	tmp_irq_data = irq_domain_get_irq_data(domain, virq);
+
+	/* We can only "pop" if this domain is at the top of the list */
+	if (WARN_ON(root_irq_data != tmp_irq_data))
+		return -EINVAL;
+
+	if (WARN_ON(root_irq_data->domain != domain))
+		return -EINVAL;
+
+	child_irq_data = root_irq_data->parent_data;
+	if (WARN_ON(!child_irq_data))
+		return -EINVAL;
+
+	mutex_lock(&irq_domain_mutex);
+
+	root_irq_data->parent_data = NULL;
+
+	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
+	irq_domain_free_irqs_hierarchy(domain, virq, 1);
+
+	/* Restore the original irq_data. */
+	*root_irq_data = *child_irq_data;
+
+	irq_domain_fix_revmap(root_irq_data);
+
+	mutex_unlock(&irq_domain_mutex);
+
+	kfree(child_irq_data);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
+
 /**
  * irq_domain_free_irqs - Free IRQ number and associated data structures
  * @virq: base IRQ number
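
Editor's note (not part of the diff): a usage sketch for the push/pop pair, following the ordering rule in the kernel-doc above (push before request_irq(), pop only while no action is installed). All my_* names are hypothetical, not something this commit provides:

#include <linux/interrupt.h>
#include <linux/irqdomain.h>

/* Stack an extra domain on an already-allocated interrupt (e.g. an
 * MSI-X vector), then install the handler. */
static int my_setup(struct irq_domain *my_domain, int virq, void *alloc_arg,
                    irq_handler_t handler, void *dev)
{
        int ret;

        /* Must happen before request_irq(), per the kernel-doc above. */
        ret = irq_domain_push_irq(my_domain, virq, alloc_arg);
        if (ret)
                return ret;

        ret = request_irq(virq, handler, 0, "my-dev", dev);
        if (ret)
                irq_domain_pop_irq(my_domain, virq);   /* undo on failure */
        return ret;
}

static void my_teardown(struct irq_domain *my_domain, int virq, void *dev)
{
        free_irq(virq, dev);                  /* no action set afterwards */
        irq_domain_pop_irq(my_domain, virq);  /* now safe to pop */
}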