genirq: Introduce effective affinity mask

There is currently no way to evaluate the effective affinity mask of a
given interrupt. Many irq chips allow only a single target CPU or a subset
of CPUs in the affinity mask.

Updating the affinity mask to the effectively used subset at the time the
affinity is set would be counterproductive, because the information about
the assigned affinity, which CPU hotplug relies on, would be lost. On CPU
hotplug it is also pointless to force migrate an interrupt which is not
effectively targeted at the outgoing CPU. But currently that information
is not available.
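
(Illustration only, not part of this patch: with such a mask available, a
hotplug path could skip interrupts which do not actually target the
outgoing CPU. The helper below is hypothetical; only
irq_data_get_effective_affinity_mask() and cpumask_test_cpu() are real.)

    /*
     * Sketch: check whether an interrupt is effectively targeted at a
     * CPU which is about to go offline and therefore needs migration.
     */
    static bool irq_needs_migration(struct irq_data *d, unsigned int dying_cpu)
    {
            const struct cpumask *eff = irq_data_get_effective_affinity_mask(d);

            return cpumask_test_cpu(dying_cpu, eff);
    }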

Provide a separate mask to be updated by the irq_chip->irq_set_affinity()
implementations. Implement read-only proc files so the user can see the
effective mask as well without having to deduce it from /proc/interrupts.
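
As an illustration, assuming a hypothetical irq chip "foo" whose hardware
can route an interrupt to exactly one CPU, its irq_set_affinity() callback
would pick a target from the requested mask and record it via the new
helper (foo_hw_route_irq() is made up):

    static int foo_irq_set_affinity(struct irq_data *d,
                                    const struct cpumask *mask_val, bool force)
    {
            /* The hardware supports only a single target CPU */
            unsigned int cpu = cpumask_first(mask_val);

            if (cpu >= nr_cpu_ids)
                    return -EINVAL;

            /* Hypothetical register write routing the interrupt to @cpu */
            foo_hw_route_irq(d->hwirq, cpu);

            /* Record which CPU the interrupt is effectively targeted at */
            irq_data_update_effective_affinity(d, cpumask_of(cpu));

            return IRQ_SET_MASK_OK;
    }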

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235446.247834245@linutronix.de

@@ -137,6 +137,9 @@ struct irq_domain;
 * @affinity:		IRQ affinity on SMP. If this is an IPI
 *			related irq, then this is the mask of the
 *			CPUs to which an IPI can be sent.
 * @effective_affinity:	The effective IRQ affinity on SMP as some irq
 *			chips do not allow multi CPU destinations.
 *			A subset of @affinity.
 * @msi_desc:		MSI descriptor
 * @ipi_offset:		Offset of first IPI target cpu in @affinity. Optional.
 */
@@ -148,6 +151,9 @@ struct irq_common_data {
	void			*handler_data;
	struct msi_desc		*msi_desc;
	cpumask_var_t		affinity;
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	cpumask_var_t		effective_affinity;
#endif
#ifdef CONFIG_GENERIC_IRQ_IPI
	unsigned int		ipi_offset;
#endif
@@ -737,6 +743,29 @@ static inline struct cpumask *irq_data_get_affinity_mask(struct irq_data *d)
	return d->common->affinity;
}

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->effective_affinity;
}
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						       const struct cpumask *m)
{
	cpumask_copy(d->common->effective_affinity, m);
}
#else
static inline void irq_data_update_effective_affinity(struct irq_data *d,
						       const struct cpumask *m)
{
}
static inline
struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d)
{
	return d->common->affinity;
}
#endif

unsigned int arch_dynirq_lower_bound(unsigned int from);

int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,

@@ -21,6 +21,10 @@ config GENERIC_IRQ_SHOW
config GENERIC_IRQ_SHOW_LEVEL
	bool

# Supports effective affinity mask
config GENERIC_IRQ_EFFECTIVE_AFF_MASK
	bool

# Facility to allocate a hardware interrupt. This is legacy support
# and should not be used in new code. Use irq domains instead.
config GENERIC_IRQ_LEGACY_ALLOC_HWIRQ

@@ -36,6 +36,10 @@ static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc)
	msk = irq_data_get_affinity_mask(data);
	seq_printf(m, "affinity: %*pbl\n", cpumask_pr_args(msk));
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	msk = irq_data_get_effective_affinity_mask(data);
	seq_printf(m, "effectiv: %*pbl\n", cpumask_pr_args(msk));
#endif
#ifdef CONFIG_GENERIC_PENDING_IRQ
	msk = desc->pending_mask;
	seq_printf(m, "pending: %*pbl\n", cpumask_pr_args(msk));

@@ -60,8 +60,19 @@ static int alloc_masks(struct irq_desc *desc, int node)
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
@@ -324,6 +335,9 @@ static void free_masks(struct irq_desc *desc)
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }

@@ -37,19 +37,47 @@ static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = desc->irq_common_data.effective_affinity;
		break;
#else
		return -EINVAL;
#endif
	};

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}
@@ -80,12 +108,12 @@ static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}
@@ -185,6 +213,44 @@ static const struct file_operations irq_affinity_list_proc_fops = {
	.write		= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}

static int irq_effective_aff_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_effective_aff_proc_show, PDE_DATA(inode));
}

static int irq_effective_aff_list_proc_open(struct inode *inode,
					    struct file *file)
{
	return single_open(file, irq_effective_aff_list_proc_show,
			   PDE_DATA(inode));
}

static const struct file_operations irq_effective_aff_proc_fops = {
	.open		= irq_effective_aff_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static const struct file_operations irq_effective_aff_list_proc_fops = {
	.open		= irq_effective_aff_list_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
@@ -364,6 +430,12 @@ void register_irq_proc(unsigned int irq, struct irq_desc *desc)
	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_data("effective_affinity", 0444, desc->dir,
			 &irq_effective_aff_proc_fops, irqp);
	proc_create_data("effective_affinity_list", 0444, desc->dir,
			 &irq_effective_aff_list_proc_fops, irqp);
# endif
#endif
	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);
@@ -383,6 +455,10 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);