forked from luck/tmp_suning_uos_patched
4308ad8011
When an interrupt is disabled and torn down, the CPU mask currently returned through affinity_hint is all CPUs. The same misleading value is shown for drivers that never provide an affinity_hint mask. In both cases there should be no hint at all, i.e. an empty CPU mask.

[ tglx: use zalloc_cpumask_var instead of clearing it under the lock ]

Signed-off-by: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
Cc: davem@davemloft.net
Cc: arjan@linux.jf.intel.com
Cc: bhutchings@solarflare.com
LKML-Reference: <20100505205638.5426.87189.stgit@ppwaskie-hc2.jf.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
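For context, the value shown under /proc/irq/<irq>/affinity_hint is whatever mask a driver has registered for that interrupt. The fragment below is a minimal, hypothetical driver sketch (the "my_dev" device, handler name and per-queue CPU choice are made up) showing how a driver would publish a hint with irq_set_affinity_hint() and clear it again before freeing the IRQ, which is what leaves the hint empty after teardown.

/*
 * Hypothetical driver fragment: publish an affinity hint for a queue
 * interrupt and clear it again on teardown.  "my_dev", "my_handler" and
 * the per-queue CPU choice are illustrative only.
 */
#include <linux/interrupt.h>
#include <linux/cpumask.h>

static irqreturn_t my_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int my_setup_queue_irq(unsigned int irq, unsigned int cpu, void *my_dev)
{
	int err;

	err = request_irq(irq, my_handler, 0, "my_dev-queue", my_dev);
	if (err)
		return err;

	/* Tell user space (e.g. irqbalance) where we would like this IRQ. */
	irq_set_affinity_hint(irq, cpumask_of(cpu));
	return 0;
}

static void my_teardown_queue_irq(unsigned int irq, void *my_dev)
{
	/* Drop the hint before freeing the IRQ so affinity_hint reads empty. */
	irq_set_affinity_hint(irq, NULL);
	free_irq(irq, my_dev);
}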
342 lines
7.7 KiB
C
/*
 * linux/kernel/irq/proc.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>

#include "internals.h"

static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask = desc->affinity;

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PENDING)
		mask = desc->pending_mask;
#endif
	seq_cpumask(m, mask);
	seq_putc(m, '\n');
	return 0;
}

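/*
 * Show the affinity hint registered by the driver (if any).  The scratch
 * mask is zero-allocated, so an interrupt whose driver never set a hint,
 * or whose hint was cleared on teardown, reports an empty CPU mask here
 * instead of all CPUs.
 */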
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_cpumask(m, mask);
	seq_putc(m, '\n');
	free_cpumask_var(mask);

	return 0;
}

#ifndef is_affinity_mask_valid
#define is_affinity_mask_valid(val) 1
#endif

int no_irq_affinity;
static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)PDE(file->f_path.dentry->d_inode)->data;
	cpumask_var_t new_value;
	int err;

	if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
	    irq_balancing_disabled(irq))
		return -EIO;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto free_cpumask;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/* Special case for empty set - allow the architecture
		   code to set default SMP affinity. */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		irq_set_affinity(irq, new_value);
		err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, PDE(inode)->data);
}

static int irq_affinity_hint_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_hint_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_affinity_proc_fops = {
	.open		= irq_affinity_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= irq_affinity_proc_write,
};

static const struct file_operations irq_affinity_hint_proc_fops = {
	.open		= irq_affinity_hint_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_cpumask(m, irq_default_affinity);
	seq_putc(m, '\n');
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!alloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	if (!is_affinity_mask_valid(new_value)) {
		err = -EINVAL;
		goto out;
	}

	/*
	 * Do not allow disabling IRQs completely - it's a too easy
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, PDE(inode)->data);
}

static const struct file_operations default_affinity_proc_fops = {
	.open		= default_affinity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", desc->node);
	return 0;
}

static int irq_node_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_node_proc_show, PDE(inode)->data);
}

static const struct file_operations irq_node_proc_fops = {
	.open		= irq_node_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif

static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

static int irq_spurious_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_spurious_proc_show, NULL);
}

static const struct file_operations irq_spurious_proc_fops = {
	.open		= irq_spurious_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for (action = desc->action ; action; action = action->next) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
	    !name_unique(irq, action))
		return;

	memset(name, 0, MAX_NAMELEN);
	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
		return;

	memset(name, 0, MAX_NAMELEN);
	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		return;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0600, desc->dir,
			 &irq_affinity_proc_fops, (void *)(long)irq);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_data("affinity_hint", 0400, desc->dir,
			 &irq_affinity_hint_proc_fops, (void *)(long)irq);

	proc_create_data("node", 0444, desc->dir,
			 &irq_node_proc_fops, (void *)(long)irq);
#endif

	proc_create_data("spurious", 0444, desc->dir,
			 &irq_spurious_proc_fops, (void *)(long)irq);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	if (action->dir) {
		struct irq_desc *desc = irq_to_desc(irq);

		remove_proc_entry(action->dir->name, desc->dir);
	}
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0600, NULL,
		    &default_affinity_proc_fops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc) {
		if (!desc)
			continue;

		register_irq_proc(irq, desc);
	}
}