genirq: Add IRQ affinity notifiers
When initiating I/O on a multiqueue and multi-IRQ device, we may want
to select a queue for which the response will be handled on the same
or a nearby CPU.  This requires a reverse-map of IRQ affinity.  Add a
notification mechanism to support this.

This is based closely on work by Thomas Gleixner <tglx@linutronix.de>.

Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Cc: linux-net-drivers@solarflare.com
Cc: Tom Herbert <therbert@google.com>
Cc: David Miller <davem@davemloft.net>
LKML-Reference: <1295470904.11126.84.camel@bwh-desktop>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
commit cd7eab44e9
parent 1bae4ce27c
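For context, a minimal driver-side sketch of how the new interface is meant to be consumed (this is not part of the patch; the foo_* names and the embedded-per-queue layout are assumptions for illustration): the driver supplies notify()/release() callbacks in a struct irq_affinity_notify and registers it with irq_set_affinity_notifier().

/* Hypothetical consumer of the new API; foo_* is illustrative only. */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/cpumask.h>

struct foo_queue {
        unsigned int irq;
        struct irq_affinity_notify affinity_notify;
        /* ... driver-private queue state ... */
};

/* Called from a work item, i.e. process context, after the mask changed. */
static void foo_affinity_notify(struct irq_affinity_notify *notify,
                                const cpumask_t *mask)
{
        struct foo_queue *q = container_of(notify, struct foo_queue,
                                           affinity_notify);

        pr_info("foo: irq %u now affine to CPU %u (first CPU of new mask)\n",
                q->irq, cpumask_first(mask));
}

/* Called when the core drops its last reference; only after this may the
 * structure containing the notifier be freed.  Here it is embedded in a
 * long-lived foo_queue, so there is nothing to do. */
static void foo_affinity_release(struct kref *ref)
{
}

static int foo_enable_affinity_notify(struct foo_queue *q)
{
        q->affinity_notify.notify = foo_affinity_notify;
        q->affinity_notify.release = foo_affinity_release;
        return irq_set_affinity_notifier(q->irq, &q->affinity_notify);
}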
@@ -14,6 +14,8 @@
 #include <linux/smp.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
+#include <linux/kref.h>
+#include <linux/workqueue.h>
 
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
@@ -240,6 +242,35 @@ extern int irq_can_set_affinity(unsigned int irq);
 extern int irq_select_affinity(unsigned int irq);
 
 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
+
+/**
+ * struct irq_affinity_notify - context for notification of IRQ affinity changes
+ * @irq:	Interrupt to which notification applies
+ * @kref:	Reference count, for internal use
+ * @work:	Work item, for internal use
+ * @notify:	Function to be called on change.  This will be
+ *		called in process context.
+ * @release:	Function to be called on release.  This will be
+ *		called in process context.  Once registered, the
+ *		structure must only be freed when this function is
+ *		called or later.
+ */
+struct irq_affinity_notify {
+	unsigned int irq;
+	struct kref kref;
+	struct work_struct work;
+	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
+	void (*release)(struct kref *ref);
+};
+
+extern int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
+
+static inline void irq_run_affinity_notifiers(void)
+{
+	flush_scheduled_work();
+}
+
 #else /* CONFIG_SMP */
 
 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
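The @release documentation above carries the important lifetime rule: the core holds a kref on the notifier while a notification work item may still be pending, so the containing object may only be freed once release() has run. A hedged sketch of the dynamically allocated case, using hypothetical bar_* names:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct bar_notify_ctx {
        struct irq_affinity_notify notify;
        /* ... whatever state the callback needs ... */
};

static void bar_notify(struct irq_affinity_notify *notify,
                       const cpumask_t *mask)
{
        /* react to the new affinity mask */
}

/* Last reference dropped: only now may the context be freed. */
static void bar_release(struct kref *ref)
{
        struct irq_affinity_notify *notify =
                container_of(ref, struct irq_affinity_notify, kref);

        kfree(container_of(notify, struct bar_notify_ctx, notify));
}

static int bar_register(unsigned int irq)
{
        struct bar_notify_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        int err;

        if (!ctx)
                return -ENOMEM;
        ctx->notify.notify = bar_notify;
        ctx->notify.release = bar_release;
        err = irq_set_affinity_notifier(irq, &ctx->notify);
        if (err)
                kfree(ctx);     /* never registered, so safe to free directly */
        return err;
}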
@@ -255,7 +286,7 @@ static inline int irq_can_set_affinity(unsigned int irq)
 static inline int irq_select_affinity(unsigned int irq) { return 0; }
 
 static inline int irq_set_affinity_hint(unsigned int irq,
-					 const struct cpumask *m)
+					const struct cpumask *m)
 {
 	return -EINVAL;
 }
@@ -8,6 +8,7 @@
  * For now it's included from <linux/irq.h>
  */
 
+struct irq_affinity_notify;
 struct proc_dir_entry;
 struct timer_rand_state;
 /**
@@ -24,6 +25,7 @@ struct timer_rand_state;
  * @last_unhandled:	aging timer for unhandled count
  * @irqs_unhandled:	stats field for spurious unhandled interrupts
  * @lock:		locking for SMP
+ * @affinity_notify:	context for notification of affinity changes
  * @pending_mask:	pending rebalanced interrupts
  * @threads_active:	number of irqaction threads currently running
  * @wait_for_threads:	wait queue for sync_irq to wait for threaded handlers
@@ -70,6 +72,7 @@ struct irq_desc {
 	raw_spinlock_t		lock;
 #ifdef CONFIG_SMP
 	const struct cpumask	*affinity_hint;
+	struct irq_affinity_notify *affinity_notify;
 #ifdef CONFIG_GENERIC_PENDING_IRQ
 	cpumask_var_t		pending_mask;
 #endif
@@ -134,6 +134,10 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		irq_set_thread_affinity(desc);
 	}
 #endif
+	if (desc->affinity_notify) {
+		kref_get(&desc->affinity_notify->kref);
+		schedule_work(&desc->affinity_notify->work);
+	}
 	desc->status |= IRQ_AFFINITY_SET;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
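The hunk above is the trigger: whenever irq_set_affinity() installs a new mask, the core takes a reference on the registered notifier and schedules its work item. For the use case in the changelog, a reverse map from CPU to queue so that I/O is issued on a queue whose completion IRQ is affine to the submitting CPU, a driver's notify() callback could look roughly like the following sketch; baz_*, the fixed-size queue array and the u8 reverse map are assumptions, and locking of the map is omitted:

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/types.h>

#define BAZ_MAX_QUEUES 16

struct baz_dev;

struct baz_queue {
        unsigned int irq;
        unsigned int index;
        struct baz_dev *dev;
        struct irq_affinity_notify affinity_notify;
};

struct baz_dev {
        unsigned int nqueues;
        u8 cpu_to_queue[NR_CPUS];       /* CPU -> preferred queue */
        struct baz_queue queue[BAZ_MAX_QUEUES];
};

/* Rebuild the reverse map for the CPUs that now service this queue's IRQ. */
static void baz_affinity_notify(struct irq_affinity_notify *notify,
                                const cpumask_t *mask)
{
        struct baz_queue *q = container_of(notify, struct baz_queue,
                                           affinity_notify);
        struct baz_dev *dev = q->dev;
        int cpu;

        for_each_cpu(cpu, mask)
                dev->cpu_to_queue[cpu] = q->index;
}

/* Submission path: prefer the queue whose IRQ is handled on this CPU. */
static unsigned int baz_pick_queue(struct baz_dev *dev)
{
        return dev->cpu_to_queue[raw_smp_processor_id()] % dev->nqueues;
}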
@@ -155,6 +159,79 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	struct irq_desc *desc = irq_to_desc(notify->irq);
+	cpumask_var_t cpumask;
+	unsigned long flags;
+
+	if (!desc)
+		goto out;
+
+	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		goto out;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (desc->status & IRQ_MOVE_PENDING)
+		cpumask_copy(cpumask, desc->pending_mask);
+	else
+#endif
+		cpumask_copy(cpumask, desc->affinity);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	notify->notify(notify, cpumask);
+
+	free_cpumask_var(cpumask);
+out:
+	kref_put(&notify->kref, notify->release);
+}
+
+/**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq:	Interrupt for which to enable/disable notification
+ * @notify:	Context for notification, or %NULL to disable
+ *		notification.  Function pointers must be initialised;
+ *		the other fields will be initialised by this function.
+ *
+ * Must be called in process context.  Notification may only be enabled
+ * after the IRQ is allocated and must be disabled before the IRQ is
+ * freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_affinity_notify *old_notify;
+	unsigned long flags;
+
+	/* The release function is promised process context */
+	might_sleep();
+
+	if (!desc)
+		return -EINVAL;
+
+	/* Complete initialisation of *notify */
+	if (notify) {
+		notify->irq = irq;
+		kref_init(&notify->kref);
+		INIT_WORK(&notify->work, irq_affinity_notify);
+	}
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	old_notify = desc->affinity_notify;
+	desc->affinity_notify = notify;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	if (old_notify)
+		kref_put(&old_notify->kref, old_notify->release);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
+
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
@@ -1004,6 +1081,11 @@ void free_irq(unsigned int irq, void *dev_id)
 	if (!desc)
 		return;
 
+#ifdef CONFIG_SMP
+	if (WARN_ON(desc->affinity_notify))
+		desc->affinity_notify = NULL;
+#endif
+
 	chip_bus_lock(desc);
 	kfree(__free_irq(irq, dev_id));
 	chip_bus_sync_unlock(desc);
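Finally, the WARN_ON() added to free_irq() above implies a teardown order on the consumer side: unhook the notifier first, flush any notification still in flight, and only then free the IRQ. A sketch of that ordering, with a hypothetical qux_disable() helper (dev_id being whatever was passed to request_irq()):

#include <linux/interrupt.h>

static void qux_disable(unsigned int irq, void *dev_id)
{
        /* 1. Stop further notifications; the old notifier's release()
         *    runs once its last reference is dropped. */
        irq_set_affinity_notifier(irq, NULL);

        /* 2. Wait for a notification that may already have been scheduled;
         *    irq_run_affinity_notifiers() just flushes scheduled work. */
        irq_run_affinity_notifiers();

        /* 3. Only now release the IRQ itself. */
        free_irq(irq, dev_id);
}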