forked from luck/tmp_suning_uos_patched
genirq: Provide forced interrupt threading
Add a commandline parameter "threadirqs" which forces all interrupts except those marked IRQF_NO_THREAD to run threaded. That's mostly a debug option to allow retrieving better debug data from crashing interrupt handlers. If "threadirqs" is not enabled on the kernel command line, then there is no impact in the interrupt hotpath. Architecture code needs to select CONFIG_IRQ_FORCED_THREADING after marking the interrupts which cant be threaded IRQF_NO_THREAD. All interrupts which have IRQF_TIMER set are implict marked IRQF_NO_THREAD. Also all PER_CPU interrupts are excluded. Forced threading hard interrupts also forces all soft interrupt handling into thread context. When enabled it might slow down things a bit, but for debugging problems in interrupt code it's a reasonable penalty as it does not immediately crash and burn the machine when an interrupt handler is buggy. Some test results on a Core2Duo machine: Cache cold run of: # time git grep irq_desc non-threaded threaded real 1m18.741s 1m19.061s user 0m1.874s 0m1.757s sys 0m5.843s 0m5.427s # iperf -c server non-threaded [ 3] 0.0-10.0 sec 1.09 GBytes 933 Mbits/sec [ 3] 0.0-10.0 sec 1.09 GBytes 934 Mbits/sec [ 3] 0.0-10.0 sec 1.09 GBytes 933 Mbits/sec threaded [ 3] 0.0-10.0 sec 1.09 GBytes 939 Mbits/sec [ 3] 0.0-10.0 sec 1.09 GBytes 934 Mbits/sec [ 3] 0.0-10.0 sec 1.09 GBytes 937 Mbits/sec Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> LKML-Reference: <20110223234956.772668648@linutronix.de>
This commit is contained in:
parent
8eb90c30e0
commit
8d32a307e4
|
@ -2436,6 +2436,10 @@ and is between 256 and 4096 characters. It is defined in the file
|
|||
<deci-seconds>: poll all this frequency
|
||||
0: no polling (default)
|
||||
|
||||
threadirqs [KNL]
|
||||
Force threading of all interrupt handlers except those
|
||||
marked explicitly IRQF_NO_THREAD.
|
||||
|
||||
topology= [S390]
|
||||
Format: {off | on}
|
||||
Specify if the kernel should make use of the cpu
|
||||
|
|
|
@ -383,6 +383,13 @@ static inline int disable_irq_wake(unsigned int irq)
|
|||
}
|
||||
#endif /* CONFIG_GENERIC_HARDIRQS */
|
||||
|
||||
|
||||
/*
 * Forced interrupt threading: architectures which select
 * CONFIG_IRQ_FORCED_THREADING make force_irqthreads a real boolean
 * (set via the "threadirqs" boot parameter).  Otherwise it
 * constant-folds to 0, so any hotpath check on it is compiled away.
 */
#ifdef CONFIG_IRQ_FORCED_THREADING
extern bool force_irqthreads;
#else
#define force_irqthreads	(0)
#endif
|
||||
|
||||
#ifndef __ARCH_SET_SOFTIRQ_PENDING
|
||||
#define set_softirq_pending(x) (local_softirq_pending() = (x))
|
||||
#define or_softirq_pending(x) (local_softirq_pending() |= (x))
|
||||
|
|
|
@ -38,6 +38,9 @@ config HARDIRQS_SW_RESEND
|
|||
config IRQ_PREFLOW_FASTEOI
|
||||
bool
|
||||
|
||||
config IRQ_FORCED_THREADING
|
||||
bool
|
||||
|
||||
config SPARSE_IRQ
|
||||
bool "Support sparse irq numbering"
|
||||
depends on HAVE_SPARSE_IRQ
|
||||
|
|
|
@ -27,12 +27,14 @@ extern int noirqdebug;
|
|||
* IRQTF_DIED - handler thread died
|
||||
* IRQTF_WARNED - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
|
||||
* IRQTF_AFFINITY - irq thread is requested to adjust affinity
|
||||
* IRQTF_FORCED_THREAD - irq action is force threaded
|
||||
*/
|
||||
/* Per-irqaction thread flags (see irqaction::thread_flags). */
enum {
	IRQTF_RUNTHREAD,
	IRQTF_DIED,		/* handler thread died */
	IRQTF_WARNED,		/* "IRQ_WAKE_THREAD w/o thread_fn" warning printed */
	IRQTF_AFFINITY,		/* irq thread is requested to adjust affinity */
	IRQTF_FORCED_THREAD,	/* irq action is force threaded */
};
|
||||
|
||||
/*
|
||||
|
|
|
@ -17,6 +17,17 @@
|
|||
|
||||
#include "internals.h"
|
||||
|
||||
#ifdef CONFIG_IRQ_FORCED_THREADING
/*
 * Set by the "threadirqs" boot parameter: forces all interrupt
 * handlers not marked IRQF_NO_THREAD into thread context.
 */
__read_mostly bool force_irqthreads;

/* Early parser for the "threadirqs" kernel command line option. */
static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
|
||||
|
||||
/**
|
||||
* synchronize_irq - wait for pending IRQ handlers (on other CPUs)
|
||||
* @irq: interrupt number to wait for
|
||||
|
@ -701,6 +712,32 @@ static inline void
|
|||
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
|
||||
#endif
|
||||
|
||||
/*
|
||||
* Interrupts which are not explicitely requested as threaded
|
||||
* interrupts rely on the implicit bh/preempt disable of the hard irq
|
||||
* context. So we need to disable bh here to avoid deadlocks and other
|
||||
* side effects.
|
||||
*/
|
||||
static void
|
||||
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
|
||||
{
|
||||
local_bh_disable();
|
||||
action->thread_fn(action->irq, action->dev_id);
|
||||
irq_finalize_oneshot(desc, action, false);
|
||||
local_bh_enable();
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupts explicitely requested as threaded interupts want to be
|
||||
* preemtible - many of them need to sleep and wait for slow busses to
|
||||
* complete.
|
||||
*/
|
||||
static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
|
||||
{
|
||||
action->thread_fn(action->irq, action->dev_id);
|
||||
irq_finalize_oneshot(desc, action, false);
|
||||
}
|
||||
|
||||
/*
|
||||
* Interrupt handler thread
|
||||
*/
|
||||
|
@ -711,8 +748,15 @@ static int irq_thread(void *data)
|
|||
};
|
||||
struct irqaction *action = data;
|
||||
struct irq_desc *desc = irq_to_desc(action->irq);
|
||||
void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
|
||||
int wake;
|
||||
|
||||
if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
|
||||
&action->thread_flags))
|
||||
handler_fn = irq_forced_thread_fn;
|
||||
else
|
||||
handler_fn = irq_thread_fn;
|
||||
|
||||
sched_setscheduler(current, SCHED_FIFO, ¶m);
|
||||
current->irqaction = action;
|
||||
|
||||
|
@ -736,10 +780,7 @@ static int irq_thread(void *data)
|
|||
raw_spin_unlock_irq(&desc->lock);
|
||||
} else {
|
||||
raw_spin_unlock_irq(&desc->lock);
|
||||
|
||||
action->thread_fn(action->irq, action->dev_id);
|
||||
|
||||
irq_finalize_oneshot(desc, action, false);
|
||||
handler_fn(desc, action);
|
||||
}
|
||||
|
||||
wake = atomic_dec_and_test(&desc->threads_active);
|
||||
|
@ -789,6 +830,22 @@ void exit_irq_thread(void)
|
|||
set_bit(IRQTF_DIED, &tsk->irqaction->flags);
|
||||
}
|
||||
|
||||
static void irq_setup_forced_threading(struct irqaction *new)
|
||||
{
|
||||
if (!force_irqthreads)
|
||||
return;
|
||||
if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
|
||||
return;
|
||||
|
||||
new->flags |= IRQF_ONESHOT;
|
||||
|
||||
if (!new->thread_fn) {
|
||||
set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
|
||||
new->thread_fn = new->handler;
|
||||
new->handler = irq_default_primary_handler;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Internal function to register an irqaction - typically used to
|
||||
* allocate special interrupts that are part of the architecture.
|
||||
|
@ -838,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
|
|||
* dummy function which warns when called.
|
||||
*/
|
||||
new->handler = irq_nested_primary_handler;
|
||||
} else {
|
||||
irq_setup_forced_threading(new);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -311,9 +311,21 @@ void irq_enter(void)
|
|||
}
|
||||
|
||||
#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
|
||||
# define invoke_softirq() __do_softirq()
|
||||
static inline void invoke_softirq(void)
|
||||
{
|
||||
if (!force_irqthreads)
|
||||
__do_softirq();
|
||||
else
|
||||
wakeup_softirqd();
|
||||
}
|
||||
#else
|
||||
# define invoke_softirq() do_softirq()
|
||||
static inline void invoke_softirq(void)
|
||||
{
|
||||
if (!force_irqthreads)
|
||||
do_softirq();
|
||||
else
|
||||
wakeup_softirqd();
|
||||
}
|
||||
#endif
|
||||
|
||||
/*
|
||||
|
|
Loading…
Reference in New Issue
Block a user