s390: convert interrupt handling to use generic hardirq
With the introduction of PCI it became apparent that s390 should convert to generic hardirqs as too many drivers do not have the correct dependency for GENERIC_HARDIRQS. On the architecture level s390 does not have irq lines. It has external interrupts, I/O interrupts and adapter interrupts. This patch hard-codes all external interrupts as irq #1, all I/O interrupts as irq #2 and all adapter interrupts as irq #3. The additional information from the lowcore associated with the interrupt is stored in the pt_regs of the interrupt frame, where the interrupt handler can pick it up. For PCI/MSI interrupts the adapter interrupt handler scans the relevant bit fields and calls generic_handle_irq with the virtual irq number for the MSI interrupt.

Reviewed-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent: 5d0d8f4353
Commit: 1f44a22577
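The dispatch scheme described in the commit message can be summarized with a small, self-contained userspace model. This is not part of the patch; apart from the three interrupt constants, every name below (irq_table, MAX_MSI, adapter_pending, adapter_data, msi_handler) is invented for illustration. The point it mirrors is: each interrupt source gets one hard-coded irq number, and the adapter interrupt handler scans a pending-bit field and feeds the stored virtual irq number of each MSI vector to a generic_handle_irq()-style lookup.

/*
 * Hedged sketch (not kernel code): a minimal model of the irq scheme
 * described above. Only EXT_INTERRUPT/IO_INTERRUPT/THIN_INTERRUPT and the
 * "scan bit field, dispatch stored virtual irq" step mirror the patch.
 */
#include <stdio.h>
#include <stdint.h>

#define EXT_INTERRUPT   1        /* all external interrupts        */
#define IO_INTERRUPT    2        /* all I/O interrupts             */
#define THIN_INTERRUPT  3        /* all adapter (thin) interrupts  */
#define NR_IRQS_BASE    4
#define MAX_MSI         8        /* stand-in for CONFIG_PCI_NR_MSI */

typedef void (*irq_handler_fn)(int irq);

static irq_handler_fn irq_table[NR_IRQS_BASE + MAX_MSI];

/* model of generic_handle_irq(): look up and run the handler for "irq" */
static void generic_handle_irq(int irq)
{
    if (irq_table[irq])
        irq_table[irq](irq);
}

/* adapter interrupt bit vector: bit n set => MSI vector n is pending;
 * adapter_data[n] holds the virtual irq number assigned to that vector */
static uint32_t adapter_pending;
static int adapter_data[MAX_MSI];

/* model of the THIN_INTERRUPT handler: scan the bit field and hand the
 * stored virtual irq numbers to generic_handle_irq() */
static void thin_interrupt(int irq)
{
    (void)irq;
    for (int ai = 0; ai < MAX_MSI; ai++)
        if (adapter_pending & (1u << ai)) {
            adapter_pending &= ~(1u << ai);
            generic_handle_irq(adapter_data[ai]);
        }
}

static void msi_handler(int irq)
{
    printf("MSI handler ran for virtual irq %d\n", irq);
}

int main(void)
{
    /* wire up the hard-coded adapter interrupt and one MSI vector */
    irq_table[THIN_INTERRUPT] = thin_interrupt;
    irq_table[NR_IRQS_BASE + 0] = msi_handler;
    adapter_data[0] = NR_IRQS_BASE + 0;

    /* pretend the hardware set bit 0 and raised an adapter interrupt */
    adapter_pending |= 1u << 0;
    generic_handle_irq(THIN_INTERRUPT);
    return 0;
}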
arch/s390/Kconfig

@@ -116,6 +116,7 @@ config S390
    select HAVE_FUNCTION_GRAPH_TRACER
    select HAVE_FUNCTION_TRACER
    select HAVE_FUNCTION_TRACE_MCOUNT_TEST
    select HAVE_GENERIC_HARDIRQS
    select HAVE_KERNEL_BZIP2
    select HAVE_KERNEL_GZIP
    select HAVE_KERNEL_LZ4

@@ -445,6 +446,16 @@ config PCI_NR_FUNCTIONS
      This allows you to specify the maximum number of PCI functions which
      this kernel will support.

config PCI_NR_MSI
    int "Maximum number of MSI interrupts (64-32768)"
    range 64 32768
    default "256"
    help
      This defines the number of virtual interrupts the kernel will
      provide for MSI interrupts. If you configure your system to have
      too few drivers will fail to allocate MSI interrupts for all
      PCI devices.

source "drivers/pci/Kconfig"
source "drivers/pci/pcie/Kconfig"
source "drivers/pci/hotplug/Kconfig"
arch/s390/include/asm/hardirq.h

@@ -20,4 +20,9 @@
#define HARDIRQ_BITS 8

static inline void ack_bad_irq(unsigned int irq)
{
    printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
}

#endif /* __ASM_HARDIRQ_H */
arch/s390/include/asm/hw_irq.h

@@ -4,19 +4,8 @@
#include <linux/msi.h>
#include <linux/pci.h>

static inline struct msi_desc *irq_get_msi_desc(unsigned int irq)
{
    return __irq_get_msi_desc(irq);
}

/* Must be called with msi map lock held */
static inline int irq_set_msi_desc(unsigned int irq, struct msi_desc *msi)
{
    if (!msi)
        return -EINVAL;

    msi->irq = irq;
    return 0;
}

void __init init_airq_interrupts(void);
void __init init_cio_interrupts(void);
void __init init_ext_interrupts(void);

#endif
arch/s390/include/asm/irq.h

@@ -1,17 +1,28 @@
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H

#define EXT_INTERRUPT 1
#define IO_INTERRUPT 2
#define THIN_INTERRUPT 3

#define NR_IRQS_BASE 4

#ifdef CONFIG_PCI_NR_MSI
# define NR_IRQS (NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
#else
# define NR_IRQS NR_IRQS_BASE
#endif

/* This number is used when no interrupt has been assigned */
#define NO_IRQ 0

#ifndef __ASSEMBLY__

#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/types.h>

enum interruption_main_class {
    EXTERNAL_INTERRUPT,
    IO_INTERRUPT,
    NR_IRQS
};

enum interruption_class {
    IRQEXT_CLK,
    IRQEXT_EXC,

@@ -72,14 +83,8 @@ void service_subclass_irq_unregister(void);
void measurement_alert_subclass_register(void);
void measurement_alert_subclass_unregister(void);

#ifdef CONFIG_LOCKDEP
# define disable_irq_nosync_lockdep(irq) disable_irq_nosync(irq)
# define disable_irq_nosync_lockdep_irqsave(irq, flags) \
        disable_irq_nosync(irq)
# define disable_irq_lockdep(irq) disable_irq(irq)
# define enable_irq_lockdep(irq) enable_irq(irq)
# define enable_irq_lockdep_irqrestore(irq, flags) \
        enable_irq(irq)
#endif
#define irq_canonicalize(irq) (irq)

#endif /* __ASSEMBLY__ */

#endif /* _ASM_IRQ_H */
arch/s390/include/asm/pci.h

@@ -53,12 +53,6 @@ struct zpci_fmb {
    atomic64_t unmapped_pages;
} __packed __aligned(16);

struct msi_map {
    unsigned long irq;
    struct msi_desc *msi;
    struct hlist_node msi_chain;
};

#define ZPCI_MSI_VEC_BITS 11
#define ZPCI_MSI_VEC_MAX (1 << ZPCI_MSI_VEC_BITS)
#define ZPCI_MSI_VEC_MASK (ZPCI_MSI_VEC_MAX - 1)

@@ -92,8 +86,6 @@ struct zpci_dev {
    /* IRQ stuff */
    u64 msi_addr; /* MSI address */
    struct zdev_irq_map *irq_map;
    struct msi_map *msi_map;
    struct airq_iv *aibv; /* adapter interrupt bit vector */
    unsigned int aisb; /* number of the summary bit */

@@ -153,14 +145,6 @@ int clp_add_pci_device(u32, u32, int);
int clp_enable_fh(struct zpci_dev *, u8);
int clp_disable_fh(struct zpci_dev *);

/* MSI */
struct msi_desc *__irq_get_msi_desc(unsigned int);
int zpci_msi_set_mask_bits(struct msi_desc *, u32, u32);
int zpci_setup_msi_irq(struct zpci_dev *, struct msi_desc *, unsigned int, int);
void zpci_teardown_msi_irq(struct zpci_dev *, struct msi_desc *);
int zpci_msihash_init(void);
void zpci_msihash_exit(void);

#ifdef CONFIG_PCI
/* Error handling and recovery */
void zpci_event_error(void *);
arch/s390/include/asm/serial.h (new file)

@@ -0,0 +1,6 @@
#ifndef _ASM_S390_SERIAL_H
#define _ASM_S390_SERIAL_H

#define BASE_BAUD 0

#endif /* _ASM_S390_SERIAL_H */
arch/s390/kernel/entry.S

@@ -18,6 +18,7 @@
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>

__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 4

@@ -435,6 +436,11 @@ io_skip:
io_loop:
    l    %r1,BASED(.Ldo_IRQ)
    lr   %r2,%r11                       # pass pointer to pt_regs
    lhi  %r3,IO_INTERRUPT
    tm   __PT_INT_CODE+8(%r11),0x80     # adapter interrupt ?
    jz   io_call
    lhi  %r3,THIN_INTERRUPT
io_call:
    basr %r14,%r1                       # call do_IRQ
    tm   __LC_MACHINE_FLAGS+2,0x10      # MACHINE_FLAG_LPAR
    jz   io_return

@@ -584,9 +590,10 @@ ext_skip:
    mvc  __PT_INT_CODE(4,%r11),__LC_EXT_CPU_ADDR
    mvc  __PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
    TRACE_IRQS_OFF
    l    %r1,BASED(.Ldo_IRQ)
    lr   %r2,%r11                       # pass pointer to pt_regs
    l    %r1,BASED(.Ldo_extint)
    basr %r14,%r1                       # call do_extint
    lhi  %r3,EXT_INTERRUPT
    basr %r14,%r1                       # call do_IRQ
    j    io_return

/*

@@ -902,7 +909,6 @@ cleanup_idle_wait:
.Ldo_machine_check:   .long s390_do_machine_check
.Lhandle_mcck:        .long s390_handle_mcck
.Ldo_IRQ:             .long do_IRQ
.Ldo_extint:          .long do_extint
.Ldo_signal:          .long do_signal
.Ldo_notify_resume:   .long do_notify_resume
.Ldo_per_trap:        .long do_per_trap
arch/s390/kernel/entry64.S

@@ -19,6 +19,7 @@
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>

__PT_R0 = __PT_GPRS
__PT_R1 = __PT_GPRS + 8

@@ -468,6 +469,11 @@ io_skip:
    xc    __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
io_loop:
    lgr   %r2,%r11                      # pass pointer to pt_regs
    lghi  %r3,IO_INTERRUPT
    tm    __PT_INT_CODE+8(%r11),0x80    # adapter interrupt ?
    jz    io_call
    lghi  %r3,THIN_INTERRUPT
io_call:
    brasl %r14,do_IRQ
    tm    __LC_MACHINE_FLAGS+6,0x10     # MACHINE_FLAG_LPAR
    jz    io_return

@@ -623,7 +629,8 @@ ext_skip:
    TRACE_IRQS_OFF
    xc    __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
    lgr   %r2,%r11                      # pass pointer to pt_regs
    brasl %r14,do_extint
    lghi  %r3,EXT_INTERRUPT
    brasl %r14,do_IRQ
    j     io_return

/*
arch/s390/kernel/irq.c

@@ -22,6 +22,7 @@
#include <asm/cputime.h>
#include <asm/lowcore.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
#include "entry.h"

DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);

@@ -42,9 +43,10 @@ struct irq_class {
 * Since the external and I/O interrupt fields are already sums we would end
 * up with having a sum which accounts each interrupt twice.
 */
static const struct irq_class irqclass_main_desc[NR_IRQS] = {
    [EXTERNAL_INTERRUPT] = {.name = "EXT"},
    [IO_INTERRUPT]       = {.name = "I/O"}
static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
    [EXT_INTERRUPT]  = {.name = "EXT"},
    [IO_INTERRUPT]   = {.name = "I/O"},
    [THIN_INTERRUPT] = {.name = "AIO"},
};

/*

@@ -86,6 +88,28 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
    [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
};

void __init init_IRQ(void)
{
    irq_reserve_irqs(0, THIN_INTERRUPT);
    init_cio_interrupts();
    init_airq_interrupts();
    init_ext_interrupts();
}

void do_IRQ(struct pt_regs *regs, int irq)
{
    struct pt_regs *old_regs;

    old_regs = set_irq_regs(regs);
    irq_enter();
    if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
        /* Serve timer interrupts first. */
        clock_comparator_work();
    generic_handle_irq(irq);
    irq_exit();
    set_irq_regs(old_regs);
}

/*
 * show_interrupts is needed by /proc/interrupts.
 */

@@ -100,27 +124,36 @@ int show_interrupts(struct seq_file *p, void *v)
    for_each_online_cpu(cpu)
        seq_printf(p, "CPU%d ", cpu);
    seq_putc(p, '\n');
    goto out;
    }
    if (irq < NR_IRQS) {
    if (irq >= NR_IRQS_BASE)
        goto out;
    seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
    for_each_online_cpu(cpu)
        seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]);
        seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
    seq_putc(p, '\n');
    goto skip_arch_irqs;
    goto out;
    }
    for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
        seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
        for_each_online_cpu(cpu)
            seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]);
            seq_printf(p, "%10u ",
                   per_cpu(irq_stat, cpu).irqs[irq]);
        if (irqclass_sub_desc[irq].desc)
            seq_printf(p, " %s", irqclass_sub_desc[irq].desc);
        seq_putc(p, '\n');
    }
skip_arch_irqs:
out:
    put_online_cpus();
    return 0;
}

int arch_show_interrupts(struct seq_file *p, int prec)
{
    return 0;
}

/*
 * Switch to the asynchronous interrupt stack for softirq execution.
 */

@@ -159,14 +192,6 @@ asmlinkage void do_softirq(void)
    local_irq_restore(flags);
}

#ifdef CONFIG_PROC_FS
void init_irq_proc(void)
{
    if (proc_mkdir("irq", NULL))
        create_prof_cpu_mask();
}
#endif

/*
 * ext_int_hash[index] is the list head for all external interrupts that hash
 * to this index.

@@ -183,14 +208,6 @@ struct ext_int_info {
/* ext_int_hash_lock protects the handler lists for external interrupts */
DEFINE_SPINLOCK(ext_int_hash_lock);

static void __init init_external_interrupts(void)
{
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
        INIT_LIST_HEAD(&ext_int_hash[idx]);
}

static inline int ext_hash(u16 code)
{
    return (code + (code >> 9)) & 0xff;
@@ -234,20 +251,13 @@ int unregister_external_interrupt(u16 code, ext_int_handler_t handler)
}
EXPORT_SYMBOL(unregister_external_interrupt);

void __irq_entry do_extint(struct pt_regs *regs)
static irqreturn_t do_ext_interrupt(int irq, void *dummy)
{
    struct pt_regs *regs = get_irq_regs();
    struct ext_code ext_code;
    struct pt_regs *old_regs;
    struct ext_int_info *p;
    int index;

    old_regs = set_irq_regs(regs);
    irq_enter();
    if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) {
        /* Serve timer interrupts first. */
        clock_comparator_work();
    }
    kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL);
    ext_code = *(struct ext_code *) &regs->int_code;
    if (ext_code.code != 0x1004)
        __get_cpu_var(s390_idle).nohz_delay = 1;

@@ -259,13 +269,25 @@ void __irq_entry do_extint(struct pt_regs *regs)
            p->handler(ext_code, regs->int_parm,
                   regs->int_parm_long);
    rcu_read_unlock();
    irq_exit();
    set_irq_regs(old_regs);

    return IRQ_HANDLED;
}

void __init init_IRQ(void)
static struct irqaction external_interrupt = {
    .name    = "EXT",
    .handler = do_ext_interrupt,
};

void __init init_ext_interrupts(void)
{
    init_external_interrupts();
    int idx;

    for (idx = 0; idx < ARRAY_SIZE(ext_int_hash); idx++)
        INIT_LIST_HEAD(&ext_int_hash[idx]);

    irq_set_chip_and_handler(EXT_INTERRUPT,
                 &dummy_irq_chip, handle_percpu_irq);
    setup_irq(EXT_INTERRUPT, &external_interrupt);
}

static DEFINE_SPINLOCK(sc_irq_lock);

@@ -313,69 +335,3 @@ void measurement_alert_subclass_unregister(void)
    spin_unlock(&ma_subclass_lock);
}
EXPORT_SYMBOL(measurement_alert_subclass_unregister);

#ifdef CONFIG_SMP
void synchronize_irq(unsigned int irq)
{
    /*
     * Not needed, the handler is protected by a lock and IRQs that occur
     * after the handler is deleted are just NOPs.
     */
}
EXPORT_SYMBOL_GPL(synchronize_irq);
#endif

#ifndef CONFIG_PCI

/* Only PCI devices have dynamically-defined IRQ handlers */

int request_irq(unsigned int irq, irq_handler_t handler,
        unsigned long irqflags, const char *devname, void *dev_id)
{
    return -EINVAL;
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
    WARN_ON(1);
}
EXPORT_SYMBOL_GPL(free_irq);

void enable_irq(unsigned int irq)
{
    WARN_ON(1);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
{
    WARN_ON(1);
}
EXPORT_SYMBOL_GPL(disable_irq);

#endif /* !CONFIG_PCI */

void disable_irq_nosync(unsigned int irq)
{
    disable_irq(irq);
}
EXPORT_SYMBOL_GPL(disable_irq_nosync);

unsigned long probe_irq_on(void)
{
    return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_on);

int probe_irq_off(unsigned long val)
{
    return 0;
}
EXPORT_SYMBOL_GPL(probe_irq_off);

unsigned int probe_irq_mask(unsigned long val)
{
    return val;
}
EXPORT_SYMBOL_GPL(probe_irq_mask);
arch/s390/pci/Makefile

@@ -2,5 +2,5 @@
# Makefile for the s390 PCI subsystem.
#

obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_msi.o pci_sysfs.o \
obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
                     pci_event.o pci_debug.o pci_insn.o
arch/s390/pci/pci.c

@@ -50,24 +50,23 @@ EXPORT_SYMBOL_GPL(zpci_list);
DEFINE_MUTEX(zpci_list_lock);
EXPORT_SYMBOL_GPL(zpci_list_lock);

static void zpci_enable_irq(struct irq_data *data);
static void zpci_disable_irq(struct irq_data *data);

static struct irq_chip zpci_irq_chip = {
    .name = "zPCI",
    .irq_unmask = zpci_enable_irq,
    .irq_mask = zpci_disable_irq,
};

static struct pci_hp_callback_ops *hotplug_ops;

static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
static DEFINE_SPINLOCK(zpci_domain_lock);

struct callback {
    irq_handler_t handler;
    void *data;
};

struct zdev_irq_map {
    struct airq_iv *aibv;   /* Adapter interrupt bit vector */
    struct callback *cb;    /* callback handler array */
    int msi_vecs;           /* consecutive MSI-vectors used */
};

static struct airq_iv *zpci_aisb_iv;
static struct zdev_irq_map *zpci_imap[ZPCI_NR_DEVICES];
static struct airq_iv *zpci_aibv[ZPCI_NR_DEVICES];

/* Adapter interrupt definitions */
static void zpci_irq_handler(struct airq_struct *airq);

@@ -83,19 +82,8 @@ static DECLARE_BITMAP(zpci_iomap, ZPCI_IOMAP_MAX_ENTRIES);
struct zpci_iomap_entry *zpci_iomap_start;
EXPORT_SYMBOL_GPL(zpci_iomap_start);

static struct kmem_cache *zdev_irq_cache;
static struct kmem_cache *zdev_fmb_cache;

static inline int irq_to_msi_nr(unsigned int irq)
{
    return irq & ZPCI_MSI_VEC_MASK;
}

static inline int irq_to_dev_nr(unsigned int irq)
{
    return irq >> ZPCI_MSI_VEC_BITS;
}

struct zpci_dev *get_zdev(struct pci_dev *pdev)
{
    return (struct zpci_dev *) pdev->sysdata;

@@ -283,21 +271,42 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
    return rc;
}

void enable_irq(unsigned int irq)
static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
    struct msi_desc *msi = irq_get_msi_desc(irq);
    int offset, pos;
    u32 mask_bits;

    if (msi->msi_attrib.is_msix) {
        offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
            PCI_MSIX_ENTRY_VECTOR_CTRL;
        msi->masked = readl(msi->mask_base + offset);
        writel(flag, msi->mask_base + offset);
    } else if (msi->msi_attrib.maskbit) {
        pos = (long) msi->mask_base;
        pci_read_config_dword(msi->dev, pos, &mask_bits);
        mask_bits &= ~(mask);
        mask_bits |= flag & mask;
        pci_write_config_dword(msi->dev, pos, mask_bits);
    } else
        return 0;

    msi->msi_attrib.maskbit = !!flag;
    return 1;
}

static void zpci_enable_irq(struct irq_data *data)
{
    struct msi_desc *msi = irq_get_msi_desc(data->irq);

    zpci_msi_set_mask_bits(msi, 1, 0);
}
EXPORT_SYMBOL_GPL(enable_irq);

void disable_irq(unsigned int irq)
static void zpci_disable_irq(struct irq_data *data)
{
    struct msi_desc *msi = irq_get_msi_desc(irq);
    struct msi_desc *msi = irq_get_msi_desc(data->irq);

    zpci_msi_set_mask_bits(msi, 1, 1);
}
EXPORT_SYMBOL_GPL(disable_irq);

void pcibios_fixup_bus(struct pci_bus *bus)
{
@@ -385,7 +394,7 @@ static struct pci_ops pci_root_ops = {
static void zpci_irq_handler(struct airq_struct *airq)
{
    unsigned long si, ai;
    struct zdev_irq_map *imap;
    struct airq_iv *aibv;
    int irqs_on = 0;

    inc_irq_stat(IRQIO_PCI);

@@ -403,69 +412,33 @@ static void zpci_irq_handler(struct airq_struct *airq)
        }

        /* Scan the adapter interrupt vector for this device. */
        imap = zpci_imap[si];
        aibv = zpci_aibv[si];
        for (ai = 0;;) {
            ai = airq_iv_scan(imap->aibv, ai, imap->msi_vecs);
            ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
            if (ai == -1UL)
                break;
            inc_irq_stat(IRQIO_MSI);
            airq_iv_lock(imap->aibv, ai);
            if (imap->cb[ai].handler)
                imap->cb[ai].handler(ai, imap->cb[ai].data);
            airq_iv_unlock(imap->aibv, ai);
            airq_iv_lock(aibv, ai);
            generic_handle_irq(airq_iv_get_data(aibv, ai));
            airq_iv_unlock(aibv, ai);
        }
    }
}

static int zpci_alloc_msi(struct zpci_dev *zdev, int msi_vecs)
{
    unsigned long size;

    /* Alloc aibv & callback space */
    zdev->irq_map = kmem_cache_zalloc(zdev_irq_cache, GFP_KERNEL);
    if (!zdev->irq_map)
        goto out;
    /* Store the number of used MSI vectors */
    zdev->irq_map->msi_vecs = msi_vecs;
    /* Allocate callback array */
    size = sizeof(struct callback) * msi_vecs;
    zdev->irq_map->cb = kzalloc(size, GFP_KERNEL);
    if (!zdev->irq_map->cb)
        goto out_map;
    /* Allocate msi_map array */
    size = sizeof(struct msi_map) * msi_vecs;
    zdev->msi_map = kzalloc(size, GFP_KERNEL);
    if (!zdev->msi_map)
        goto out_cb;
    return 0;

out_cb:
    kfree(zdev->irq_map->cb);
out_map:
    kmem_cache_free(zdev_irq_cache, zdev->irq_map);
out:
    return -ENOMEM;
}

static void zpci_free_msi(struct zpci_dev *zdev)
{
    kfree(zdev->msi_map);
    kfree(zdev->irq_map->cb);
    kmem_cache_free(zdev_irq_cache, zdev->irq_map);
}

int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
    struct zpci_dev *zdev = get_zdev(pdev);
    unsigned int msi_nr, msi_vecs;
    unsigned int hwirq, irq, msi_vecs;
    unsigned long aisb;
    struct msi_desc *msi;
    struct msi_msg msg;
    int rc;

    pr_debug("%s: requesting %d MSI-X interrupts...", __func__, nvec);
    if (type != PCI_CAP_ID_MSIX && type != PCI_CAP_ID_MSI)
        return -EINVAL;
    msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX);
    msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);

    /* Allocate adapter summary indicator bit */
    rc = -EIO;

@@ -476,30 +449,31 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
    /* Create adapter interrupt vector */
    rc = -ENOMEM;
    zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_BITLOCK);
    zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
    if (!zdev->aibv)
        goto out_si;

    /* Allocate data structures for msi interrupts */
    rc = zpci_alloc_msi(zdev, msi_vecs);
    if (rc)
        goto out_iv;

    /* Wire up shortcut pointer */
    zpci_imap[aisb] = zdev->irq_map;
    zdev->irq_map->aibv = zdev->aibv;
    zpci_aibv[aisb] = zdev->aibv;

    /*
     * TODO: irq number 0 wont be found if we return less than the
     * requested MSIs. Ignore it for now and fix in common code.
     */
    msi_nr = aisb << ZPCI_MSI_VEC_BITS;
    /* Request MSI interrupts */
    hwirq = 0;
    list_for_each_entry(msi, &pdev->msi_list, list) {
        rc = zpci_setup_msi_irq(zdev, msi, msi_nr,
                    aisb << ZPCI_MSI_VEC_BITS);
        rc = -EIO;
        irq = irq_alloc_desc(0);    /* Alloc irq on node 0 */
        if (irq == NO_IRQ)
            goto out_msi;
        rc = irq_set_msi_desc(irq, msi);
        if (rc)
            goto out_msi;
        msi_nr++;
        irq_set_chip_and_handler(irq, &zpci_irq_chip,
                     handle_simple_irq);
        msg.data = hwirq;
        msg.address_lo = zdev->msi_addr & 0xffffffff;
        msg.address_hi = zdev->msi_addr >> 32;
        write_msi_msg(irq, &msg);
        airq_iv_set_data(zdev->aibv, hwirq, irq);
        hwirq++;
    }

    /* Enable adapter interrupts */

@@ -510,14 +484,17 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
    return (msi_vecs == nvec) ? 0 : msi_vecs;

out_msi:
    msi_nr -= aisb << ZPCI_MSI_VEC_BITS;
    list_for_each_entry(msi, &pdev->msi_list, list) {
        if (msi_nr-- == 0)
        if (hwirq-- == 0)
            break;
        zpci_teardown_msi_irq(zdev, msi);
        irq_set_msi_desc(msi->irq, NULL);
        irq_free_desc(msi->irq);
        msi->msg.address_lo = 0;
        msi->msg.address_hi = 0;
        msi->msg.data = 0;
        msi->irq = 0;
    }
    zpci_free_msi(zdev);
out_iv:
    zpci_aibv[aisb] = NULL;
    airq_iv_release(zdev->aibv);
out_si:
    airq_iv_free_bit(zpci_aisb_iv, aisb);

@@ -541,10 +518,18 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
        return;
    }

    list_for_each_entry(msi, &pdev->msi_list, list)
        zpci_teardown_msi_irq(zdev, msi);
    /* Release MSI interrupts */
    list_for_each_entry(msi, &pdev->msi_list, list) {
        zpci_msi_set_mask_bits(msi, 1, 1);
        irq_set_msi_desc(msi->irq, NULL);
        irq_free_desc(msi->irq);
        msi->msg.address_lo = 0;
        msi->msg.address_hi = 0;
        msi->msg.data = 0;
        msi->irq = 0;
    }

    zpci_free_msi(zdev);
    zpci_aibv[zdev->aisb] = NULL;
    airq_iv_release(zdev->aibv);
    airq_iv_free_bit(zpci_aisb_iv, zdev->aisb);
}
@@ -625,61 +610,6 @@ int pcibios_add_platform_entries(struct pci_dev *pdev)
    return zpci_sysfs_add_device(&pdev->dev);
}

int zpci_request_irq(unsigned int irq, irq_handler_t handler, void *data)
{
    unsigned int msi_nr = irq_to_msi_nr(irq);
    unsigned int dev_nr = irq_to_dev_nr(irq);
    struct zdev_irq_map *imap;
    struct msi_desc *msi;

    msi = irq_get_msi_desc(irq);
    if (!msi)
        return -EIO;

    imap = zpci_imap[dev_nr];
    imap->cb[msi_nr].handler = handler;
    imap->cb[msi_nr].data = data;

    /*
     * The generic MSI code returns with the interrupt disabled on the
     * card, using the MSI mask bits. Firmware doesn't appear to unmask
     * at that level, so we do it here by hand.
     */
    zpci_msi_set_mask_bits(msi, 1, 0);
    return 0;
}

void zpci_free_irq(unsigned int irq)
{
    unsigned int msi_nr = irq_to_msi_nr(irq);
    unsigned int dev_nr = irq_to_dev_nr(irq);
    struct zdev_irq_map *imap;
    struct msi_desc *msi;

    /* Disable interrupt */
    msi = irq_get_msi_desc(irq);
    if (!msi)
        return;
    zpci_msi_set_mask_bits(msi, 1, 1);
    imap = zpci_imap[dev_nr];
    imap->cb[msi_nr].handler = NULL;
    imap->cb[msi_nr].data = NULL;
    synchronize_rcu();
}

int request_irq(unsigned int irq, irq_handler_t handler,
        unsigned long irqflags, const char *devname, void *dev_id)
{
    return zpci_request_irq(irq, handler, dev_id);
}
EXPORT_SYMBOL_GPL(request_irq);

void free_irq(unsigned int irq, void *dev_id)
{
    zpci_free_irq(irq);
}
EXPORT_SYMBOL_GPL(free_irq);

static int __init zpci_irq_init(void)
{
    int rc;

@@ -930,15 +860,10 @@ static inline int barsize(u8 size)
static int zpci_mem_init(void)
{
    zdev_irq_cache = kmem_cache_create("PCI_IRQ_cache", sizeof(struct zdev_irq_map),
                L1_CACHE_BYTES, SLAB_HWCACHE_ALIGN, NULL);
    if (!zdev_irq_cache)
        goto error_zdev;

    zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
                16, 0, NULL);
    if (!zdev_fmb_cache)
        goto error_fmb;
        goto error_zdev;

    /* TODO: use realloc */
    zpci_iomap_start = kzalloc(ZPCI_IOMAP_MAX_ENTRIES * sizeof(*zpci_iomap_start),

@@ -949,8 +874,6 @@ static int zpci_mem_init(void)
error_iomap:
    kmem_cache_destroy(zdev_fmb_cache);
error_fmb:
    kmem_cache_destroy(zdev_irq_cache);
error_zdev:
    return -ENOMEM;
}

@@ -958,7 +881,6 @@ static int zpci_mem_init(void)
static void zpci_mem_exit(void)
{
    kfree(zpci_iomap_start);
    kmem_cache_destroy(zdev_irq_cache);
    kmem_cache_destroy(zdev_fmb_cache);
}

@@ -1007,16 +929,12 @@ static int __init pci_base_init(void)
    rc = zpci_debug_init();
    if (rc)
        return rc;
        goto out;

    rc = zpci_mem_init();
    if (rc)
        goto out_mem;

    rc = zpci_msihash_init();
    if (rc)
        goto out_hash;

    rc = zpci_irq_init();
    if (rc)
        goto out_irq;

@@ -1036,11 +954,10 @@ static int __init pci_base_init(void)
out_dma:
    zpci_irq_exit();
out_irq:
    zpci_msihash_exit();
out_hash:
    zpci_mem_exit();
out_mem:
    zpci_debug_exit();
out:
    return rc;
}
subsys_initcall(pci_base_init);
arch/s390/pci/pci_msi.c (deleted)

@@ -1,134 +0,0 @@
/*
 * Copyright IBM Corp. 2012
 *
 * Author(s):
 *   Jan Glauber <jang@linux.vnet.ibm.com>
 */

#define COMPONENT "zPCI"
#define pr_fmt(fmt) COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/rculist.h>
#include <linux/hash.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <asm/hw_irq.h>

/* mapping of irq numbers to msi_desc */
static struct hlist_head *msi_hash;
static const unsigned int msi_hash_bits = 8;
#define MSI_HASH_BUCKETS (1U << msi_hash_bits)
#define msi_hashfn(nr) hash_long(nr, msi_hash_bits)

static DEFINE_SPINLOCK(msi_map_lock);

struct msi_desc *__irq_get_msi_desc(unsigned int irq)
{
    struct msi_map *map;

    hlist_for_each_entry_rcu(map,
            &msi_hash[msi_hashfn(irq)], msi_chain)
        if (map->irq == irq)
            return map->msi;
    return NULL;
}

int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
{
    if (msi->msi_attrib.is_msix) {
        int offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
            PCI_MSIX_ENTRY_VECTOR_CTRL;
        msi->masked = readl(msi->mask_base + offset);
        writel(flag, msi->mask_base + offset);
    } else {
        if (msi->msi_attrib.maskbit) {
            int pos;
            u32 mask_bits;

            pos = (long) msi->mask_base;
            pci_read_config_dword(msi->dev, pos, &mask_bits);
            mask_bits &= ~(mask);
            mask_bits |= flag & mask;
            pci_write_config_dword(msi->dev, pos, mask_bits);
        } else {
            return 0;
        }
    }

    msi->msi_attrib.maskbit = !!flag;
    return 1;
}

int zpci_setup_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi,
               unsigned int nr, int offset)
{
    struct msi_map *map;
    struct msi_msg msg;
    int rc;

    map = zdev->msi_map + (nr & ZPCI_MSI_VEC_MASK);
    map->irq = nr;
    map->msi = msi;
    INIT_HLIST_NODE(&map->msi_chain);

    pr_debug("%s hashing irq: %u to bucket nr: %llu\n",
        __func__, nr, msi_hashfn(nr));
    hlist_add_head_rcu(&map->msi_chain, &msi_hash[msi_hashfn(nr)]);

    spin_lock(&msi_map_lock);
    rc = irq_set_msi_desc(nr, msi);
    if (rc) {
        spin_unlock(&msi_map_lock);
        hlist_del_rcu(&map->msi_chain);
        return rc;
    }
    spin_unlock(&msi_map_lock);

    msg.data = nr - offset;
    msg.address_lo = zdev->msi_addr & 0xffffffff;
    msg.address_hi = zdev->msi_addr >> 32;
    write_msi_msg(nr, &msg);
    return 0;
}

void zpci_teardown_msi_irq(struct zpci_dev *zdev, struct msi_desc *msi)
{
    int nr = msi->irq & ZPCI_MSI_VEC_MASK;
    struct msi_map *map;

    zpci_msi_set_mask_bits(msi, 1, 1);
    msi->msg.address_lo = 0;
    msi->msg.address_hi = 0;
    msi->msg.data = 0;
    msi->irq = 0;

    spin_lock(&msi_map_lock);
    map = zdev->msi_map + nr;
    hlist_del_rcu(&map->msi_chain);
    spin_unlock(&msi_map_lock);
}

/*
 * The msi hash table has 256 entries which is good for 4..20
 * devices (a typical device allocates 10 + CPUs MSI's). Maybe make
 * the hash table size adjustable later.
 */
int __init zpci_msihash_init(void)
{
    unsigned int i;

    msi_hash = kmalloc(MSI_HASH_BUCKETS * sizeof(*msi_hash), GFP_KERNEL);
    if (!msi_hash)
        return -ENOMEM;

    for (i = 0; i < MSI_HASH_BUCKETS; i++)
        INIT_HLIST_HEAD(&msi_hash[i]);
    return 0;
}

void __init zpci_msihash_exit(void)
{
    kfree(msi_hash);
}
drivers/s390/cio/airq.c

@@ -81,17 +81,34 @@ void unregister_adapter_interrupt(struct airq_struct *airq)
}
EXPORT_SYMBOL(unregister_adapter_interrupt);

void do_adapter_IO(u8 isc)
static irqreturn_t do_airq_interrupt(int irq, void *dummy)
{
    struct tpi_info *tpi_info;
    struct airq_struct *airq;
    struct hlist_head *head;

    head = &airq_lists[isc];
    __this_cpu_write(s390_idle.nohz_delay, 1);
    tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
    head = &airq_lists[tpi_info->isc];
    rcu_read_lock();
    hlist_for_each_entry_rcu(airq, head, list)
        if ((*airq->lsi_ptr & airq->lsi_mask) != 0)
            airq->handler(airq);
    rcu_read_unlock();

    return IRQ_HANDLED;
}

static struct irqaction airq_interrupt = {
    .name    = "AIO",
    .handler = do_airq_interrupt,
};

void __init init_airq_interrupts(void)
{
    irq_set_chip_and_handler(THIN_INTERRUPT,
                 &dummy_irq_chip, handle_percpu_irq);
    setup_irq(THIN_INTERRUPT, &airq_interrupt);
}

/**
drivers/s390/cio/cio.c

@@ -561,37 +561,23 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid)
}

/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * do_cio_interrupt() handles all normal I/O device IRQ's
 */
void __irq_entry do_IRQ(struct pt_regs *regs)
static irqreturn_t do_cio_interrupt(int irq, void *dummy)
{
    struct tpi_info *tpi_info = (struct tpi_info *) &regs->int_code;
    struct tpi_info *tpi_info;
    struct subchannel *sch;
    struct irb *irb;
    struct pt_regs *old_regs;

    old_regs = set_irq_regs(regs);
    irq_enter();
    __this_cpu_write(s390_idle.nohz_delay, 1);
    if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
        /* Serve timer interrupts first. */
        clock_comparator_work();

    kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
    tpi_info = (struct tpi_info *) &get_irq_regs()->int_code;
    irb = (struct irb *) &S390_lowcore.irb;
    if (tpi_info->adapter_IO) {
        do_adapter_IO(tpi_info->isc);
        goto out;
    }
    sch = (struct subchannel *)(unsigned long) tpi_info->intparm;
    if (!sch) {
        /* Clear pending interrupt condition. */
        inc_irq_stat(IRQIO_CIO);
        tsch(tpi_info->schid, irb);
        goto out;
        return IRQ_HANDLED;
    }
    spin_lock(sch->lock);
    /* Store interrupt response block to lowcore. */

@@ -606,9 +592,23 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
    } else
        inc_irq_stat(IRQIO_CIO);
    spin_unlock(sch->lock);
out:
    irq_exit();
    set_irq_regs(old_regs);

    return IRQ_HANDLED;
}

static struct irq_desc *irq_desc_io;

static struct irqaction io_interrupt = {
    .name    = "IO",
    .handler = do_cio_interrupt,
};

void __init init_cio_interrupts(void)
{
    irq_set_chip_and_handler(IO_INTERRUPT,
                 &dummy_irq_chip, handle_percpu_irq);
    setup_irq(IO_INTERRUPT, &io_interrupt);
    irq_desc_io = irq_to_desc(IO_INTERRUPT);
}

#ifdef CONFIG_CCW_CONSOLE

@@ -635,7 +635,7 @@ void cio_tsch(struct subchannel *sch)
        local_bh_disable();
        irq_enter();
    }
    kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL);
    kstat_incr_irqs_this_cpu(IO_INTERRUPT, irq_desc_io);
    if (sch->driver && sch->driver->irq)
        sch->driver->irq(sch);
    else
drivers/s390/cio/cio.h

@@ -121,9 +121,6 @@ extern int cio_commit_config(struct subchannel *sch);
int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key);
int cio_tm_intrg(struct subchannel *sch);

void do_adapter_IO(u8 isc);
void do_IRQ(struct pt_regs *);

/* Use with care. */
#ifdef CONFIG_CCW_CONSOLE
extern struct subchannel *cio_probe_console(void);
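For context, this is roughly how a PCI device driver sits on top of the converted interrupt layer; it is an illustrative sketch, not part of this commit. The "foo" names and the vendor/device IDs are placeholders; pci_enable_msix() and request_irq() are the standard kernel interfaces of this period, and with HAVE_GENERIC_HARDIRQS selected the request_irq() call is handled by the common irq code rather than the s390-private stubs removed above.

/*
 * Hedged illustration only: a single-instance skeleton driver showing how
 * a virtual MSI irq allocated by arch_setup_msi_irqs() is consumed.
 * All "foo_" identifiers and the PCI IDs are invented for this sketch.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

static struct msix_entry foo_entry = { .entry = 0 };	/* one vector, single device */

static irqreturn_t foo_msi_handler(int irq, void *dev_id)
{
	/* irq is the virtual number the s390 arch code assigned to vector 0 */
	return IRQ_HANDLED;
}

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	/* ask for one MSI-X vector; the arch code maps it to a virtual irq */
	rc = pci_enable_msix(pdev, &foo_entry, 1);
	if (rc)
		goto err_disable;
	/* request_irq() now goes through the generic hardirq layer */
	rc = request_irq(foo_entry.vector, foo_msi_handler, 0, "foo", pdev);
	if (rc)
		goto err_msix;
	return 0;

err_msix:
	pci_disable_msix(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}

static void foo_remove(struct pci_dev *pdev)
{
	free_irq(foo_entry.vector, pdev);
	pci_disable_msix(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id foo_ids[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },	/* placeholder IDs */
	{ 0, }
};

static struct pci_driver foo_driver = {
	.name		= "foo",
	.id_table	= foo_ids,
	.probe		= foo_probe,
	.remove		= foo_remove,
};
module_pci_driver(foo_driver);
MODULE_LICENSE("GPL");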