Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq updates from Thomas Gleixner:
 "This update delivers:

   - Yet another interrupt chip driver (LPC32xx)
   - Core functions to handle partitioned per-cpu interrupts
   - Enhancements to the IPI core
   - Proper handling of irq type configuration
   - A large set of ARM GIC enhancements
   - The usual pile of small fixes, cleanups and enhancements"

* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (31 commits)
  irqchip/bcm2836: Use a more generic memory barrier call
  irqchip/bcm2836: Fix compiler warning on 64-bit build
  irqchip/bcm2836: Drop smp_set_ops on arm64 builds
  irqchip/gic: Add helper functions for GIC setup and teardown
  irqchip/gic: Store GIC configuration parameters
  irqchip/gic: Pass GIC pointer to save/restore functions
  irqchip/gic: Return an error if GIC initialisation fails
  irqchip/gic: Remove static irq_chip definition for eoimode1
  irqchip/gic: Don't initialise chip if mapping IO space fails
  irqchip/gic: WARN if setting the interrupt type for a PPI fails
  irqchip/gic: Don't unnecessarily write the IRQ configuration
  irqchip: Mask the non-type/sense bits when translating an IRQ
  genirq: Ensure IRQ descriptor is valid when setting-up the IRQ
  irqchip/gic-v3: Configure all interrupts as non-secure Group-1
  irqchip/gic-v2m: Add workaround for Broadcom NS2 GICv2m erratum
  irqchip/irq-alpine-msi: Don't use <asm-generic/msi.h>
  irqchip/mbigen: Checking for IS_ERR() instead of NULL
  irqchip/gic-v3: Remove inexistant register definition
  irqchip/gicv3-its: Don't allow devices whose ID is outside range
  irqchip: Add LPC32xx interrupt controller driver
  ...
This commit is contained in:
commit ede40902cf
@ -11,6 +11,8 @@ Main node required properties:
|
|||
- interrupt-controller : Identifies the node as an interrupt controller
|
||||
- #interrupt-cells : Specifies the number of cells needed to encode an
|
||||
interrupt source. Must be a single cell with a value of at least 3.
|
||||
If the system requires describing PPI affinity, then the value must
|
||||
be at least 4.
|
||||
|
||||
The 1st cell is the interrupt type; 0 for SPI interrupts, 1 for PPI
|
||||
interrupts. Other values are reserved for future use.
|
||||
|
@ -24,7 +26,14 @@ Main node required properties:
|
|||
1 = edge triggered
|
||||
4 = level triggered
|
||||
|
||||
Cells 4 and beyond are reserved for future use and must have a value
|
||||
The 4th cell is a phandle to a node describing a set of CPUs this
|
||||
interrupt is affine to. The interrupt must be a PPI, and the node
|
||||
pointed to must be a subnode of the "ppi-partitions" subnode. For
|
||||
interrupt types other than PPI or PPIs that are not partitioned,
|
||||
this cell must be zero. See the "ppi-partitions" node description
|
||||
below.
|
||||
|
||||
Cells 5 and beyond are reserved for future use and must have a value
|
||||
of 0 if present.
|
||||
|
||||
- reg : Specifies base physical address(s) and size of the GIC
|
||||
|
@ -50,6 +59,11 @@ Optional
|
|||
|
||||
Sub-nodes:
|
||||
|
||||
PPI affinity can be expressed as a single "ppi-partitions" node,
|
||||
containing a set of sub-nodes, each with the following property:
|
||||
- affinity: Should be a list of phandles to CPU nodes (as described in
|
||||
Documentation/devicetree/bindings/arm/cpus.txt).
|
||||
|
||||
GICv3 has one or more Interrupt Translation Services (ITS) that are
|
||||
used to route Message Signalled Interrupts (MSI) to the CPUs.
|
||||
|
||||
|
@ -91,7 +105,7 @@ Examples:
|
|||
|
||||
gic: interrupt-controller@2c010000 {
|
||||
compatible = "arm,gic-v3";
|
||||
#interrupt-cells = <3>;
|
||||
#interrupt-cells = <4>;
|
||||
#address-cells = <2>;
|
||||
#size-cells = <2>;
|
||||
ranges;
|
||||
|
@ -119,4 +133,20 @@ Examples:
|
|||
#msi-cells = <1>;
|
||||
reg = <0x0 0x2c400000 0 0x200000>;
|
||||
};
|
||||
|
||||
ppi-partitions {
|
||||
part0: interrupt-partition-0 {
|
||||
affinity = <&cpu0 &cpu2>;
|
||||
};
|
||||
|
||||
part1: interrupt-partition-1 {
|
||||
affinity = <&cpu1 &cpu3>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
|
||||
device@0 {
|
||||
reg = <0 0 0 4>;
|
||||
interrupts = <1 1 4 &part0>;
|
||||
};
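For illustration only (not part of the binding): a minimal sketch of how a platform driver could consume the partitioned PPI from the device@0 example above. The driver name and compatible string are hypothetical; the 4th cell is resolved entirely inside the GIC driver, so the consumer requests the interrupt exactly as it would with a 3-cell specifier.

/*
 * Illustrative consumer of the "device@0" node above. The 4-cell
 * specifier <1 1 4 &part0> reads: PPI (1), PPI number 1, level
 * triggered (4), affine to the CPUs listed in the part0 partition.
 * The partition phandle is resolved inside the GIC driver.
 */
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static irqreturn_t demo_ppi_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
        int irq = platform_get_irq(pdev, 0);    /* resolves the 4-cell specifier */

        if (irq < 0)
                return irq;

        return devm_request_irq(&pdev->dev, irq, demo_ppi_handler, 0,
                                "demo-partitioned-ppi", pdev);
}

static const struct of_device_id demo_of_match[] = {
        { .compatible = "vendor,demo-device" }, /* hypothetical */
        { }
};

static struct platform_driver demo_driver = {
        .probe = demo_probe,
        .driver = {
                .name = "demo-partitioned-ppi",
                .of_match_table = demo_of_match,
        },
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");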
@ -0,0 +1,30 @@
|
|||
* Freescale Layerscape SCFG PCIe MSI controller
|
||||
|
||||
Required properties:
|
||||
|
||||
- compatible: should be "fsl,<soc-name>-msi" to identify the
  Layerscape PCIe MSI controller block, such as:
|
||||
"fsl,1s1021a-msi"
|
||||
"fsl,1s1043a-msi"
|
||||
- msi-controller: indicates that this is a PCIe MSI controller node
|
||||
- reg: physical base address of the controller and length of memory mapped.
|
||||
- interrupts: an interrupt to the parent interrupt controller.
|
||||
|
||||
Optional properties:
|
||||
- interrupt-parent: the phandle to the parent interrupt controller.
|
||||
|
||||
This interrupt controller hardware is a second-level interrupt controller that
is hooked to a parent interrupt controller, e.g. the ARM GIC on ARM-based
platforms. If interrupt-parent is not provided, the default parent interrupt
controller will be used.
Each PCIe node needs to have an msi-parent property that points to the
MSI controller node.
|
||||
|
||||
Examples:
|
||||
|
||||
msi1: msi-controller@1571000 {
|
||||
compatible = "fsl,1s1043a-msi";
|
||||
reg = <0x0 0x1571000 0x0 0x8>,
|
||||
msi-controller;
|
||||
interrupts = <0 116 0x4>;
|
||||
};
|
|
@ -531,6 +531,8 @@ config ARCH_LPC32XX
|
|||
select COMMON_CLK
|
||||
select CPU_ARM926T
|
||||
select GENERIC_CLOCKEVENTS
|
||||
select MULTI_IRQ_HANDLER
|
||||
select SPARSE_IRQ
|
||||
select USE_OF
|
||||
help
|
||||
Support for the NXP LPC32XX family of processors
|
||||
|
|
|
@ -206,7 +206,6 @@ static const char *const lpc32xx_dt_compat[] __initconst = {
|
|||
DT_MACHINE_START(LPC32XX_DT, "LPC32XX SoC (Flattened Device Tree)")
|
||||
.atag_offset = 0x100,
|
||||
.map_io = lpc32xx_map_io,
|
||||
.init_irq = lpc32xx_init_irq,
|
||||
.init_machine = lpc3250_machine_init,
|
||||
.dt_compat = lpc32xx_dt_compat,
|
||||
MACHINE_END
|
||||
|
|
|
@ -27,6 +27,7 @@ config ARM_GIC_V3
|
|||
select IRQ_DOMAIN
|
||||
select MULTI_IRQ_HANDLER
|
||||
select IRQ_DOMAIN_HIERARCHY
|
||||
select PARTITION_PERCPU
|
||||
|
||||
config ARM_GIC_V3_ITS
|
||||
bool
|
||||
|
@ -244,3 +245,11 @@ config IRQ_MXS
|
|||
config MVEBU_ODMI
|
||||
bool
|
||||
select GENERIC_MSI_IRQ_DOMAIN
|
||||
|
||||
config LS_SCFG_MSI
|
||||
def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
|
||||
depends on PCI && PCI_MSI
|
||||
select PCI_MSI_IRQ_DOMAIN
|
||||
|
||||
config PARTITION_PERCPU
|
||||
bool
|
||||
|
|
|
@ -7,6 +7,7 @@ obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o
|
|||
obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2836.o
|
||||
obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o
|
||||
obj-$(CONFIG_ARCH_HIP04) += irq-hip04.o
|
||||
obj-$(CONFIG_ARCH_LPC32XX) += irq-lpc32xx.o
|
||||
obj-$(CONFIG_ARCH_MMP) += irq-mmp.o
|
||||
obj-$(CONFIG_IRQ_MXS) += irq-mxs.o
|
||||
obj-$(CONFIG_ARCH_TEGRA) += irq-tegra.o
|
||||
|
@ -27,6 +28,7 @@ obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o
|
|||
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
|
||||
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
|
||||
obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o
|
||||
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
|
||||
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
|
||||
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
|
||||
obj-$(CONFIG_ARM_VIC) += irq-vic.o
|
||||
|
@ -65,3 +67,4 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
|
|||
obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
|
||||
obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
|
||||
obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
|
||||
obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
|
||||
|
|
|
@ -23,7 +23,7 @@
|
|||
#include <linux/slab.h>
|
||||
|
||||
#include <asm/irq.h>
|
||||
#include <asm-generic/msi.h>
|
||||
#include <asm/msi.h>
|
||||
|
||||
/* MSIX message address format: local GIC target */
|
||||
#define ALPINE_MSIX_SPI_TARGET_CLUSTER0 BIT(16)
|
||||
|
|
|
@ -195,7 +195,7 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask,
|
|||
* Ensure that stores to normal memory are visible to the
|
||||
* other CPUs before issuing the IPI.
|
||||
*/
|
||||
dsb();
|
||||
smp_wmb();
|
||||
|
||||
for_each_cpu(cpu, mask) {
|
||||
writel(1 << ipi, mailbox0_base + 16 * cpu);
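An aside on the dsb()-to-smp_wmb() change above: the narrower barrier is sufficient because all that is required is that earlier stores to normal memory become visible before the mailbox write that raises the IPI. A minimal sketch of the send/receive pairing follows, using a hypothetical mailbox register and payload (not the bcm2836 register layout); the GICv2 patch later in this series adds the matching smp_rmb() on the receive side.

/* Sketch of the ordering contract for mailbox-style IPIs. */
#include <linux/io.h>
#include <linux/types.h>
#include <asm/barrier.h>

struct demo_ipi {
        void __iomem *mailbox;  /* hypothetical per-cpu mailbox register */
        unsigned long payload;  /* data the receiving CPU will read */
};

static void demo_send_ipi(struct demo_ipi *ipi, unsigned long data)
{
        ipi->payload = data;    /* normal-memory store ...              */
        smp_wmb();              /* ... must be visible before the IPI   */
        writel(1, ipi->mailbox);
}

static unsigned long demo_receive_ipi(struct demo_ipi *ipi)
{
        u32 pending = readl(ipi->mailbox);      /* acknowledge the IPI */

        smp_rmb();              /* pairs with smp_wmb() in demo_send_ipi() */
        return pending ? ipi->payload : 0;
}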
@ -223,6 +223,7 @@ static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = {
|
|||
.priority = 100,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ARM
|
||||
int __init bcm2836_smp_boot_secondary(unsigned int cpu,
|
||||
struct task_struct *idle)
|
||||
{
|
||||
|
@ -238,7 +239,7 @@ int __init bcm2836_smp_boot_secondary(unsigned int cpu,
|
|||
static const struct smp_operations bcm2836_smp_ops __initconst = {
|
||||
.smp_boot_secondary = bcm2836_smp_boot_secondary,
|
||||
};
|
||||
|
||||
#endif
|
||||
#endif
|
||||
|
||||
static const struct irq_domain_ops bcm2836_arm_irqchip_intc_ops = {
|
||||
|
@ -252,12 +253,15 @@ bcm2836_arm_irqchip_smp_init(void)
|
|||
/* Unmask IPIs to the boot CPU. */
|
||||
bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier,
|
||||
CPU_STARTING,
|
||||
(void *)smp_processor_id());
|
||||
(void *)(uintptr_t)smp_processor_id());
|
||||
register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier);
|
||||
|
||||
set_smp_cross_call(bcm2836_arm_irqchip_send_ipi);
|
||||
|
||||
#ifdef CONFIG_ARM
|
||||
smp_set_ops(&bcm2836_smp_ops);
|
||||
#endif
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -183,7 +183,7 @@ static int crossbar_domain_translate(struct irq_domain *d,
|
|||
return -EINVAL;
|
||||
|
||||
*hwirq = fwspec->param[1];
|
||||
*type = fwspec->param[2];
|
||||
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
|
||||
return 0;
|
||||
}
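For context on the one-line crossbar change above (from the "Mask the non-type/sense bits when translating an IRQ" patch): only the low four trigger bits of the type cell are meaningful to the core, so any extra flag bits a device tree might carry in that cell are stripped before irq_set_type() sees them. A tiny illustrative helper, assuming a hypothetical specifier value:

#include <linux/irq.h>

/*
 * IRQ_TYPE_SENSE_MASK is 0xf, so a hypothetical type cell of 0x104
 * (IRQ_TYPE_LEVEL_HIGH plus a vendor-specific flag bit) is reduced
 * to IRQ_TYPE_LEVEL_HIGH.
 */
static unsigned int demo_sense_only(u32 type_cell)
{
        return type_cell & IRQ_TYPE_SENSE_MASK; /* 0x104 -> 0x4 */
}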
@ -50,14 +50,26 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
|
|||
else if (type & IRQ_TYPE_EDGE_BOTH)
|
||||
val |= confmask;
|
||||
|
||||
/* If the current configuration is the same, then we are done */
|
||||
if (val == oldval)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* Write back the new configuration, and possibly re-enable
|
||||
* the interrupt. If we tried to write a new configuration and failed,
|
||||
* return an error.
|
||||
* the interrupt. If we fail to write a new configuration for
|
||||
* an SPI then WARN and return an error. If we fail to write the
|
||||
* configuration for a PPI this is most likely because the GIC
|
||||
* does not allow us to set the configuration or we are in a
|
||||
* non-secure mode, and hence it may not be catastrophic.
|
||||
*/
|
||||
writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
|
||||
if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval)
|
||||
ret = -EINVAL;
|
||||
if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val) {
|
||||
if (WARN_ON(irq >= 32))
|
||||
ret = -EINVAL;
|
||||
else
|
||||
pr_warn("GIC: PPI%d is secure or misconfigured\n",
|
||||
irq - 16);
|
||||
}
|
||||
|
||||
if (sync_access)
|
||||
sync_access();
|
||||
|
|
|
@ -49,6 +49,9 @@
|
|||
/* APM X-Gene with GICv2m MSI_IIDR register value */
|
||||
#define XGENE_GICV2M_MSI_IIDR 0x06000170
|
||||
|
||||
/* Broadcom NS2 GICv2m MSI_IIDR register value */
|
||||
#define BCM_NS2_GICV2M_MSI_IIDR 0x0000013f
|
||||
|
||||
/* List of flags for specific v2m implementation */
|
||||
#define GICV2M_NEEDS_SPI_OFFSET 0x00000001
|
||||
|
||||
|
@ -62,6 +65,7 @@ struct v2m_data {
|
|||
void __iomem *base; /* GICv2m virt address */
|
||||
u32 spi_start; /* The SPI number that MSIs start */
|
||||
u32 nr_spis; /* The number of SPIs for MSIs */
|
||||
u32 spi_offset; /* offset to be subtracted from SPI number */
|
||||
unsigned long *bm; /* MSI vector bitmap */
|
||||
u32 flags; /* v2m flags for specific implementation */
|
||||
};
|
||||
|
@ -102,7 +106,7 @@ static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
|
|||
msg->data = data->hwirq;
|
||||
|
||||
if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
|
||||
msg->data -= v2m->spi_start;
|
||||
msg->data -= v2m->spi_offset;
|
||||
}
|
||||
|
||||
static struct irq_chip gicv2m_irq_chip = {
|
||||
|
@ -340,9 +344,20 @@ static int __init gicv2m_init_one(struct fwnode_handle *fwnode,
|
|||
* different from the standard GICv2m implementation where
|
||||
* the MSI data is the absolute value within the range from
|
||||
* spi_start to (spi_start + num_spis).
|
||||
*
|
||||
* Broadcom NS2 GICv2m implementation has an erratum where the MSI data
|
||||
* is 'spi_number - 32'
|
||||
*/
|
||||
if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
|
||||
switch (readl_relaxed(v2m->base + V2M_MSI_IIDR)) {
|
||||
case XGENE_GICV2M_MSI_IIDR:
|
||||
v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
|
||||
v2m->spi_offset = v2m->spi_start;
|
||||
break;
|
||||
case BCM_NS2_GICV2M_MSI_IIDR:
|
||||
v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;
|
||||
v2m->spi_offset = 32;
|
||||
break;
|
||||
}
|
||||
|
||||
v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
|
||||
GFP_KERNEL);
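A worked example of the SPI-offset quirks selected by the switch above (illustrative helper, not part of the patch); the flag name mirrors GICV2M_NEEDS_SPI_OFFSET defined earlier in this file.

#include <linux/types.h>

#define DEMO_NEEDS_SPI_OFFSET   0x00000001      /* mirrors GICV2M_NEEDS_SPI_OFFSET */

/*
 * MSI data computed by gicv2m_compose_msi_msg() once the quirks above
 * are applied. For an MSI routed to SPI 96:
 *   - standard GICv2m:                      data = 96
 *   - APM X-Gene  (spi_offset = spi_start): data = 96 - spi_start
 *   - Broadcom NS2 (spi_offset = 32):       data = 96 - 32 = 64
 */
static u32 demo_v2m_msi_data(u32 hwirq, u32 flags, u32 spi_offset)
{
        u32 data = hwirq;

        if (flags & DEMO_NEEDS_SPI_OFFSET)
                data -= spi_offset;

        return data;
}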
@ -54,6 +54,16 @@ struct its_collection {
|
|||
u16 col_id;
|
||||
};
|
||||
|
||||
/*
|
||||
* The ITS_BASER structure - contains memory information and cached
|
||||
* value of BASER register configuration.
|
||||
*/
|
||||
struct its_baser {
|
||||
void *base;
|
||||
u64 val;
|
||||
u32 order;
|
||||
};
|
||||
|
||||
/*
|
||||
* The ITS structure - contains most of the infrastructure, with the
|
||||
* top-level MSI domain, the command queue, the collections, and the
|
||||
|
@ -66,14 +76,12 @@ struct its_node {
|
|||
unsigned long phys_base;
|
||||
struct its_cmd_block *cmd_base;
|
||||
struct its_cmd_block *cmd_write;
|
||||
struct {
|
||||
void *base;
|
||||
u32 order;
|
||||
} tables[GITS_BASER_NR_REGS];
|
||||
struct its_baser tables[GITS_BASER_NR_REGS];
|
||||
struct its_collection *collections;
|
||||
struct list_head its_device_list;
|
||||
u64 flags;
|
||||
u32 ite_size;
|
||||
u32 device_ids;
|
||||
};
|
||||
|
||||
#define ITS_ITT_ALIGN SZ_256
|
||||
|
@ -838,6 +846,8 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
|
|||
ids = GITS_TYPER_DEVBITS(typer);
|
||||
}
|
||||
|
||||
its->device_ids = ids;
|
||||
|
||||
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
|
||||
u64 val = readq_relaxed(its->base + GITS_BASER + i * 8);
|
||||
u64 type = GITS_BASER_TYPE(val);
|
||||
|
@ -913,6 +923,7 @@ static int its_alloc_tables(const char *node_name, struct its_node *its)
|
|||
}
|
||||
|
||||
val |= alloc_pages - 1;
|
||||
its->tables[i].val = val;
|
||||
|
||||
writeq_relaxed(val, its->base + GITS_BASER + i * 8);
|
||||
tmp = readq_relaxed(its->base + GITS_BASER + i * 8);
|
||||
|
@ -1138,9 +1149,22 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
|
|||
return its_dev;
|
||||
}
|
||||
|
||||
static struct its_baser *its_get_baser(struct its_node *its, u32 type)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
|
||||
if (GITS_BASER_TYPE(its->tables[i].val) == type)
|
||||
return &its->tables[i];
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
|
||||
int nvecs)
|
||||
{
|
||||
struct its_baser *baser;
|
||||
struct its_device *dev;
|
||||
unsigned long *lpi_map;
|
||||
unsigned long flags;
|
||||
|
@ -1151,6 +1175,16 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
|
|||
int nr_ites;
|
||||
int sz;
|
||||
|
||||
baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
|
||||
|
||||
/* Don't allow 'dev_id' that exceeds single, flat table limit */
|
||||
if (baser) {
|
||||
if (dev_id >= (PAGE_ORDER_TO_SIZE(baser->order) /
|
||||
GITS_BASER_ENTRY_SIZE(baser->val)))
|
||||
return NULL;
|
||||
} else if (ilog2(dev_id) >= its->device_ids)
|
||||
return NULL;
|
||||
|
||||
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
|
||||
/*
|
||||
* At least one bit of EventID is being used, hence a minimum
|
||||
|
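Before the GICv3 core changes below, a worked example of the flat device-table bound enforced in its_create_device() above; the table and entry sizes here are made up for illustration.

#include <linux/kernel.h>

/*
 * Sketch of the dev_id bound used in its_create_device(): the limit is
 * simply how many entries fit in the flat GITS_BASER_TYPE_DEVICE table.
 * With a hypothetical 64KiB table (PAGE_ORDER_TO_SIZE(baser->order) ==
 * 65536) and 8-byte entries, any dev_id >= 8192 is rejected.
 */
static bool demo_dev_id_in_range(u32 dev_id, unsigned long table_bytes,
                                 unsigned long entry_bytes)
{
        return dev_id < table_bytes / entry_bytes;      /* 65536 / 8 = 8192 */
}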
|
|
@ -29,6 +29,7 @@
|
|||
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/arm-gic-v3.h>
|
||||
#include <linux/irqchip/irq-partition-percpu.h>
|
||||
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/exception.h>
|
||||
|
@ -44,6 +45,7 @@ struct redist_region {
|
|||
};
|
||||
|
||||
struct gic_chip_data {
|
||||
struct fwnode_handle *fwnode;
|
||||
void __iomem *dist_base;
|
||||
struct redist_region *redist_regions;
|
||||
struct rdists rdists;
|
||||
|
@ -51,6 +53,7 @@ struct gic_chip_data {
|
|||
u64 redist_stride;
|
||||
u32 nr_redist_regions;
|
||||
unsigned int irq_nr;
|
||||
struct partition_desc *ppi_descs[16];
|
||||
};
|
||||
|
||||
static struct gic_chip_data gic_data __read_mostly;
|
||||
|
@ -364,6 +367,13 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
|
|||
if (static_key_true(&supports_deactivate))
|
||||
gic_write_dir(irqnr);
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* Unlike GICv2, we don't need an smp_rmb() here.
|
||||
* The control dependency from gic_read_iar to
|
||||
* the ISB in gic_write_eoir is enough to ensure
|
||||
* that any shared data read by handle_IPI will
|
||||
* be read after the ACK.
|
||||
*/
|
||||
handle_IPI(irqnr, regs);
|
||||
#else
|
||||
WARN_ONCE(true, "Unexpected SGI received!\n");
|
||||
|
@ -383,6 +393,15 @@ static void __init gic_dist_init(void)
|
|||
writel_relaxed(0, base + GICD_CTLR);
|
||||
gic_dist_wait_for_rwp();
|
||||
|
||||
/*
|
||||
* Configure SPIs as non-secure Group-1. This will only matter
|
||||
* if the GIC only has a single security state. This will not
|
||||
* do the right thing if the kernel is running in secure mode,
|
||||
* but that's not the intended use case anyway.
|
||||
*/
|
||||
for (i = 32; i < gic_data.irq_nr; i += 32)
|
||||
writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
|
||||
|
||||
gic_dist_config(base, gic_data.irq_nr, gic_dist_wait_for_rwp);
|
||||
|
||||
/* Enable distributor with ARE, Group1 */
|
||||
|
@ -500,6 +519,9 @@ static void gic_cpu_init(void)
|
|||
|
||||
rbase = gic_data_rdist_sgi_base();
|
||||
|
||||
/* Configure SGIs/PPIs as non-secure Group-1 */
|
||||
writel_relaxed(~0, rbase + GICR_IGROUPR0);
|
||||
|
||||
gic_cpu_config(rbase, gic_redist_wait_for_rwp);
|
||||
|
||||
/* Give LPIs a spin */
|
||||
|
@ -812,10 +834,62 @@ static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
|
|||
}
|
||||
}
|
||||
|
||||
static int gic_irq_domain_select(struct irq_domain *d,
|
||||
struct irq_fwspec *fwspec,
|
||||
enum irq_domain_bus_token bus_token)
|
||||
{
|
||||
/* Not for us */
|
||||
if (fwspec->fwnode != d->fwnode)
|
||||
return 0;
|
||||
|
||||
/* If this is not DT, then we have a single domain */
|
||||
if (!is_of_node(fwspec->fwnode))
|
||||
return 1;
|
||||
|
||||
/*
|
||||
* If this is a PPI and we have a 4th (non-null) parameter,
|
||||
* then we need to match the partition domain.
|
||||
*/
|
||||
if (fwspec->param_count >= 4 &&
|
||||
fwspec->param[0] == 1 && fwspec->param[3] != 0)
|
||||
return d == partition_get_domain(gic_data.ppi_descs[fwspec->param[1]]);
|
||||
|
||||
return d == gic_data.domain;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops gic_irq_domain_ops = {
|
||||
.translate = gic_irq_domain_translate,
|
||||
.alloc = gic_irq_domain_alloc,
|
||||
.free = gic_irq_domain_free,
|
||||
.select = gic_irq_domain_select,
|
||||
};
|
||||
|
||||
static int partition_domain_translate(struct irq_domain *d,
|
||||
struct irq_fwspec *fwspec,
|
||||
unsigned long *hwirq,
|
||||
unsigned int *type)
|
||||
{
|
||||
struct device_node *np;
|
||||
int ret;
|
||||
|
||||
np = of_find_node_by_phandle(fwspec->param[3]);
|
||||
if (WARN_ON(!np))
|
||||
return -EINVAL;
|
||||
|
||||
ret = partition_translate_id(gic_data.ppi_descs[fwspec->param[1]],
|
||||
of_node_to_fwnode(np));
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
*hwirq = ret;
|
||||
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops partition_domain_ops = {
|
||||
.translate = partition_domain_translate,
|
||||
.select = gic_irq_domain_select,
|
||||
};
|
||||
|
||||
static void gicv3_enable_quirks(void)
|
||||
|
@ -843,6 +917,7 @@ static int __init gic_init_bases(void __iomem *dist_base,
|
|||
if (static_key_true(&supports_deactivate))
|
||||
pr_info("GIC: Using split EOI/Deactivate mode\n");
|
||||
|
||||
gic_data.fwnode = handle;
|
||||
gic_data.dist_base = dist_base;
|
||||
gic_data.redist_regions = rdist_regs;
|
||||
gic_data.nr_redist_regions = nr_redist_regions;
|
||||
|
@ -901,6 +976,119 @@ static int __init gic_validate_dist_version(void __iomem *dist_base)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int get_cpu_number(struct device_node *dn)
|
||||
{
|
||||
const __be32 *cell;
|
||||
u64 hwid;
|
||||
int i;
|
||||
|
||||
cell = of_get_property(dn, "reg", NULL);
|
||||
if (!cell)
|
||||
return -1;
|
||||
|
||||
hwid = of_read_number(cell, of_n_addr_cells(dn));
|
||||
|
||||
/*
|
||||
* Non affinity bits must be set to 0 in the DT
|
||||
*/
|
||||
if (hwid & ~MPIDR_HWID_BITMASK)
|
||||
return -1;
|
||||
|
||||
for (i = 0; i < num_possible_cpus(); i++)
|
||||
if (cpu_logical_map(i) == hwid)
|
||||
return i;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* Create all possible partitions at boot time */
|
||||
static void gic_populate_ppi_partitions(struct device_node *gic_node)
|
||||
{
|
||||
struct device_node *parts_node, *child_part;
|
||||
int part_idx = 0, i;
|
||||
int nr_parts;
|
||||
struct partition_affinity *parts;
|
||||
|
||||
parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
|
||||
if (!parts_node)
|
||||
return;
|
||||
|
||||
nr_parts = of_get_child_count(parts_node);
|
||||
|
||||
if (!nr_parts)
|
||||
return;
|
||||
|
||||
parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
|
||||
if (WARN_ON(!parts))
|
||||
return;
|
||||
|
||||
for_each_child_of_node(parts_node, child_part) {
|
||||
struct partition_affinity *part;
|
||||
int n;
|
||||
|
||||
part = &parts[part_idx];
|
||||
|
||||
part->partition_id = of_node_to_fwnode(child_part);
|
||||
|
||||
pr_info("GIC: PPI partition %s[%d] { ",
|
||||
child_part->name, part_idx);
|
||||
|
||||
n = of_property_count_elems_of_size(child_part, "affinity",
|
||||
sizeof(u32));
|
||||
WARN_ON(n <= 0);
|
||||
|
||||
for (i = 0; i < n; i++) {
|
||||
int err, cpu;
|
||||
u32 cpu_phandle;
|
||||
struct device_node *cpu_node;
|
||||
|
||||
err = of_property_read_u32_index(child_part, "affinity",
|
||||
i, &cpu_phandle);
|
||||
if (WARN_ON(err))
|
||||
continue;
|
||||
|
||||
cpu_node = of_find_node_by_phandle(cpu_phandle);
|
||||
if (WARN_ON(!cpu_node))
|
||||
continue;
|
||||
|
||||
cpu = get_cpu_number(cpu_node);
|
||||
if (WARN_ON(cpu == -1))
|
||||
continue;
|
||||
|
||||
pr_cont("%s[%d] ", cpu_node->full_name, cpu);
|
||||
|
||||
cpumask_set_cpu(cpu, &part->mask);
|
||||
}
|
||||
|
||||
pr_cont("}\n");
|
||||
part_idx++;
|
||||
}
|
||||
|
||||
for (i = 0; i < 16; i++) {
|
||||
unsigned int irq;
|
||||
struct partition_desc *desc;
|
||||
struct irq_fwspec ppi_fwspec = {
|
||||
.fwnode = gic_data.fwnode,
|
||||
.param_count = 3,
|
||||
.param = {
|
||||
[0] = 1,
|
||||
[1] = i,
|
||||
[2] = IRQ_TYPE_NONE,
|
||||
},
|
||||
};
|
||||
|
||||
irq = irq_create_fwspec_mapping(&ppi_fwspec);
|
||||
if (WARN_ON(!irq))
|
||||
continue;
|
||||
desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
|
||||
irq, &partition_domain_ops);
|
||||
if (WARN_ON(!desc))
|
||||
continue;
|
||||
|
||||
gic_data.ppi_descs[i] = desc;
|
||||
}
|
||||
}
|
||||
|
||||
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
|
||||
{
|
||||
void __iomem *dist_base;
|
||||
|
@ -952,8 +1140,11 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
|
|||
|
||||
err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
|
||||
redist_stride, &node->fwnode);
|
||||
if (!err)
|
||||
return 0;
|
||||
if (err)
|
||||
goto out_unmap_rdist;
|
||||
|
||||
gic_populate_ppi_partitions(node);
|
||||
return 0;
|
||||
|
||||
out_unmap_rdist:
|
||||
for (i = 0; i < nr_redist_regions; i++)
|
||||
|
|
|
@ -72,6 +72,9 @@ struct gic_chip_data {
|
|||
struct irq_chip chip;
|
||||
union gic_base dist_base;
|
||||
union gic_base cpu_base;
|
||||
void __iomem *raw_dist_base;
|
||||
void __iomem *raw_cpu_base;
|
||||
u32 percpu_offset;
|
||||
#ifdef CONFIG_CPU_PM
|
||||
u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
|
||||
u32 saved_spi_active[DIV_ROUND_UP(1020, 32)];
|
||||
|
@ -344,6 +347,14 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
|
|||
if (static_key_true(&supports_deactivate))
|
||||
writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
|
||||
#ifdef CONFIG_SMP
|
||||
/*
|
||||
* Ensure any shared data written by the CPU sending
|
||||
* the IPI is read after we've read the ACK register
|
||||
* on the GIC.
|
||||
*
|
||||
* Pairs with the write barrier in gic_raise_softirq
|
||||
*/
|
||||
smp_rmb();
|
||||
handle_IPI(irqnr, regs);
|
||||
#endif
|
||||
continue;
|
||||
|
@ -391,20 +402,6 @@ static struct irq_chip gic_chip = {
|
|||
IRQCHIP_MASK_ON_SUSPEND,
|
||||
};
|
||||
|
||||
static struct irq_chip gic_eoimode1_chip = {
|
||||
.name = "GICv2",
|
||||
.irq_mask = gic_eoimode1_mask_irq,
|
||||
.irq_unmask = gic_unmask_irq,
|
||||
.irq_eoi = gic_eoimode1_eoi_irq,
|
||||
.irq_set_type = gic_set_type,
|
||||
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
|
||||
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
|
||||
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
|
||||
.flags = IRQCHIP_SET_TYPE_MASKED |
|
||||
IRQCHIP_SKIP_SET_WAKE |
|
||||
IRQCHIP_MASK_ON_SUSPEND,
|
||||
};
|
||||
|
||||
void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
|
||||
{
|
||||
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
|
||||
|
@ -473,7 +470,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
|
|||
writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
|
||||
}
|
||||
|
||||
static void gic_cpu_init(struct gic_chip_data *gic)
|
||||
static int gic_cpu_init(struct gic_chip_data *gic)
|
||||
{
|
||||
void __iomem *dist_base = gic_data_dist_base(gic);
|
||||
void __iomem *base = gic_data_cpu_base(gic);
|
||||
|
@ -489,7 +486,9 @@ static void gic_cpu_init(struct gic_chip_data *gic)
|
|||
/*
|
||||
* Get what the GIC says our CPU mask is.
|
||||
*/
|
||||
BUG_ON(cpu >= NR_GIC_CPU_IF);
|
||||
if (WARN_ON(cpu >= NR_GIC_CPU_IF))
|
||||
return -EINVAL;
|
||||
|
||||
gic_check_cpu_features();
|
||||
cpu_mask = gic_get_cpumask(gic);
|
||||
gic_cpu_map[cpu] = cpu_mask;
|
||||
|
@ -507,6 +506,8 @@ static void gic_cpu_init(struct gic_chip_data *gic)
|
|||
|
||||
writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
|
||||
gic_cpu_if_up(gic);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int gic_cpu_if_down(unsigned int gic_nr)
|
||||
|
@ -532,34 +533,35 @@ int gic_cpu_if_down(unsigned int gic_nr)
|
|||
* this function, no interrupts will be delivered by the GIC, and another
|
||||
* platform-specific wakeup source must be enabled.
|
||||
*/
|
||||
static void gic_dist_save(unsigned int gic_nr)
|
||||
static void gic_dist_save(struct gic_chip_data *gic)
|
||||
{
|
||||
unsigned int gic_irqs;
|
||||
void __iomem *dist_base;
|
||||
int i;
|
||||
|
||||
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
|
||||
if (WARN_ON(!gic))
|
||||
return;
|
||||
|
||||
gic_irqs = gic_data[gic_nr].gic_irqs;
|
||||
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
|
||||
gic_irqs = gic->gic_irqs;
|
||||
dist_base = gic_data_dist_base(gic);
|
||||
|
||||
if (!dist_base)
|
||||
return;
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
|
||||
gic_data[gic_nr].saved_spi_conf[i] =
|
||||
gic->saved_spi_conf[i] =
|
||||
readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
|
||||
gic_data[gic_nr].saved_spi_target[i] =
|
||||
gic->saved_spi_target[i] =
|
||||
readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
|
||||
gic_data[gic_nr].saved_spi_enable[i] =
|
||||
gic->saved_spi_enable[i] =
|
||||
readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
|
||||
gic_data[gic_nr].saved_spi_active[i] =
|
||||
gic->saved_spi_active[i] =
|
||||
readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
|
||||
}
|
||||
|
||||
|
@ -570,16 +572,17 @@ static void gic_dist_save(unsigned int gic_nr)
|
|||
* handled normally, but any edge interrupts that occurred will not be seen by
|
||||
* the GIC and need to be handled by the platform-specific wakeup source.
|
||||
*/
|
||||
static void gic_dist_restore(unsigned int gic_nr)
|
||||
static void gic_dist_restore(struct gic_chip_data *gic)
|
||||
{
|
||||
unsigned int gic_irqs;
|
||||
unsigned int i;
|
||||
void __iomem *dist_base;
|
||||
|
||||
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
|
||||
if (WARN_ON(!gic))
|
||||
return;
|
||||
|
||||
gic_irqs = gic_data[gic_nr].gic_irqs;
|
||||
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
|
||||
gic_irqs = gic->gic_irqs;
|
||||
dist_base = gic_data_dist_base(gic);
|
||||
|
||||
if (!dist_base)
|
||||
return;
|
||||
|
@ -587,7 +590,7 @@ static void gic_dist_restore(unsigned int gic_nr)
|
|||
writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
|
||||
writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
|
||||
writel_relaxed(gic->saved_spi_conf[i],
|
||||
dist_base + GIC_DIST_CONFIG + i * 4);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
|
||||
|
@ -595,85 +598,87 @@ static void gic_dist_restore(unsigned int gic_nr)
|
|||
dist_base + GIC_DIST_PRI + i * 4);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
|
||||
writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
|
||||
writel_relaxed(gic->saved_spi_target[i],
|
||||
dist_base + GIC_DIST_TARGET + i * 4);
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
|
||||
writel_relaxed(GICD_INT_EN_CLR_X32,
|
||||
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
|
||||
writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
|
||||
writel_relaxed(gic->saved_spi_enable[i],
|
||||
dist_base + GIC_DIST_ENABLE_SET + i * 4);
|
||||
}
|
||||
|
||||
for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) {
|
||||
writel_relaxed(GICD_INT_EN_CLR_X32,
|
||||
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
|
||||
writel_relaxed(gic_data[gic_nr].saved_spi_active[i],
|
||||
writel_relaxed(gic->saved_spi_active[i],
|
||||
dist_base + GIC_DIST_ACTIVE_SET + i * 4);
|
||||
}
|
||||
|
||||
writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
|
||||
}
|
||||
|
||||
static void gic_cpu_save(unsigned int gic_nr)
|
||||
static void gic_cpu_save(struct gic_chip_data *gic)
|
||||
{
|
||||
int i;
|
||||
u32 *ptr;
|
||||
void __iomem *dist_base;
|
||||
void __iomem *cpu_base;
|
||||
|
||||
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
|
||||
if (WARN_ON(!gic))
|
||||
return;
|
||||
|
||||
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
|
||||
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
|
||||
dist_base = gic_data_dist_base(gic);
|
||||
cpu_base = gic_data_cpu_base(gic);
|
||||
|
||||
if (!dist_base || !cpu_base)
|
||||
return;
|
||||
|
||||
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
|
||||
ptr = raw_cpu_ptr(gic->saved_ppi_enable);
|
||||
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
|
||||
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
|
||||
|
||||
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
|
||||
ptr = raw_cpu_ptr(gic->saved_ppi_active);
|
||||
for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
|
||||
ptr[i] = readl_relaxed(dist_base + GIC_DIST_ACTIVE_SET + i * 4);
|
||||
|
||||
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
|
||||
ptr = raw_cpu_ptr(gic->saved_ppi_conf);
|
||||
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
|
||||
ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);
|
||||
|
||||
}
|
||||
|
||||
static void gic_cpu_restore(unsigned int gic_nr)
|
||||
static void gic_cpu_restore(struct gic_chip_data *gic)
|
||||
{
|
||||
int i;
|
||||
u32 *ptr;
|
||||
void __iomem *dist_base;
|
||||
void __iomem *cpu_base;
|
||||
|
||||
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
|
||||
if (WARN_ON(!gic))
|
||||
return;
|
||||
|
||||
dist_base = gic_data_dist_base(&gic_data[gic_nr]);
|
||||
cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
|
||||
dist_base = gic_data_dist_base(gic);
|
||||
cpu_base = gic_data_cpu_base(gic);
|
||||
|
||||
if (!dist_base || !cpu_base)
|
||||
return;
|
||||
|
||||
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
|
||||
ptr = raw_cpu_ptr(gic->saved_ppi_enable);
|
||||
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
|
||||
writel_relaxed(GICD_INT_EN_CLR_X32,
|
||||
dist_base + GIC_DIST_ENABLE_CLEAR + i * 4);
|
||||
writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);
|
||||
}
|
||||
|
||||
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_active);
|
||||
ptr = raw_cpu_ptr(gic->saved_ppi_active);
|
||||
for (i = 0; i < DIV_ROUND_UP(32, 32); i++) {
|
||||
writel_relaxed(GICD_INT_EN_CLR_X32,
|
||||
dist_base + GIC_DIST_ACTIVE_CLEAR + i * 4);
|
||||
writel_relaxed(ptr[i], dist_base + GIC_DIST_ACTIVE_SET + i * 4);
|
||||
}
|
||||
|
||||
ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
|
||||
ptr = raw_cpu_ptr(gic->saved_ppi_conf);
|
||||
for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
|
||||
writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);
|
||||
|
||||
|
@ -682,7 +687,7 @@ static void gic_cpu_restore(unsigned int gic_nr)
|
|||
dist_base + GIC_DIST_PRI + i * 4);
|
||||
|
||||
writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
|
||||
gic_cpu_if_up(&gic_data[gic_nr]);
|
||||
gic_cpu_if_up(gic);
|
||||
}
|
||||
|
||||
static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
|
||||
|
@ -697,18 +702,18 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
|
|||
#endif
|
||||
switch (cmd) {
|
||||
case CPU_PM_ENTER:
|
||||
gic_cpu_save(i);
|
||||
gic_cpu_save(&gic_data[i]);
|
||||
break;
|
||||
case CPU_PM_ENTER_FAILED:
|
||||
case CPU_PM_EXIT:
|
||||
gic_cpu_restore(i);
|
||||
gic_cpu_restore(&gic_data[i]);
|
||||
break;
|
||||
case CPU_CLUSTER_PM_ENTER:
|
||||
gic_dist_save(i);
|
||||
gic_dist_save(&gic_data[i]);
|
||||
break;
|
||||
case CPU_CLUSTER_PM_ENTER_FAILED:
|
||||
case CPU_CLUSTER_PM_EXIT:
|
||||
gic_dist_restore(i);
|
||||
gic_dist_restore(&gic_data[i]);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -720,26 +725,39 @@ static struct notifier_block gic_notifier_block = {
|
|||
.notifier_call = gic_notifier,
|
||||
};
|
||||
|
||||
static void __init gic_pm_init(struct gic_chip_data *gic)
|
||||
static int __init gic_pm_init(struct gic_chip_data *gic)
|
||||
{
|
||||
gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
|
||||
sizeof(u32));
|
||||
BUG_ON(!gic->saved_ppi_enable);
|
||||
if (WARN_ON(!gic->saved_ppi_enable))
|
||||
return -ENOMEM;
|
||||
|
||||
gic->saved_ppi_active = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
|
||||
sizeof(u32));
|
||||
BUG_ON(!gic->saved_ppi_active);
|
||||
if (WARN_ON(!gic->saved_ppi_active))
|
||||
goto free_ppi_enable;
|
||||
|
||||
gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
|
||||
sizeof(u32));
|
||||
BUG_ON(!gic->saved_ppi_conf);
|
||||
if (WARN_ON(!gic->saved_ppi_conf))
|
||||
goto free_ppi_active;
|
||||
|
||||
if (gic == &gic_data[0])
|
||||
cpu_pm_register_notifier(&gic_notifier_block);
|
||||
|
||||
return 0;
|
||||
|
||||
free_ppi_active:
|
||||
free_percpu(gic->saved_ppi_active);
|
||||
free_ppi_enable:
|
||||
free_percpu(gic->saved_ppi_enable);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
#else
|
||||
static void __init gic_pm_init(struct gic_chip_data *gic)
|
||||
static int __init gic_pm_init(struct gic_chip_data *gic)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
#endif
|
||||
|
||||
|
@ -1012,61 +1030,63 @@ static const struct irq_domain_ops gic_irq_domain_ops = {
|
|||
.unmap = gic_irq_domain_unmap,
|
||||
};
|
||||
|
||||
static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
|
||||
void __iomem *dist_base, void __iomem *cpu_base,
|
||||
u32 percpu_offset, struct fwnode_handle *handle)
|
||||
static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start,
|
||||
struct fwnode_handle *handle)
|
||||
{
|
||||
irq_hw_number_t hwirq_base;
|
||||
struct gic_chip_data *gic;
|
||||
int gic_irqs, irq_base, i;
|
||||
int gic_irqs, irq_base, i, ret;
|
||||
|
||||
BUG_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR);
|
||||
|
||||
gic = &gic_data[gic_nr];
|
||||
if (WARN_ON(!gic || gic->domain))
|
||||
return -EINVAL;
|
||||
|
||||
/* Initialize irq_chip */
|
||||
if (static_key_true(&supports_deactivate) && gic_nr == 0) {
|
||||
gic->chip = gic_eoimode1_chip;
|
||||
gic->chip = gic_chip;
|
||||
|
||||
if (static_key_true(&supports_deactivate) && gic == &gic_data[0]) {
|
||||
gic->chip.irq_mask = gic_eoimode1_mask_irq;
|
||||
gic->chip.irq_eoi = gic_eoimode1_eoi_irq;
|
||||
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
|
||||
gic->chip.name = kasprintf(GFP_KERNEL, "GICv2");
|
||||
} else {
|
||||
gic->chip = gic_chip;
|
||||
gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
|
||||
gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d",
|
||||
(int)(gic - &gic_data[0]));
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
if (gic_nr == 0)
|
||||
if (gic == &gic_data[0])
|
||||
gic->chip.irq_set_affinity = gic_set_affinity;
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_GIC_NON_BANKED
|
||||
if (percpu_offset) { /* Frankein-GIC without banked registers... */
|
||||
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
|
||||
/* Frankein-GIC without banked registers... */
|
||||
unsigned int cpu;
|
||||
|
||||
gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
|
||||
gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
|
||||
if (WARN_ON(!gic->dist_base.percpu_base ||
|
||||
!gic->cpu_base.percpu_base)) {
|
||||
free_percpu(gic->dist_base.percpu_base);
|
||||
free_percpu(gic->cpu_base.percpu_base);
|
||||
return;
|
||||
ret = -ENOMEM;
|
||||
goto error;
|
||||
}
|
||||
|
||||
for_each_possible_cpu(cpu) {
|
||||
u32 mpidr = cpu_logical_map(cpu);
|
||||
u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
|
||||
unsigned long offset = percpu_offset * core_id;
|
||||
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
|
||||
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
|
||||
unsigned long offset = gic->percpu_offset * core_id;
|
||||
*per_cpu_ptr(gic->dist_base.percpu_base, cpu) =
|
||||
gic->raw_dist_base + offset;
|
||||
*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) =
|
||||
gic->raw_cpu_base + offset;
|
||||
}
|
||||
|
||||
gic_set_base_accessor(gic, gic_get_percpu_base);
|
||||
} else
|
||||
#endif
|
||||
{ /* Normal, sane GIC... */
|
||||
WARN(percpu_offset,
|
||||
} else {
|
||||
/* Normal, sane GIC... */
|
||||
WARN(gic->percpu_offset,
|
||||
"GIC_NON_BANKED not enabled, ignoring %08x offset!",
|
||||
percpu_offset);
|
||||
gic->dist_base.common_base = dist_base;
|
||||
gic->cpu_base.common_base = cpu_base;
|
||||
gic->percpu_offset);
|
||||
gic->dist_base.common_base = gic->raw_dist_base;
|
||||
gic->cpu_base.common_base = gic->raw_cpu_base;
|
||||
gic_set_base_accessor(gic, gic_get_common_base);
|
||||
}
|
||||
|
||||
|
@ -1089,7 +1109,7 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
|
|||
* For primary GICs, skip over SGIs.
|
||||
* For secondary GICs, skip over PPIs, too.
|
||||
*/
|
||||
if (gic_nr == 0 && (irq_start & 31) > 0) {
|
||||
if (gic == &gic_data[0] && (irq_start & 31) > 0) {
|
||||
hwirq_base = 16;
|
||||
if (irq_start != -1)
|
||||
irq_start = (irq_start & ~31) + 16;
|
||||
|
@ -1111,10 +1131,12 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
|
|||
hwirq_base, &gic_irq_domain_ops, gic);
|
||||
}
|
||||
|
||||
if (WARN_ON(!gic->domain))
|
||||
return;
|
||||
if (WARN_ON(!gic->domain)) {
|
||||
ret = -ENODEV;
|
||||
goto error;
|
||||
}
|
||||
|
||||
if (gic_nr == 0) {
|
||||
if (gic == &gic_data[0]) {
|
||||
/*
|
||||
* Initialize the CPU interface map to all CPUs.
|
||||
* It will be refined as each CPU probes its ID.
|
||||
|
@ -1132,19 +1154,57 @@ static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
|
|||
}
|
||||
|
||||
gic_dist_init(gic);
|
||||
gic_cpu_init(gic);
|
||||
gic_pm_init(gic);
|
||||
ret = gic_cpu_init(gic);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
ret = gic_pm_init(gic);
|
||||
if (ret)
|
||||
goto error;
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
if (IS_ENABLED(CONFIG_GIC_NON_BANKED) && gic->percpu_offset) {
|
||||
free_percpu(gic->dist_base.percpu_base);
|
||||
free_percpu(gic->cpu_base.percpu_base);
|
||||
}
|
||||
|
||||
kfree(gic->chip.name);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void __init gic_init(unsigned int gic_nr, int irq_start,
|
||||
void __iomem *dist_base, void __iomem *cpu_base)
|
||||
{
|
||||
struct gic_chip_data *gic;
|
||||
|
||||
if (WARN_ON(gic_nr >= CONFIG_ARM_GIC_MAX_NR))
|
||||
return;
|
||||
|
||||
/*
|
||||
* Non-DT/ACPI systems won't run a hypervisor, so let's not
|
||||
* bother with these...
|
||||
*/
|
||||
static_key_slow_dec(&supports_deactivate);
|
||||
__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL);
|
||||
|
||||
gic = &gic_data[gic_nr];
|
||||
gic->raw_dist_base = dist_base;
|
||||
gic->raw_cpu_base = cpu_base;
|
||||
|
||||
__gic_init_bases(gic, irq_start, NULL);
|
||||
}
|
||||
|
||||
static void gic_teardown(struct gic_chip_data *gic)
|
||||
{
|
||||
if (WARN_ON(!gic))
|
||||
return;
|
||||
|
||||
if (gic->raw_dist_base)
|
||||
iounmap(gic->raw_dist_base);
|
||||
if (gic->raw_cpu_base)
|
||||
iounmap(gic->raw_cpu_base);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
|
@ -1188,35 +1248,61 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
|
|||
return true;
|
||||
}
|
||||
|
||||
static int gic_of_setup(struct gic_chip_data *gic, struct device_node *node)
|
||||
{
|
||||
if (!gic || !node)
|
||||
return -EINVAL;
|
||||
|
||||
gic->raw_dist_base = of_iomap(node, 0);
|
||||
if (WARN(!gic->raw_dist_base, "unable to map gic dist registers\n"))
|
||||
goto error;
|
||||
|
||||
gic->raw_cpu_base = of_iomap(node, 1);
|
||||
if (WARN(!gic->raw_cpu_base, "unable to map gic cpu registers\n"))
|
||||
goto error;
|
||||
|
||||
if (of_property_read_u32(node, "cpu-offset", &gic->percpu_offset))
|
||||
gic->percpu_offset = 0;
|
||||
|
||||
return 0;
|
||||
|
||||
error:
|
||||
gic_teardown(gic);
|
||||
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
int __init
|
||||
gic_of_init(struct device_node *node, struct device_node *parent)
|
||||
{
|
||||
void __iomem *cpu_base;
|
||||
void __iomem *dist_base;
|
||||
u32 percpu_offset;
|
||||
int irq;
|
||||
struct gic_chip_data *gic;
|
||||
int irq, ret;
|
||||
|
||||
if (WARN_ON(!node))
|
||||
return -ENODEV;
|
||||
|
||||
dist_base = of_iomap(node, 0);
|
||||
WARN(!dist_base, "unable to map gic dist registers\n");
|
||||
if (WARN_ON(gic_cnt >= CONFIG_ARM_GIC_MAX_NR))
|
||||
return -EINVAL;
|
||||
|
||||
cpu_base = of_iomap(node, 1);
|
||||
WARN(!cpu_base, "unable to map gic cpu registers\n");
|
||||
gic = &gic_data[gic_cnt];
|
||||
|
||||
ret = gic_of_setup(gic, node);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/*
|
||||
* Disable split EOI/Deactivate if either HYP is not available
|
||||
* or the CPU interface is too small.
|
||||
*/
|
||||
if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
|
||||
if (gic_cnt == 0 && !gic_check_eoimode(node, &gic->raw_cpu_base))
|
||||
static_key_slow_dec(&supports_deactivate);
|
||||
|
||||
if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
|
||||
percpu_offset = 0;
|
||||
ret = __gic_init_bases(gic, -1, &node->fwnode);
|
||||
if (ret) {
|
||||
gic_teardown(gic);
|
||||
return ret;
|
||||
}
|
||||
|
||||
__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset,
|
||||
&node->fwnode);
|
||||
if (!gic_cnt)
|
||||
gic_init_physaddr(node);
|
||||
|
||||
|
@ -1303,9 +1389,9 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
|
|||
const unsigned long end)
|
||||
{
|
||||
struct acpi_madt_generic_distributor *dist;
|
||||
void __iomem *cpu_base, *dist_base;
|
||||
struct fwnode_handle *domain_handle;
|
||||
int count;
|
||||
struct gic_chip_data *gic = &gic_data[0];
|
||||
int count, ret;
|
||||
|
||||
/* Collect CPU base addresses */
|
||||
count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
|
||||
|
@ -1315,17 +1401,18 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
|
|||
return -EINVAL;
|
||||
}
|
||||
|
||||
cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
|
||||
if (!cpu_base) {
|
||||
gic->raw_cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
|
||||
if (!gic->raw_cpu_base) {
|
||||
pr_err("Unable to map GICC registers\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
dist = (struct acpi_madt_generic_distributor *)header;
|
||||
dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE);
|
||||
if (!dist_base) {
|
||||
gic->raw_dist_base = ioremap(dist->base_address,
|
||||
ACPI_GICV2_DIST_MEM_SIZE);
|
||||
if (!gic->raw_dist_base) {
|
||||
pr_err("Unable to map GICD registers\n");
|
||||
iounmap(cpu_base);
|
||||
gic_teardown(gic);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
|
@ -1340,15 +1427,20 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header,
|
|||
/*
|
||||
* Initialize GIC instance zero (no multi-GIC support).
|
||||
*/
|
||||
domain_handle = irq_domain_alloc_fwnode(dist_base);
|
||||
domain_handle = irq_domain_alloc_fwnode(gic->raw_dist_base);
|
||||
if (!domain_handle) {
|
||||
pr_err("Unable to allocate domain handle\n");
|
||||
iounmap(cpu_base);
|
||||
iounmap(dist_base);
|
||||
gic_teardown(gic);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
__gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle);
|
||||
ret = __gic_init_bases(gic, -1, domain_handle);
|
||||
if (ret) {
|
||||
pr_err("Failed to initialise GIC\n");
|
||||
irq_domain_free_fwnode(domain_handle);
|
||||
gic_teardown(gic);
|
||||
return ret;
|
||||
}
|
||||
|
||||
acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle);
|
||||
|
||||
|
|
drivers/irqchip/irq-lpc32xx.c (new file, 238 lines)
|
@ -0,0 +1,238 @@
|
|||
/*
|
||||
* Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
|
||||
*
|
||||
* The code contained herein is licensed under the GNU General Public
|
||||
* License. You may obtain a copy of the GNU General Public License
|
||||
* Version 2 or later at the following locations:
|
||||
*
|
||||
* http://www.opensource.org/licenses/gpl-license.html
|
||||
* http://www.gnu.org/copyleft/gpl.html
|
||||
*/
|
||||
|
||||
#define pr_fmt(fmt) "%s: " fmt, __func__
|
||||
|
||||
#include <linux/io.h>
|
||||
#include <linux/irqchip.h>
|
||||
#include <linux/irqchip/chained_irq.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/slab.h>
|
||||
#include <asm/exception.h>
|
||||
|
||||
#define LPC32XX_INTC_MASK 0x00
|
||||
#define LPC32XX_INTC_RAW 0x04
|
||||
#define LPC32XX_INTC_STAT 0x08
|
||||
#define LPC32XX_INTC_POL 0x0C
|
||||
#define LPC32XX_INTC_TYPE 0x10
|
||||
#define LPC32XX_INTC_FIQ 0x14
|
||||
|
||||
#define NR_LPC32XX_IC_IRQS 32
|
||||
|
||||
struct lpc32xx_irq_chip {
|
||||
void __iomem *base;
|
||||
struct irq_domain *domain;
|
||||
struct irq_chip chip;
|
||||
};
|
||||
|
||||
static struct lpc32xx_irq_chip *lpc32xx_mic_irqc;
|
||||
|
||||
static inline u32 lpc32xx_ic_read(struct lpc32xx_irq_chip *ic, u32 reg)
|
||||
{
|
||||
return readl_relaxed(ic->base + reg);
|
||||
}
|
||||
|
||||
static inline void lpc32xx_ic_write(struct lpc32xx_irq_chip *ic,
|
||||
u32 reg, u32 val)
|
||||
{
|
||||
writel_relaxed(val, ic->base + reg);
|
||||
}
|
||||
|
||||
static void lpc32xx_irq_mask(struct irq_data *d)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
u32 val, mask = BIT(d->hwirq);
|
||||
|
||||
val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) & ~mask;
|
||||
lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
|
||||
}
|
||||
|
||||
static void lpc32xx_irq_unmask(struct irq_data *d)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
u32 val, mask = BIT(d->hwirq);
|
||||
|
||||
val = lpc32xx_ic_read(ic, LPC32XX_INTC_MASK) | mask;
|
||||
lpc32xx_ic_write(ic, LPC32XX_INTC_MASK, val);
|
||||
}
|
||||
|
||||
static void lpc32xx_irq_ack(struct irq_data *d)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
u32 mask = BIT(d->hwirq);
|
||||
|
||||
lpc32xx_ic_write(ic, LPC32XX_INTC_RAW, mask);
|
||||
}
|
||||
|
||||
static int lpc32xx_irq_set_type(struct irq_data *d, unsigned int type)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = irq_data_get_irq_chip_data(d);
|
||||
u32 val, mask = BIT(d->hwirq);
|
||||
bool high, edge;
|
||||
|
||||
switch (type) {
|
||||
case IRQ_TYPE_EDGE_RISING:
|
||||
edge = true;
|
||||
high = true;
|
||||
break;
|
||||
case IRQ_TYPE_EDGE_FALLING:
|
||||
edge = true;
|
||||
high = false;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_HIGH:
|
||||
edge = false;
|
||||
high = true;
|
||||
break;
|
||||
case IRQ_TYPE_LEVEL_LOW:
|
||||
edge = false;
|
||||
high = false;
|
||||
break;
|
||||
default:
|
||||
pr_info("unsupported irq type %d\n", type);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
irqd_set_trigger_type(d, type);
|
||||
|
||||
val = lpc32xx_ic_read(ic, LPC32XX_INTC_POL);
|
||||
if (high)
|
||||
val |= mask;
|
||||
else
|
||||
val &= ~mask;
|
||||
lpc32xx_ic_write(ic, LPC32XX_INTC_POL, val);
|
||||
|
||||
val = lpc32xx_ic_read(ic, LPC32XX_INTC_TYPE);
|
||||
if (edge) {
|
||||
val |= mask;
|
||||
irq_set_handler_locked(d, handle_edge_irq);
|
||||
} else {
|
||||
val &= ~mask;
|
||||
irq_set_handler_locked(d, handle_level_irq);
|
||||
}
|
||||
lpc32xx_ic_write(ic, LPC32XX_INTC_TYPE, val);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exception_irq_entry lpc32xx_handle_irq(struct pt_regs *regs)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = lpc32xx_mic_irqc;
|
||||
u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
|
||||
|
||||
while (hwirq) {
|
||||
irq = __ffs(hwirq);
|
||||
hwirq &= ~BIT(irq);
|
||||
handle_domain_irq(lpc32xx_mic_irqc->domain, irq, regs);
|
||||
}
|
||||
}
|
||||
|
||||
static void lpc32xx_sic_handler(struct irq_desc *desc)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = irq_desc_get_handler_data(desc);
|
||||
struct irq_chip *chip = irq_desc_get_chip(desc);
|
||||
u32 hwirq = lpc32xx_ic_read(ic, LPC32XX_INTC_STAT), irq;
|
||||
|
||||
chained_irq_enter(chip, desc);
|
||||
|
||||
while (hwirq) {
|
||||
irq = __ffs(hwirq);
|
||||
hwirq &= ~BIT(irq);
|
||||
generic_handle_irq(irq_find_mapping(ic->domain, irq));
|
||||
}
|
||||
|
||||
chained_irq_exit(chip, desc);
|
||||
}
|
||||
|
||||
static int lpc32xx_irq_domain_map(struct irq_domain *id, unsigned int virq,
|
||||
irq_hw_number_t hw)
|
||||
{
|
||||
struct lpc32xx_irq_chip *ic = id->host_data;
|
||||
|
||||
irq_set_chip_data(virq, ic);
|
||||
irq_set_chip_and_handler(virq, &ic->chip, handle_level_irq);
|
||||
irq_set_status_flags(virq, IRQ_LEVEL);
|
||||
irq_set_noprobe(virq);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void lpc32xx_irq_domain_unmap(struct irq_domain *id, unsigned int virq)
|
||||
{
|
||||
irq_set_chip_and_handler(virq, NULL, NULL);
|
||||
}
|
||||
|
||||
static const struct irq_domain_ops lpc32xx_irq_domain_ops = {
|
||||
.map = lpc32xx_irq_domain_map,
|
||||
.unmap = lpc32xx_irq_domain_unmap,
|
||||
.xlate = irq_domain_xlate_twocell,
|
||||
};
|
||||
|
||||
static int __init lpc32xx_of_ic_init(struct device_node *node,
|
||||
struct device_node *parent)
|
||||
{
|
||||
struct lpc32xx_irq_chip *irqc;
|
||||
bool is_mic = of_device_is_compatible(node, "nxp,lpc3220-mic");
|
||||
const __be32 *reg = of_get_property(node, "reg", NULL);
|
||||
u32 parent_irq, i, addr = reg ? be32_to_cpu(*reg) : 0;
|
||||
|
||||
irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
|
||||
if (!irqc)
|
||||
return -ENOMEM;
|
||||
|
||||
irqc->base = of_iomap(node, 0);
|
||||
if (!irqc->base) {
|
||||
pr_err("%s: unable to map registers\n", node->full_name);
|
||||
kfree(irqc);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
irqc->chip.irq_ack = lpc32xx_irq_ack;
|
||||
irqc->chip.irq_mask = lpc32xx_irq_mask;
|
||||
irqc->chip.irq_unmask = lpc32xx_irq_unmask;
|
||||
irqc->chip.irq_set_type = lpc32xx_irq_set_type;
|
||||
if (is_mic)
|
||||
irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.mic", addr);
|
||||
else
|
||||
irqc->chip.name = kasprintf(GFP_KERNEL, "%08x.sic", addr);
|
||||
|
||||
irqc->domain = irq_domain_add_linear(node, NR_LPC32XX_IC_IRQS,
|
||||
&lpc32xx_irq_domain_ops, irqc);
|
||||
if (!irqc->domain) {
|
||||
pr_err("unable to add irq domain\n");
|
||||
iounmap(irqc->base);
|
||||
kfree(irqc->chip.name);
|
||||
kfree(irqc);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (is_mic) {
|
||||
lpc32xx_mic_irqc = irqc;
|
||||
set_handle_irq(lpc32xx_handle_irq);
|
||||
} else {
|
||||
for (i = 0; i < of_irq_count(node); i++) {
|
||||
parent_irq = irq_of_parse_and_map(node, i);
|
||||
if (parent_irq)
|
||||
irq_set_chained_handler_and_data(parent_irq,
|
||||
lpc32xx_sic_handler, irqc);
|
||||
}
|
||||
}
|
||||
|
||||
lpc32xx_ic_write(irqc, LPC32XX_INTC_MASK, 0x00);
|
||||
lpc32xx_ic_write(irqc, LPC32XX_INTC_POL, 0x00);
|
||||
lpc32xx_ic_write(irqc, LPC32XX_INTC_TYPE, 0x00);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
IRQCHIP_DECLARE(nxp_lpc32xx_mic, "nxp,lpc3220-mic", lpc32xx_of_ic_init);
|
||||
IRQCHIP_DECLARE(nxp_lpc32xx_sic, "nxp,lpc3220-sic", lpc32xx_of_ic_init);
|
drivers/irqchip/irq-ls-scfg-msi.c (new file, 240 lines)
|
@ -0,0 +1,240 @@
|
|||
/*
 * Freescale SCFG MSI(-X) support
 *
 * Copyright (C) 2016 Freescale Semiconductor.
 *
 * Author: Minghuan Lian <Minghuan.Lian@nxp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/spinlock.h>

#define MSI_MAX_IRQS	32
#define MSI_IBS_SHIFT	3
#define MSIR		4

struct ls_scfg_msi {
	spinlock_t		lock;
	struct platform_device	*pdev;
	struct irq_domain	*parent;
	struct irq_domain	*msi_domain;
	void __iomem		*regs;
	phys_addr_t		msiir_addr;
	int			irq;
	DECLARE_BITMAP(used, MSI_MAX_IRQS);
};

static struct irq_chip ls_scfg_msi_irq_chip = {
	.name		= "MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info ls_scfg_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS |
		   MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &ls_scfg_msi_irq_chip,
};

static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(data);

	msg->address_hi = upper_32_bits(msi_data->msiir_addr);
	msg->address_lo = lower_32_bits(msi_data->msiir_addr);
	msg->data = data->hwirq << MSI_IBS_SHIFT;
}

static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
				    const struct cpumask *mask, bool force)
{
	return -EINVAL;
}

static struct irq_chip ls_scfg_msi_parent_chip = {
	.name			= "SCFG",
	.irq_compose_msi_msg	= ls_scfg_msi_compose_msg,
	.irq_set_affinity	= ls_scfg_msi_set_affinity,
};

static int ls_scfg_msi_domain_irq_alloc(struct irq_domain *domain,
					unsigned int virq,
					unsigned int nr_irqs,
					void *args)
{
	struct ls_scfg_msi *msi_data = domain->host_data;
	int pos, err = 0;

	WARN_ON(nr_irqs != 1);

	spin_lock(&msi_data->lock);
	pos = find_first_zero_bit(msi_data->used, MSI_MAX_IRQS);
	if (pos < MSI_MAX_IRQS)
		__set_bit(pos, msi_data->used);
	else
		err = -ENOSPC;
	spin_unlock(&msi_data->lock);

	if (err)
		return err;

	irq_domain_set_info(domain, virq, pos,
			    &ls_scfg_msi_parent_chip, msi_data,
			    handle_simple_irq, NULL, NULL);

	return 0;
}

static void ls_scfg_msi_domain_irq_free(struct irq_domain *domain,
					unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct ls_scfg_msi *msi_data = irq_data_get_irq_chip_data(d);
	int pos;

	pos = d->hwirq;
	if (pos < 0 || pos >= MSI_MAX_IRQS) {
		pr_err("failed to teardown msi. Invalid hwirq %d\n", pos);
		return;
	}

	spin_lock(&msi_data->lock);
	__clear_bit(pos, msi_data->used);
	spin_unlock(&msi_data->lock);
}

static const struct irq_domain_ops ls_scfg_msi_domain_ops = {
	.alloc	= ls_scfg_msi_domain_irq_alloc,
	.free	= ls_scfg_msi_domain_irq_free,
};

static void ls_scfg_msi_irq_handler(struct irq_desc *desc)
{
	struct ls_scfg_msi *msi_data = irq_desc_get_handler_data(desc);
	unsigned long val;
	int pos, virq;

	chained_irq_enter(irq_desc_get_chip(desc), desc);

	val = ioread32be(msi_data->regs + MSIR);
	for_each_set_bit(pos, &val, MSI_MAX_IRQS) {
		virq = irq_find_mapping(msi_data->parent, (31 - pos));
		if (virq)
			generic_handle_irq(virq);
	}

	chained_irq_exit(irq_desc_get_chip(desc), desc);
}

static int ls_scfg_msi_domains_init(struct ls_scfg_msi *msi_data)
{
	/* Initialize MSI domain parent */
	msi_data->parent = irq_domain_add_linear(NULL,
						 MSI_MAX_IRQS,
						 &ls_scfg_msi_domain_ops,
						 msi_data);
	if (!msi_data->parent) {
		dev_err(&msi_data->pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	msi_data->msi_domain = pci_msi_create_irq_domain(
				of_node_to_fwnode(msi_data->pdev->dev.of_node),
				&ls_scfg_msi_domain_info,
				msi_data->parent);
	if (!msi_data->msi_domain) {
		dev_err(&msi_data->pdev->dev, "failed to create MSI domain\n");
		irq_domain_remove(msi_data->parent);
		return -ENOMEM;
	}

	return 0;
}

static int ls_scfg_msi_probe(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data;
	struct resource *res;
	int ret;

	msi_data = devm_kzalloc(&pdev->dev, sizeof(*msi_data), GFP_KERNEL);
	if (!msi_data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	msi_data->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(msi_data->regs)) {
		dev_err(&pdev->dev, "failed to initialize 'regs'\n");
		return PTR_ERR(msi_data->regs);
	}
	msi_data->msiir_addr = res->start;

	msi_data->irq = platform_get_irq(pdev, 0);
	if (msi_data->irq <= 0) {
		dev_err(&pdev->dev, "failed to get MSI irq\n");
		return -ENODEV;
	}

	msi_data->pdev = pdev;
	spin_lock_init(&msi_data->lock);

	ret = ls_scfg_msi_domains_init(msi_data);
	if (ret)
		return ret;

	irq_set_chained_handler_and_data(msi_data->irq,
					 ls_scfg_msi_irq_handler,
					 msi_data);

	platform_set_drvdata(pdev, msi_data);

	return 0;
}

static int ls_scfg_msi_remove(struct platform_device *pdev)
{
	struct ls_scfg_msi *msi_data = platform_get_drvdata(pdev);

	irq_set_chained_handler_and_data(msi_data->irq, NULL, NULL);

	irq_domain_remove(msi_data->msi_domain);
	irq_domain_remove(msi_data->parent);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static const struct of_device_id ls_scfg_msi_id[] = {
	{ .compatible = "fsl,1s1021a-msi", },
	{ .compatible = "fsl,1s1043a-msi", },
	{},
};

static struct platform_driver ls_scfg_msi_driver = {
	.driver = {
		.name = "ls-scfg-msi",
		.of_match_table = ls_scfg_msi_id,
	},
	.probe = ls_scfg_msi_probe,
	.remove = ls_scfg_msi_remove,
};

module_platform_driver(ls_scfg_msi_driver);

MODULE_AUTHOR("Minghuan Lian <Minghuan.Lian@nxp.com>");
MODULE_DESCRIPTION("Freescale Layerscape SCFG MSI controller driver");
MODULE_LICENSE("GPL v2");
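The allocation path in ls_scfg_msi_domain_irq_alloc() above is a plain first-fit bitmap allocator guarded by msi_data->lock. A minimal stand-alone sketch of the same pattern follows; it is an editor's illustration only (not part of this commit), the sketch_* names are invented, and the driver itself uses find_first_zero_bit()/__set_bit() on a DECLARE_BITMAP() rather than a raw unsigned long.

#include <stdio.h>

#define SKETCH_MAX_IRQS 32

static unsigned long sketch_used;	/* stand-in for msi_data->used */

/* Return the first free hwirq and mark it used, or -1 when the bitmap is
 * full (the driver returns -ENOSPC and holds msi_data->lock around this). */
static int sketch_alloc_hwirq(void)
{
	int pos;

	for (pos = 0; pos < SKETCH_MAX_IRQS; pos++) {
		if (!(sketch_used & (1UL << pos))) {
			sketch_used |= 1UL << pos;
			return pos;
		}
	}
	return -1;
}

int main(void)
{
	printf("first hwirq:  %d\n", sketch_alloc_hwirq());	/* prints 0 */
	printf("second hwirq: %d\n", sketch_alloc_hwirq());	/* prints 1 */
	return 0;
}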
@ -263,8 +263,8 @@ static int mbigen_device_probe(struct platform_device *pdev)
	parent = platform_bus_type.dev_root;
	child = of_platform_device_create(np, NULL, parent);
	if (IS_ERR(child))
		return PTR_ERR(child);
	if (!child)
		return -ENOMEM;

	if (of_property_read_u32(child->dev.of_node, "num-pins",
				 &num_pins) < 0) {
drivers/irqchip/irq-partition-percpu.c (new file, 256 lines)
@ -0,0 +1,256 @@
/*
 * Copyright (C) 2016 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/irqdomain.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

struct partition_desc {
	int				nr_parts;
	struct partition_affinity	*parts;
	struct irq_domain		*domain;
	struct irq_desc			*chained_desc;
	unsigned long			*bitmap;
	struct irq_domain_ops		ops;
};

static bool partition_check_cpu(struct partition_desc *part,
				unsigned int cpu, unsigned int hwirq)
{
	return cpumask_test_cpu(cpu, &part->parts[hwirq].mask);
}

static void partition_irq_mask(struct irq_data *d)
{
	struct partition_desc *part = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
	    chip->irq_mask)
		chip->irq_mask(data);
}

static void partition_irq_unmask(struct irq_data *d)
{
	struct partition_desc *part = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
	    chip->irq_unmask)
		chip->irq_unmask(data);
}

static int partition_irq_set_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool val)
{
	struct partition_desc *part = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
	    chip->irq_set_irqchip_state)
		return chip->irq_set_irqchip_state(data, which, val);

	return -EINVAL;
}

static int partition_irq_get_irqchip_state(struct irq_data *d,
					   enum irqchip_irq_state which,
					   bool *val)
{
	struct partition_desc *part = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

	if (partition_check_cpu(part, smp_processor_id(), d->hwirq) &&
	    chip->irq_get_irqchip_state)
		return chip->irq_get_irqchip_state(data, which, val);

	return -EINVAL;
}

static int partition_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct partition_desc *part = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

	if (chip->irq_set_type)
		return chip->irq_set_type(data, type);

	return -EINVAL;
}

static void partition_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct partition_desc *part = irq_data_get_irq_chip_data(d);
	struct irq_chip *chip = irq_desc_get_chip(part->chained_desc);
	struct irq_data *data = irq_desc_get_irq_data(part->chained_desc);

	seq_printf(p, " %5s-%lu", chip->name, data->hwirq);
}

static struct irq_chip partition_irq_chip = {
	.irq_mask		= partition_irq_mask,
	.irq_unmask		= partition_irq_unmask,
	.irq_set_type		= partition_irq_set_type,
	.irq_get_irqchip_state	= partition_irq_get_irqchip_state,
	.irq_set_irqchip_state	= partition_irq_set_irqchip_state,
	.irq_print_chip		= partition_irq_print_chip,
};

static void partition_handle_irq(struct irq_desc *desc)
{
	struct partition_desc *part = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int cpu = smp_processor_id();
	int hwirq;

	chained_irq_enter(chip, desc);

	for_each_set_bit(hwirq, part->bitmap, part->nr_parts) {
		if (partition_check_cpu(part, cpu, hwirq))
			break;
	}

	if (unlikely(hwirq == part->nr_parts)) {
		handle_bad_irq(desc);
	} else {
		unsigned int irq;
		irq = irq_find_mapping(part->domain, hwirq);
		generic_handle_irq(irq);
	}

	chained_irq_exit(chip, desc);
}

static int partition_domain_alloc(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs, void *arg)
{
	int ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;
	struct partition_desc *part;

	BUG_ON(nr_irqs != 1);
	ret = domain->ops->translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	part = domain->host_data;

	set_bit(hwirq, part->bitmap);
	irq_set_chained_handler_and_data(irq_desc_get_irq(part->chained_desc),
					 partition_handle_irq, part);
	irq_set_percpu_devid_partition(virq, &part->parts[hwirq].mask);
	irq_domain_set_info(domain, virq, hwirq, &partition_irq_chip, part,
			    handle_percpu_devid_irq, NULL, NULL);
	irq_set_status_flags(virq, IRQ_NOAUTOEN);

	return 0;
}

static void partition_domain_free(struct irq_domain *domain, unsigned int virq,
				  unsigned int nr_irqs)
{
	struct irq_data *d;

	BUG_ON(nr_irqs != 1);

	d = irq_domain_get_irq_data(domain, virq);
	irq_set_handler(virq, NULL);
	irq_domain_reset_irq_data(d);
}

int partition_translate_id(struct partition_desc *desc, void *partition_id)
{
	struct partition_affinity *part = NULL;
	int i;

	for (i = 0; i < desc->nr_parts; i++) {
		if (desc->parts[i].partition_id == partition_id) {
			part = &desc->parts[i];
			break;
		}
	}

	if (WARN_ON(!part)) {
		pr_err("Failed to find partition\n");
		return -EINVAL;
	}

	return i;
}

struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
					     struct partition_affinity *parts,
					     int nr_parts,
					     int chained_irq,
					     const struct irq_domain_ops *ops)
{
	struct partition_desc *desc;
	struct irq_domain *d;

	BUG_ON(!ops->select || !ops->translate);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	desc->ops = *ops;
	desc->ops.free = partition_domain_free;
	desc->ops.alloc = partition_domain_alloc;

	d = irq_domain_create_linear(fwnode, nr_parts, &desc->ops, desc);
	if (!d)
		goto out;
	desc->domain = d;

	desc->bitmap = kzalloc(sizeof(long) * BITS_TO_LONGS(nr_parts),
			       GFP_KERNEL);
	if (WARN_ON(!desc->bitmap))
		goto out;

	desc->chained_desc = irq_to_desc(chained_irq);
	desc->nr_parts = nr_parts;
	desc->parts = parts;

	return desc;
out:
	if (d)
		irq_domain_remove(d);
	kfree(desc);

	return NULL;
}

struct irq_domain *partition_get_domain(struct partition_desc *dsc)
{
	if (dsc)
		return dsc->domain;

	return NULL;
}
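partition_create_desc() copies the caller's irq_domain_ops, substitutes its own .alloc and .free, and returns a descriptor whose linear domain (reachable via partition_get_domain()) is what the caller then maps per-partition interrupts from. The fragment below is only a sketch of that calling pattern, not code from this commit: the my_* names are invented, the two callbacks are left as prototypes, and a real in-tree user would do considerably more setup and error handling.

#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/kernel.h>

/* Both callbacks are mandatory: partition_create_desc() BUG_ON()s without them. */
static int my_select(struct irq_domain *d, struct irq_fwspec *fwspec,
		     enum irq_domain_bus_token bus_token);
static int my_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
			irq_hw_number_t *hwirq, unsigned int *type);

static const struct irq_domain_ops my_partition_ops = {
	.select		= my_select,
	.translate	= my_translate,
};

static struct partition_affinity my_parts[2];	/* masks filled from firmware elsewhere */

static int my_init_partitions(struct fwnode_handle *fwnode, int ppi_virq)
{
	struct partition_desc *desc;

	/* ppi_virq is the Linux irq of the chained (muxed) per-cpu interrupt. */
	desc = partition_create_desc(fwnode, my_parts, ARRAY_SIZE(my_parts),
				     ppi_virq, &my_partition_ops);
	if (!desc)
		return -ENOMEM;

	/* Per-partition interrupts are then allocated from
	 * partition_get_domain(desc); partition_translate_id() maps a
	 * firmware partition identifier back to its index. */
	return 0;
}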
@ -235,7 +235,7 @@ static int tegra_ictlr_domain_translate(struct irq_domain *d,
		return -EINVAL;

	*hwirq = fwspec->param[1];
	*type = fwspec->param[2];
	*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
	return 0;
}

@ -530,6 +530,10 @@ static inline void irq_set_chip_and_handler(unsigned int irq, struct irq_chip *c
}

extern int irq_set_percpu_devid(unsigned int irq);
extern int irq_set_percpu_devid_partition(unsigned int irq,
					   const struct cpumask *affinity);
extern int irq_get_percpu_devid_partition(unsigned int irq,
					   struct cpumask *affinity);

extern void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
@ -102,8 +102,6 @@
#define GICR_SYNCR			0x00C0
#define GICR_MOVLPIR			0x0100
#define GICR_MOVALLR			0x0110
#define GICR_ISACTIVER			GICD_ISACTIVER
#define GICR_ICACTIVER			GICD_ICACTIVER
#define GICR_IDREGS			GICD_IDREGS
#define GICR_PIDR2			GICD_PIDR2

include/linux/irqchip/irq-partition-percpu.h (new file, 59 lines)
@ -0,0 +1,59 @@
/*
 * Copyright (C) 2016 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fwnode.h>
#include <linux/cpumask.h>
#include <linux/irqdomain.h>

struct partition_affinity {
	cpumask_t		mask;
	void			*partition_id;
};

struct partition_desc;

#ifdef CONFIG_PARTITION_PERCPU
int partition_translate_id(struct partition_desc *desc, void *partition_id);
struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
					     struct partition_affinity *parts,
					     int nr_parts,
					     int chained_irq,
					     const struct irq_domain_ops *ops);
struct irq_domain *partition_get_domain(struct partition_desc *dsc);
#else
static inline int partition_translate_id(struct partition_desc *desc,
					 void *partition_id)
{
	return -EINVAL;
}

static inline
struct partition_desc *partition_create_desc(struct fwnode_handle *fwnode,
					     struct partition_affinity *parts,
					     int nr_parts,
					     int chained_irq,
					     const struct irq_domain_ops *ops)
{
	return NULL;
}

static inline
struct irq_domain *partition_get_domain(struct partition_desc *dsc)
{
	return NULL;
}
#endif
@ -66,6 +66,7 @@ struct irq_desc {
	int			threads_handled_last;
	raw_spinlock_t		lock;
	struct cpumask		*percpu_enabled;
	const struct cpumask	*percpu_affinity;
#ifdef CONFIG_SMP
	const struct cpumask	*affinity_hint;
	struct irq_affinity_notify *affinity_notify;
@ -96,6 +96,8 @@ enum irq_domain_bus_token {
struct irq_domain_ops {
	int (*match)(struct irq_domain *d, struct device_node *node,
		     enum irq_domain_bus_token bus_token);
	int (*select)(struct irq_domain *d, struct irq_fwspec *fwspec,
		      enum irq_domain_bus_token bus_token);
	int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
	void (*unmap)(struct irq_domain *d, unsigned int virq);
	int (*xlate)(struct irq_domain *d, struct device_node *node,
@ -211,7 +213,7 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
					 irq_hw_number_t first_hwirq,
					 const struct irq_domain_ops *ops,
					 void *host_data);
extern struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
						    enum irq_domain_bus_token bus_token);
extern void irq_set_default_host(struct irq_domain *host);
extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
@ -227,6 +229,17 @@ static inline bool is_fwnode_irqchip(struct fwnode_handle *fwnode)
	return fwnode && fwnode->type == FWNODE_IRQCHIP;
}

static inline
struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
					    enum irq_domain_bus_token bus_token)
{
	struct irq_fwspec fwspec = {
		.fwnode = fwnode,
	};

	return irq_find_matching_fwspec(&fwspec, bus_token);
}

static inline struct irq_domain *irq_find_matching_host(struct device_node *node,
							 enum irq_domain_bus_token bus_token)
{
@ -346,9 +359,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
				irq_hw_number_t *out_hwirq, unsigned int *out_type);

/* IPI functions */
unsigned int irq_reserve_ipi(struct irq_domain *domain,
			     const struct cpumask *dest);
void irq_destroy_ipi(unsigned int irq);
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);

/* V2 interfaces to support hierarchy IRQ domains. */
extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
@ -19,9 +19,9 @@
 *
 * Allocate a virq that can be used to send IPI to any CPU in dest mask.
 *
 * On success it'll return linux irq number and 0 on failure
 * On success it'll return linux irq number and error code on failure
 */
unsigned int irq_reserve_ipi(struct irq_domain *domain,
int irq_reserve_ipi(struct irq_domain *domain,
		    const struct cpumask *dest)
{
	unsigned int nr_irqs, offset;
@ -30,18 +30,18 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
	if (!domain ||!irq_domain_is_ipi(domain)) {
		pr_warn("Reservation on a non IPI domain\n");
		return 0;
		return -EINVAL;
	}

	if (!cpumask_subset(dest, cpu_possible_mask)) {
		pr_warn("Reservation is not in possible_cpu_mask\n");
		return 0;
		return -EINVAL;
	}

	nr_irqs = cpumask_weight(dest);
	if (!nr_irqs) {
		pr_warn("Reservation for empty destination mask\n");
		return 0;
		return -EINVAL;
	}

	if (irq_domain_is_ipi_single(domain)) {
@ -72,14 +72,14 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
		next = cpumask_next(next, dest);
		if (next < nr_cpu_ids) {
			pr_warn("Destination mask has holes\n");
			return 0;
			return -EINVAL;
		}
	}

	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
	if (virq <= 0) {
		pr_warn("Can't reserve IPI, failed to alloc descs\n");
		return 0;
		return -ENOMEM;
	}

	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
@ -100,17 +100,20 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,

free_descs:
	irq_free_descs(virq, nr_irqs);
	return 0;
	return -EBUSY;
}

/**
 * irq_destroy_ipi() - unreserve an IPI that was previously allocated
 * @irq: linux irq number to be destroyed
 * @dest: cpumask of cpus which should have the IPI removed
 *
 * Return the IPIs allocated with irq_reserve_ipi() to the system destroying
 * all virqs associated with them.
 * The IPIs allocated with irq_reserve_ipi() are retuerned to the system
 * destroying all virqs associated with them.
 *
 * Return 0 on success or error code on failure.
 */
void irq_destroy_ipi(unsigned int irq)
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
{
	struct irq_data *data = irq_get_irq_data(irq);
	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
@ -118,7 +121,7 @@ void irq_destroy_ipi(unsigned int irq)
	unsigned int nr_irqs;

	if (!irq || !data || !ipimask)
		return;
		return -EINVAL;

	domain = data->domain;
	if (WARN_ON(domain == NULL))
@ -126,15 +129,25 @@ void irq_destroy_ipi(unsigned int irq)

	if (!irq_domain_is_ipi(domain)) {
		pr_warn("Trying to destroy a non IPI domain!\n");
		return;
		return -EINVAL;
	}

	if (irq_domain_is_ipi_per_cpu(domain))
		nr_irqs = cpumask_weight(ipimask);
	else
	if (WARN_ON(!cpumask_subset(dest, ipimask)))
		/*
		 * Must be destroying a subset of CPUs to which this IPI
		 * was set up to target
		 */
		return -EINVAL;

	if (irq_domain_is_ipi_per_cpu(domain)) {
		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
		nr_irqs = cpumask_weight(dest);
	} else {
		nr_irqs = 1;
	}

	irq_domain_free_irqs(irq, nr_irqs);
	return 0;
}

/**
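With these changes irq_reserve_ipi() hands back a Linux irq number or a negative errno, and irq_destroy_ipi() takes the cpumask being torn down and likewise returns an errno, so callers can no longer treat 0 as the only failure indication. A minimal sketch of the revised calling convention follows; it is illustrative only (not from this commit) and my_setup_ipi(), along with its parameters, is invented.

#include <linux/irqdomain.h>

static int my_setup_ipi(struct irq_domain *ipi_domain,
			const struct cpumask *dest)
{
	int virq;

	virq = irq_reserve_ipi(ipi_domain, dest);
	if (virq < 0)
		return virq;		/* now a negative errno rather than 0 */

	/* ... request the per-cpu IRQ and wire it to the SMP code here ... */

	return irq_destroy_ipi(virq, dest);	/* also reports failure as an errno */
}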
@ -595,7 +595,8 @@ void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
	chip_bus_sync_unlock(desc);
}

int irq_set_percpu_devid(unsigned int irq)
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

@ -610,10 +611,33 @@ int irq_set_percpu_devid(unsigned int irq)
	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}

int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
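irq_set_percpu_devid() is now a thin wrapper around irq_set_percpu_devid_partition(), which records the supplied affinity mask (or cpu_possible_mask) in the descriptor so that irq_get_percpu_devid_partition() can hand it back later. The pairing is sketched below with invented names; this is not code from the commit.

#include <linux/irq.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

static int my_mark_partitioned_ppi(unsigned int virq,
				   const struct cpumask *part_mask)
{
	cpumask_var_t readback;
	int err;

	/* Mark virq as per-cpu-devid and remember which CPUs it targets. */
	err = irq_set_percpu_devid_partition(virq, part_mask);
	if (err)
		return err;

	if (!zalloc_cpumask_var(&readback, GFP_KERNEL))
		return -ENOMEM;

	/* The stored mask can later be queried back, e.g. by an irqchip
	 * that needs to know which CPUs the partition covers. */
	err = irq_get_percpu_devid_partition(virq, readback);

	free_cpumask_var(readback);
	return err;
}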
|
@ -243,14 +243,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
|
|||
EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
|
||||
|
||||
/**
|
||||
* irq_find_matching_fwnode() - Locates a domain for a given fwnode
|
||||
* @fwnode: FW descriptor of the interrupt controller
|
||||
* irq_find_matching_fwspec() - Locates a domain for a given fwspec
|
||||
* @fwspec: FW specifier for an interrupt
|
||||
* @bus_token: domain-specific data
|
||||
*/
|
||||
struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
|
||||
struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
|
||||
enum irq_domain_bus_token bus_token)
|
||||
{
|
||||
struct irq_domain *h, *found = NULL;
|
||||
struct fwnode_handle *fwnode = fwspec->fwnode;
|
||||
int rc;
|
||||
|
||||
/* We might want to match the legacy controller last since
|
||||
|
@ -264,7 +265,9 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link) {
		if (h->ops->match)
		if (h->ops->select && fwspec->param_count)
			rc = h->ops->select(h, fwspec, bus_token);
		else if (h->ops->match)
			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
		else
			rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
@ -279,7 +282,7 @@ struct irq_domain *irq_find_matching_fwnode(struct fwnode_handle *fwnode,
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_matching_fwnode);
EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);

/**
 * irq_set_default_host() - Set a "default" irq domain
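irq_find_matching_fwspec() now gives a domain's ->select callback first refusal whenever the fwspec carries parameters, falling back to ->match (and then to a plain fwnode comparison) otherwise, so a domain can claim an interrupt specifier based on its full parameter list. The callback below is only a sketch of what such an implementation might look like; MY_PARAM_COUNT and the my_* names are invented and do not come from this commit.

#include <linux/irqdomain.h>

#define MY_PARAM_COUNT	4	/* hypothetical specifier length this domain accepts */

static int my_domain_select(struct irq_domain *d, struct irq_fwspec *fwspec,
			    enum irq_domain_bus_token bus_token)
{
	/* A non-zero return means "this domain matches the specifier". */
	return d->fwnode == fwspec->fwnode &&
	       (bus_token == DOMAIN_BUS_ANY || d->bus_token == bus_token) &&
	       fwspec->param_count == MY_PARAM_COUNT;
}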
@ -574,11 +577,9 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
	int virq;

	if (fwspec->fwnode) {
		domain = irq_find_matching_fwnode(fwspec->fwnode,
						  DOMAIN_BUS_WIRED);
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwnode(fwspec->fwnode,
							  DOMAIN_BUS_ANY);
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}
@ -1407,7 +1407,7 @@ int setup_irq(unsigned int irq, struct irqaction *act)
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);