Merge branches 'stable/irq.rework' and 'stable/pcifront-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen

* 'stable/irq.rework' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  xen/irq: Cleanup up the pirq_to_irq for DomU PV PCI passthrough guests as well.
  xen: Use IRQF_FORCE_RESUME
  xen/timer: Missing IRQF_NO_SUSPEND in timer code broke suspend.
  xen: Fix compile error introduced by "switch to new irq_chip functions"
  xen: Switch to new irq_chip functions
  xen: Remove stale irq_chip.end
  xen: events: do not free legacy IRQs
  xen: events: allocate GSIs and dynamic IRQs from separate IRQ ranges.
  xen: events: add xen_allocate_irq_{dynamic, gsi} and xen_free_irq
  xen:events: move find_unbound_irq inside CONFIG_PCI_MSI
  xen: handled remapped IRQs when enabling a pcifront PCI device.
  genirq: Add IRQF_FORCE_RESUME

* 'stable/pcifront-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen:
  pci/xen: When free-ing MSI-X/MSI irq->desc also use generic code.
  pci/xen: Cleanup: convert int** to int[]
  pci/xen: Use xen_allocate_pirq_msi instead of xen_allocate_pirq
  xen-pcifront: Sanity check the MSI/MSI-X values
  xen-pcifront: don't use flush_scheduled_work()
commit 397fae0818
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void)
  * its own functions.
  */
 struct xen_pci_frontend_ops {
-	int (*enable_msi)(struct pci_dev *dev, int **vectors);
+	int (*enable_msi)(struct pci_dev *dev, int vectors[]);
 	void (*disable_msi)(struct pci_dev *dev);
-	int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec);
+	int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec);
 	void (*disable_msix)(struct pci_dev *dev);
 };
 
 extern struct xen_pci_frontend_ops *xen_pci_frontend;
 
 static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev,
-					      int **vectors)
+					      int vectors[])
 {
 	if (xen_pci_frontend && xen_pci_frontend->enable_msi)
 		return xen_pci_frontend->enable_msi(dev, vectors);
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
 		xen_pci_frontend->disable_msi(dev);
 }
 static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev,
-					       int **vectors, int nvec)
+					       int vectors[], int nvec)
 {
 	if (xen_pci_frontend && xen_pci_frontend->enable_msix)
 		return xen_pci_frontend->enable_msix(dev, vectors, nvec);
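Note: the int** -> int[] conversion above removes a needless level of indirection; the frontend only ever fills in a caller-supplied array, so passing the array directly is simpler and harder to misuse. A minimal standalone sketch of the before/after calling convention (the old_/new_ names are illustrative, not from the commit):

#include <stdio.h>

/* Old shape: the callee writes through a pointer-to-pointer. */
static int old_enable(int **vectors, int nvec)
{
	for (int i = 0; i < nvec; i++)
		*(*vectors + i) = 100 + i;	/* awkward double indirection */
	return 0;
}

/* New shape: the array parameter decays to int *, no extra hop. */
static int new_enable(int vectors[], int nvec)
{
	for (int i = 0; i < nvec; i++)
		vectors[i] = 100 + i;
	return 0;
}

int main(void)
{
	int v[4];
	int *p = v;

	old_enable(&p, 4);	/* old callers had to pass &v */
	new_enable(v, 4);	/* new callers pass the array itself */
	printf("%d %d\n", v[0], v[3]);
	return 0;
}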
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -150,21 +150,21 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 		return -ENOMEM;
 
 	if (type == PCI_CAP_ID_MSIX)
-		ret = xen_pci_frontend_enable_msix(dev, &v, nvec);
+		ret = xen_pci_frontend_enable_msix(dev, v, nvec);
 	else
-		ret = xen_pci_frontend_enable_msi(dev, &v);
+		ret = xen_pci_frontend_enable_msi(dev, v);
 	if (ret)
 		goto error;
 	i = 0;
 	list_for_each_entry(msidesc, &dev->msi_list, list) {
-		irq = xen_allocate_pirq(v[i], 0, /* not sharable */
-			(type == PCI_CAP_ID_MSIX) ?
-			"pcifront-msi-x" : "pcifront-msi");
+		xen_allocate_pirq_msi(
+			(type == PCI_CAP_ID_MSIX) ?
+			"pcifront-msi-x" : "pcifront-msi",
+			&irq, &v[i], XEN_ALLOC_IRQ);
 		if (irq < 0) {
 			ret = -1;
 			goto free;
 		}
 
 		ret = set_irq_msi(irq, msidesc);
 		if (ret)
 			goto error_while;
@@ -193,6 +193,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev)
 		xen_pci_frontend_disable_msix(dev);
 	else
 		xen_pci_frontend_disable_msi(dev);
+
+	/* Free the IRQ's and the msidesc using the generic code. */
+	default_teardown_msi_irqs(dev);
 }
 
 static void xen_teardown_msi_irq(unsigned int irq)
@@ -226,21 +229,27 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev)
 {
 	int rc;
 	int share = 1;
+	u8 gsi;
 
-	dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq);
-
-	if (dev->irq < 0)
-		return -EINVAL;
-
-	if (dev->irq < NR_IRQS_LEGACY)
-		share = 0;
-
-	rc = xen_allocate_pirq(dev->irq, share, "pcifront");
+	rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
 	if (rc < 0) {
-		dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n",
-			 dev->irq, rc);
+		dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n",
+			 rc);
 		return rc;
 	}
+
+	if (gsi < NR_IRQS_LEGACY)
+		share = 0;
+
+	rc = xen_allocate_pirq(gsi, share, "pcifront");
+	if (rc < 0) {
+		dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n",
+			 gsi, rc);
+		return rc;
+	}
+
+	dev->irq = rc;
+	dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq);
 	return 0;
 }
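Note: the new xen_allocate_pirq_msi() call above returns its results through out-parameters (&irq, &v[i]) rather than a return value, so the caller checks *irq afterwards. A standalone sketch of that convention (demo_* names and values are illustrative only):

#include <stdio.h>

#define DEMO_ALLOC_IRQ 1

static void demo_allocate(const char *name, int *irq, int *pirq, int alloc)
{
	if (alloc & DEMO_ALLOC_IRQ)
		*irq = 42;		/* pretend an IRQ was allocated */
	*pirq = 7;			/* pretend a PIRQ was reserved */
	printf("%s: irq=%d pirq=%d\n", name, *irq, *pirq);
}

int main(void)
{
	int irq = -1, pirq = -1;

	demo_allocate("pcifront-msi", &irq, &pirq, DEMO_ALLOC_IRQ);
	if (irq < 0)
		return 1;	/* mirrors the new 'if (irq < 0)' check */
	return 0;
}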
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu)
 		name = "<timer kasprintf failed>";
 
 	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
-				      IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER,
+				      IRQF_DISABLED|IRQF_PERCPU|
+				      IRQF_NOBALANCING|IRQF_TIMER|
+				      IRQF_FORCE_RESUME,
 				      name, NULL);
 
 	evt = &per_cpu(xen_clock_events, cpu);
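Note: IRQF_TIMER already implies IRQF_NO_SUSPEND (see the include/linux/interrupt.h hunk below), so before this merge the IRQ core skipped the timer line both at suspend and at resume, which is what broke suspend; the new IRQF_FORCE_RESUME flag opts back in to the resume-time re-enable. A standalone sketch of the flag arithmetic; the __IRQF_TIMER value is taken from the 2.6.3x headers and is not shown in the hunks here:

#include <stdio.h>

#define IRQF_NO_SUSPEND   0x00004000
#define IRQF_FORCE_RESUME 0x00008000
#define __IRQF_TIMER      0x00000200	/* assumed era-correct value */
#define IRQF_TIMER        (__IRQF_TIMER | IRQF_NO_SUSPEND)

int main(void)
{
	unsigned long flags = IRQF_TIMER | IRQF_FORCE_RESUME;

	printf("skipped at suspend:   %s\n",
	       (flags & IRQF_NO_SUSPEND) ? "yes" : "no");
	printf("re-enabled at resume: %s\n",
	       (flags & IRQF_FORCE_RESUME) ? "yes" : "no");
	return 0;
}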
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = {
 
 #ifdef CONFIG_PCI_MSI
 static int pci_frontend_enable_msix(struct pci_dev *dev,
-				    int **vector, int nvec)
+				    int vector[], int nvec)
 {
 	int err;
 	int i;
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev,
 	if (likely(!err)) {
 		if (likely(!op.value)) {
 			/* we get the result */
-			for (i = 0; i < nvec; i++)
-				*(*vector+i) = op.msix_entries[i].vector;
-			return 0;
+			for (i = 0; i < nvec; i++) {
+				if (op.msix_entries[i].vector <= 0) {
+					dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n",
+						 i, op.msix_entries[i].vector);
+					err = -EINVAL;
+					vector[i] = -1;
+					continue;
+				}
+				vector[i] = op.msix_entries[i].vector;
+			}
 		} else {
 			printk(KERN_DEBUG "enable msix get value %x\n",
 			       op.value);
 			return op.value;
 		}
 	} else {
 		dev_err(&dev->dev, "enable msix get err %x\n", err);
 		return err;
 	}
 	return err;
 }
 
 static void pci_frontend_disable_msix(struct pci_dev *dev)
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev)
 		dev_err(&dev->dev, "pci_disable_msix get err %x\n", err);
 }
 
-static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
+static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[])
 {
 	int err;
 	struct xen_pci_op op = {
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector)
 
 	err = do_pci_op(pdev, &op);
 	if (likely(!err)) {
-		*(*vector) = op.value;
+		vector[0] = op.value;
+		if (op.value <= 0) {
+			dev_warn(&dev->dev, "MSI entry is invalid: %d!\n",
+				 op.value);
+			err = -EINVAL;
+			vector[0] = -1;
+		}
 	} else {
 		dev_err(&dev->dev, "pci frontend enable msi failed for dev "
 			"%x:%x\n", op.bus, op.devfn);
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev)
 
 	pcifront_free_roots(pdev);
 
-	/*For PCIE_AER error handling job*/
-	flush_scheduled_work();
+	cancel_work_sync(&pdev->op_work);
 
 	if (pdev->irq >= 0)
 		unbind_from_irqhandler(pdev->irq, pdev);
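Note: the last hunk replaces flush_scheduled_work(), which waits for every item on the shared workqueue and is a known teardown deadlock source, with cancel_work_sync() on the one work item pcifront actually owns. A minimal kernel-style sketch of the pattern, with illustrative demo_* names:

#include <linux/workqueue.h>

struct demo_dev {
	struct work_struct op_work;	/* the one item this driver owns */
};

static void demo_teardown(struct demo_dev *d)
{
	/*
	 * Wait only for our own work item instead of draining the whole
	 * shared workqueue; afterwards it is safe to free *d.
	 */
	cancel_work_sync(&d->op_work);
}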
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
 
 	BUG_ON(irq == -1);
 #ifdef CONFIG_SMP
-	cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu));
+	cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
 #endif
 
 	clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq)));
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void)
 
 	/* By default all event channels notify CPU#0. */
 	for_each_irq_desc(i, desc) {
-		cpumask_copy(desc->affinity, cpumask_of(0));
+		cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
 	}
 #endif
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port)
 	put_cpu();
 }
 
-static int get_nr_hw_irqs(void)
+static int xen_allocate_irq_dynamic(void)
 {
-	int ret = 1;
+	int first = 0;
+	int irq;
 
 #ifdef CONFIG_X86_IO_APIC
-	ret = get_nr_irqs_gsi();
+	/*
+	 * For an HVM guest or domain 0 which see "real" (emulated or
+	 * actual repectively) GSIs we allocate dynamic IRQs
+	 * e.g. those corresponding to event channels or MSIs
+	 * etc. from the range above those "real" GSIs to avoid
+	 * collisions.
+	 */
+	if (xen_initial_domain() || xen_hvm_domain())
+		first = get_nr_irqs_gsi();
 #endif
 
-	return ret;
-}
+retry:
+	irq = irq_alloc_desc_from(first, -1);
 
-static int find_unbound_pirq(int type)
-{
-	int rc, i;
-	struct physdev_get_free_pirq op_get_free_pirq;
-	op_get_free_pirq.type = type;
-
-	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
-	if (!rc)
-		return op_get_free_pirq.pirq;
-
-	for (i = 0; i < nr_irqs; i++) {
-		if (pirq_to_irq[i] < 0)
-			return i;
-	}
-	return -1;
-}
-
-static int find_unbound_irq(void)
-{
-	struct irq_data *data;
-	int irq, res;
-	int bottom = get_nr_hw_irqs();
-	int top = nr_irqs-1;
-
-	if (bottom == nr_irqs)
-		goto no_irqs;
-
-	/* This loop starts from the top of IRQ space and goes down.
-	 * We need this b/c if we have a PCI device in a Xen PV guest
-	 * we do not have an IO-APIC (though the backend might have them)
-	 * mapped in. To not have a collision of physical IRQs with the Xen
-	 * event channels start at the top of the IRQ space for virtual IRQs.
-	 */
-	for (irq = top; irq > bottom; irq--) {
-		data = irq_get_irq_data(irq);
-		/* only 15->0 have init'd desc; handle irq > 16 */
-		if (!data)
-			break;
-		if (data->chip == &no_irq_chip)
-			break;
-		if (data->chip != &xen_dynamic_chip)
-			continue;
-		if (irq_info[irq].type == IRQT_UNBOUND)
-			return irq;
+	if (irq == -ENOMEM && first > NR_IRQS_LEGACY) {
+		printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n");
+		first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY);
+		goto retry;
 	}
 
-	if (irq == bottom)
-		goto no_irqs;
-
-	res = irq_alloc_desc_at(irq, -1);
-
-	if (WARN_ON(res != irq))
-		return -1;
+	if (irq < 0)
+		panic("No available IRQ to bind to: increase nr_irqs!\n");
 
 	return irq;
-
-no_irqs:
-	panic("No available IRQ to bind to: increase nr_irqs!\n");
 }
 
-static bool identity_mapped_irq(unsigned irq)
+static int xen_allocate_irq_gsi(unsigned gsi)
 {
-	/* identity map all the hardware irqs */
-	return irq < get_nr_hw_irqs();
+	int irq;
+
+	/*
+	 * A PV guest has no concept of a GSI (since it has no ACPI
+	 * nor access to/knowledge of the physical APICs). Therefore
+	 * all IRQs are dynamically allocated from the entire IRQ
+	 * space.
+	 */
+	if (xen_pv_domain() && !xen_initial_domain())
+		return xen_allocate_irq_dynamic();
+
+	/* Legacy IRQ descriptors are already allocated by the arch. */
+	if (gsi < NR_IRQS_LEGACY)
+		return gsi;
+
+	irq = irq_alloc_desc_at(gsi, -1);
+	if (irq < 0)
+		panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq);
+
+	return irq;
+}
+
+static void xen_free_irq(unsigned irq)
+{
+	/* Legacy IRQ descriptors are managed by the arch. */
+	if (irq < NR_IRQS_LEGACY)
+		return;
+
+	irq_free_desc(irq);
 }
 
 static void pirq_unmask_notify(int irq)
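Note: this rework replaces one top-down search heuristic with two explicit allocators: xen_allocate_irq_dynamic() for event channels and MSIs, and xen_allocate_irq_gsi() for real GSIs, with legacy IRQs (below NR_IRQS_LEGACY) never allocated or freed here. A standalone sketch of the resulting policy, with stand-in predicates and illustrative values:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative stand-ins for the kernel predicates used above. */
static bool pv_domain = true, initial_domain = false, hvm_domain = false;
static int nr_irqs_gsi = 24;	/* pretend the IO-APIC exposes 24 GSIs */

static int dynamic_alloc_start(void)
{
	/* dom0 and HVM guests see real GSIs: allocate above them. */
	return (initial_domain || hvm_domain) ? nr_irqs_gsi : 0;
}

static const char *gsi_alloc_strategy(unsigned gsi)
{
	if (pv_domain && !initial_domain)
		return "dynamic (a PV domU has no real GSIs)";
	return gsi < 16 ? "legacy, pre-allocated by the arch"
			: "descriptor allocated at the GSI number";
}

int main(void)
{
	printf("dynamic IRQs start at %d\n", dynamic_alloc_start());
	printf("GSI 5:  %s\n", gsi_alloc_strategy(5));
	printf("GSI 20: %s\n", gsi_alloc_strategy(20));
	return 0;
}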
@@ -486,7 +474,7 @@ static bool probing_irq(int irq)
 	return desc && desc->action == NULL;
 }
 
-static unsigned int startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(unsigned int irq)
 {
 	struct evtchn_bind_pirq bind_pirq;
 	struct irq_info *info = info_for_irq(irq);
@@ -524,9 +512,15 @@ static unsigned int startup_pirq(unsigned int irq)
 	return 0;
 }
 
-static void shutdown_pirq(unsigned int irq)
+static unsigned int startup_pirq(struct irq_data *data)
 {
+	return __startup_pirq(data->irq);
+}
+
+static void shutdown_pirq(struct irq_data *data)
+{
 	struct evtchn_close close;
+	unsigned int irq = data->irq;
 	struct irq_info *info = info_for_irq(irq);
 	int evtchn = evtchn_from_irq(irq);
 
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq)
 	info->evtchn = 0;
 }
 
-static void enable_pirq(unsigned int irq)
+static void enable_pirq(struct irq_data *data)
 {
-	startup_pirq(irq);
+	startup_pirq(data);
 }
 
-static void disable_pirq(unsigned int irq)
+static void disable_pirq(struct irq_data *data)
 {
 }
 
-static void ack_pirq(unsigned int irq)
+static void ack_pirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
-	move_native_irq(irq);
+	move_native_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn)) {
 		mask_evtchn(evtchn);
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq)
 	}
 }
 
-static void end_pirq(unsigned int irq)
-{
-	int evtchn = evtchn_from_irq(irq);
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (WARN_ON(!desc))
-		return;
-
-	if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) ==
-	    (IRQ_DISABLED|IRQ_PENDING)) {
-		shutdown_pirq(irq);
-	} else if (VALID_EVTCHN(evtchn)) {
-		unmask_evtchn(evtchn);
-		pirq_unmask_notify(irq);
-	}
-}
-
 static int find_irq_by_gsi(unsigned gsi)
 {
 	int irq;
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 		goto out;	/* XXX need refcount? */
 	}
 
-	/* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore
-	 * we are using the !xen_initial_domain() to drop in the function.*/
-	if (identity_mapped_irq(gsi) || (!xen_initial_domain() &&
-				xen_pv_domain())) {
-		irq = gsi;
-		irq_alloc_desc_at(irq, -1);
-	} else
-		irq = find_unbound_irq();
+	irq = xen_allocate_irq_gsi(gsi);
 
 	set_irq_chip_and_handler_name(irq, &xen_pirq_chip,
 				      handle_level_irq, name);
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 	 * this in the priv domain. */
 	if (xen_initial_domain() &&
 	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 		irq = -ENOSPC;
 		goto out;
 	}
@@ -677,12 +647,29 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name)
 #include <linux/msi.h>
 #include "../pci/msi.h"
 
+static int find_unbound_pirq(int type)
+{
+	int rc, i;
+	struct physdev_get_free_pirq op_get_free_pirq;
+	op_get_free_pirq.type = type;
+
+	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);
+	if (!rc)
+		return op_get_free_pirq.pirq;
+
+	for (i = 0; i < nr_irqs; i++) {
+		if (pirq_to_irq[i] < 0)
+			return i;
+	}
+	return -1;
+}
+
 void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc)
 {
 	spin_lock(&irq_mapping_update_lock);
 
 	if (alloc & XEN_ALLOC_IRQ) {
-		*irq = find_unbound_irq();
+		*irq = xen_allocate_irq_dynamic();
 		if (*irq == -1)
 			goto out;
 	}
@@ -732,7 +719,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
 
 	spin_lock(&irq_mapping_update_lock);
 
-	irq = find_unbound_irq();
+	irq = xen_allocate_irq_dynamic();
 
 	if (irq == -1)
 		goto out;
@@ -741,7 +728,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type)
 	if (rc) {
 		printk(KERN_WARNING "xen map irq failed %d\n", rc);
 
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 
 		irq = -1;
 		goto out;
@@ -779,11 +766,12 @@ int xen_destroy_irq(int irq)
 			printk(KERN_WARNING "unmap irq failed %d\n", rc);
 			goto out;
 		}
-		pirq_to_irq[info->u.pirq.pirq] = -1;
 	}
+	pirq_to_irq[info->u.pirq.pirq] = -1;
+
 	irq_info[irq] = mk_unbound_info();
 
-	irq_free_desc(irq);
+	xen_free_irq(irq);
 
 out:
 	spin_unlock(&irq_mapping_update_lock);
@@ -814,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn)
 	irq = evtchn_to_irq[evtchn];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 
 		set_irq_chip_and_handler_name(irq, &xen_dynamic_chip,
 					      handle_fasteoi_irq, "event");
@@ -839,7 +827,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
 	irq = per_cpu(ipi_to_irq, cpu)[ipi];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 		if (irq < 0)
 			goto out;
 
@@ -875,7 +863,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
 	irq = per_cpu(virq_to_irq, cpu)[virq];
 
 	if (irq == -1) {
-		irq = find_unbound_irq();
+		irq = xen_allocate_irq_dynamic();
 
 		set_irq_chip_and_handler_name(irq, &xen_percpu_chip,
 					      handle_percpu_irq, "virq");
@@ -934,7 +922,7 @@ static void unbind_from_irq(unsigned int irq)
 	if (irq_info[irq].type != IRQT_UNBOUND) {
 		irq_info[irq] = mk_unbound_info();
 
-		irq_free_desc(irq);
+		xen_free_irq(irq);
 	}
 
 	spin_unlock(&irq_mapping_update_lock);
@@ -990,7 +978,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
 	if (irq < 0)
 		return irq;
 
-	irqflags |= IRQF_NO_SUSPEND;
+	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
 	retval = request_irq(irq, handler, irqflags, devname, dev_id);
 	if (retval != 0) {
 		unbind_from_irq(irq);
@@ -1234,11 +1222,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
 	return 0;
 }
 
-static int set_affinity_irq(unsigned irq, const struct cpumask *dest)
+static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
+			    bool force)
 {
 	unsigned tcpu = cpumask_first(dest);
 
-	return rebind_irq_to_cpu(irq, tcpu);
+	return rebind_irq_to_cpu(data->irq, tcpu);
 }
 
 int resend_irq_on_evtchn(unsigned int irq)
@@ -1257,35 +1246,35 @@ int resend_irq_on_evtchn(unsigned int irq)
 	return 1;
 }
 
-static void enable_dynirq(unsigned int irq)
+static void enable_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		unmask_evtchn(evtchn);
 }
 
-static void disable_dynirq(unsigned int irq)
+static void disable_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		mask_evtchn(evtchn);
 }
 
-static void ack_dynirq(unsigned int irq)
+static void ack_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 
-	move_masked_irq(irq);
+	move_masked_irq(data->irq);
 
 	if (VALID_EVTCHN(evtchn))
 		unmask_evtchn(evtchn);
 }
 
-static int retrigger_dynirq(unsigned int irq)
+static int retrigger_dynirq(struct irq_data *data)
 {
-	int evtchn = evtchn_from_irq(irq);
+	int evtchn = evtchn_from_irq(data->irq);
 	struct shared_info *sh = HYPERVISOR_shared_info;
 	int ret = 0;
 
@@ -1334,7 +1323,7 @@ static void restore_cpu_pirqs(void)
 
 		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
 
-		startup_pirq(irq);
+		__startup_pirq(irq);
 	}
 }
 
@@ -1445,7 +1434,6 @@ void xen_poll_irq(int irq)
 void xen_irq_resume(void)
 {
 	unsigned int cpu, irq, evtchn;
-	struct irq_desc *desc;
 
 	init_evtchn_cpu_bindings();
 
@@ -1465,66 +1453,48 @@ void xen_irq_resume(void)
 		restore_cpu_ipis(cpu);
 	}
 
-	/*
-	 * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These
-	 * are not handled by the IRQ core.
-	 */
-	for_each_irq_desc(irq, desc) {
-		if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND))
-			continue;
-		if (desc->status & IRQ_DISABLED)
-			continue;
-
-		evtchn = evtchn_from_irq(irq);
-		if (evtchn == -1)
-			continue;
-
-		unmask_evtchn(evtchn);
-	}
-
 	restore_cpu_pirqs();
 }
 
 static struct irq_chip xen_dynamic_chip __read_mostly = {
-	.name		= "xen-dyn",
+	.name			= "xen-dyn",
 
-	.disable	= disable_dynirq,
-	.mask		= disable_dynirq,
-	.unmask		= enable_dynirq,
+	.irq_disable		= disable_dynirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
 
-	.eoi		= ack_dynirq,
-	.set_affinity	= set_affinity_irq,
-	.retrigger	= retrigger_dynirq,
+	.irq_eoi		= ack_dynirq,
+	.irq_set_affinity	= set_affinity_irq,
+	.irq_retrigger		= retrigger_dynirq,
 };
 
 static struct irq_chip xen_pirq_chip __read_mostly = {
-	.name		= "xen-pirq",
+	.name			= "xen-pirq",
 
-	.startup	= startup_pirq,
-	.shutdown	= shutdown_pirq,
+	.irq_startup		= startup_pirq,
+	.irq_shutdown		= shutdown_pirq,
 
-	.enable		= enable_pirq,
-	.unmask		= enable_pirq,
+	.irq_enable		= enable_pirq,
+	.irq_unmask		= enable_pirq,
 
-	.disable	= disable_pirq,
-	.mask		= disable_pirq,
+	.irq_disable		= disable_pirq,
+	.irq_mask		= disable_pirq,
 
-	.ack		= ack_pirq,
-	.end		= end_pirq,
+	.irq_ack		= ack_pirq,
 
-	.set_affinity	= set_affinity_irq,
+	.irq_set_affinity	= set_affinity_irq,
 
-	.retrigger	= retrigger_dynirq,
+	.irq_retrigger		= retrigger_dynirq,
 };
 
 static struct irq_chip xen_percpu_chip __read_mostly = {
-	.name		= "xen-percpu",
+	.name			= "xen-percpu",
 
-	.disable	= disable_dynirq,
-	.mask		= disable_dynirq,
-	.unmask		= enable_dynirq,
+	.irq_disable		= disable_dynirq,
+	.irq_mask		= disable_dynirq,
+	.irq_unmask		= enable_dynirq,
 
-	.ack		= ack_dynirq,
+	.irq_ack		= ack_dynirq,
 };
 
 int xen_set_callback_via(uint64_t via)
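Note: the three irq_chip tables above are the visible surface of the "switch to new irq_chip functions" conversion: every callback gains an irq_ prefix and takes struct irq_data * instead of a bare IRQ number. A minimal kernel-style sketch of the before/after callback shape; the demo_* names are illustrative, while the irq_* field names are the ones this commit moves to:

#include <linux/irq.h>

/* Old-style callback: handed a bare IRQ number. */
static void __maybe_unused demo_mask_old(unsigned int irq)
{
	/* per-IRQ state had to be looked up from the number */
}

/* New-style callback: handed the irq_data; the number lives inside. */
static void demo_mask_new(struct irq_data *data)
{
	unsigned int irq = data->irq;

	(void)irq;	/* ... same work as before ... */
}

static struct irq_chip demo_chip __read_mostly = {
	.name     = "demo",
	.irq_mask = demo_mask_new,	/* was .mask = demo_mask_old */
};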
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -55,7 +55,7 @@
  * Used by threaded interrupts which need to keep the
  * irq line disabled until the threaded handler has been run.
  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
- *
+ * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
@@ -67,6 +67,7 @@
 #define IRQF_IRQPOLL		0x00001000
 #define IRQF_ONESHOT		0x00002000
 #define IRQF_NO_SUSPEND		0x00004000
+#define IRQF_FORCE_RESUME	0x00008000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND)
 
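Note: driver-side, the new flag is meant to be combined with IRQF_NO_SUSPEND, so the line is left untouched at suspend but forcibly re-enabled at resume; the Xen timer and IPI bindings in this merge do exactly that. A sketch of the call a driver would make (demo_* names are illustrative):

#include <linux/interrupt.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_bind(unsigned int irq, void *dev_id)
{
	return request_irq(irq, demo_handler,
			   IRQF_NO_SUSPEND | IRQF_FORCE_RESUME,
			   "demo", dev_id);
}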
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -282,8 +282,17 @@ EXPORT_SYMBOL(disable_irq);
 
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
-	if (resume)
+	if (resume) {
+		if (!(desc->status & IRQ_SUSPENDED)) {
+			if (!desc->action)
+				return;
+			if (!(desc->action->flags & IRQF_FORCE_RESUME))
+				return;
+			/* Pretend that it got disabled ! */
+			desc->depth++;
+		}
 		desc->status &= ~IRQ_SUSPENDED;
+	}
 
 	switch (desc->depth) {
 	case 0:
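Note: this hunk is the heart of IRQF_FORCE_RESUME. An IRQ that was never marked IRQ_SUSPENDED (because IRQF_NO_SUSPEND skipped it at suspend) gets a synthetic depth bump, so the common re-enable path below runs for it exactly once. A simplified standalone model of that bookkeeping, not the kernel's actual switch statement:

#include <stdio.h>
#include <stdbool.h>

struct demo_desc {
	unsigned int depth;	/* disable nesting count */
	bool suspended;		/* stand-in for IRQ_SUSPENDED */
	bool force_resume;	/* stand-in for IRQF_FORCE_RESUME */
};

static void demo_enable(struct demo_desc *d, bool resume)
{
	if (resume) {
		if (!d->suspended) {
			if (!d->force_resume)
				return;
			d->depth++;	/* pretend it got disabled */
		}
		d->suspended = false;
	}
	if (d->depth > 0 && --d->depth == 0)
		printf("irq re-enabled\n");
}

int main(void)
{
	struct demo_desc timer = { .depth = 0, .suspended = false,
				   .force_resume = true };

	demo_enable(&timer, true);	/* resume path: prints exactly once */
	return 0;
}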
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -53,9 +53,6 @@ void resume_device_irqs(void)
 	for_each_irq_desc(irq, desc) {
 		unsigned long flags;
 
-		if (!(desc->status & IRQ_SUSPENDED))
-			continue;
-
 		raw_spin_lock_irqsave(&desc->lock, flags);
 		__enable_irq(desc, irq, true);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);