forcedeth: napi - handle all processing

The napi poll routine has been modified to handle all interrupt events
and process them accordingly. Therefore, the ISR now only schedules the
napi poll and disables all interrupts, instead of disabling just the rx
interrupts.

Signed-off-by: Ayaz Abdulla <aabdulla@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit f27e6f39fc (parent 33912e72d0)
Author: Ayaz Abdulla
Date:   2009-03-05 08:02:14 +00:00, committed by David S. Miller

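For context, here is a minimal ISR-side sketch of the pattern this patch adopts. It is not taken from the patch itself: struct my_priv, my_isr and MY_IRQ_MASK_REG are illustrative stand-ins for the driver's struct fe_priv, nv_nic_irq and NvRegIrqMask. Under NAPI the interrupt handler now masks every device interrupt and schedules the poll routine, deferring all rx, tx, link and error processing to it.

/* Illustrative sketch, not the driver's exact code. */
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/io.h>

#define MY_IRQ_MASK_REG 0x004	/* illustrative register offset */

struct my_priv {
	spinlock_t lock;
	struct napi_struct napi;
	struct net_device *dev;	/* back-pointer, used by the poll sketch later */
	u32 irqmask;		/* interrupt sources normally enabled */
	void __iomem *base;	/* mapped device registers */
};

static irqreturn_t my_isr(int irq, void *data)
{
	struct net_device *dev = data;
	struct my_priv *np = netdev_priv(dev);

	/* a real handler first reads and acks the irq status register
	 * (forcedeth: NvRegIrqStatus) and returns IRQ_NONE if nothing fired */

	spin_lock(&np->lock);
	/* mask *all* device interrupts, not just rx ... */
	writel(0, np->base + MY_IRQ_MASK_REG);
	/* ... and defer every kind of work (rx, tx completion, link and
	 * error events) to the NAPI poll routine */
	napi_schedule(&np->napi);
	spin_unlock(&np->lock);

	return IRQ_HANDLED;
}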

@@ -3440,25 +3440,22 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
nv_msi_workaround(np);
#ifdef CONFIG_FORCEDETH_NAPI
spin_lock(&np->lock);
napi_schedule(&np->napi);
/* Disable furthur irq's
(msix not enabled with napi) */
writel(0, base + NvRegIrqMask);
spin_unlock(&np->lock);
return IRQ_HANDLED;
#else
spin_lock(&np->lock);
nv_tx_done(dev, np->tx_ring_size);
spin_unlock(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
if (np->events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
napi_schedule(&np->napi);
/* Disable furthur receive irq's */
np->irqmask &= ~NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
spin_unlock(&np->lock);
}
#else
if (nv_rx_process(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx(dev))) {
spin_lock(&np->lock);
@@ -3467,7 +3464,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
spin_unlock(&np->lock);
}
}
#endif
if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -3513,7 +3510,7 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
break;
}
#endif
}
dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
@@ -3548,25 +3545,22 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
nv_msi_workaround(np);
#ifdef CONFIG_FORCEDETH_NAPI
spin_lock(&np->lock);
napi_schedule(&np->napi);
/* Disable furthur irq's
(msix not enabled with napi) */
writel(0, base + NvRegIrqMask);
spin_unlock(&np->lock);
return IRQ_HANDLED;
#else
spin_lock(&np->lock);
nv_tx_done_optimized(dev, TX_WORK_PER_LOOP);
spin_unlock(&np->lock);
#ifdef CONFIG_FORCEDETH_NAPI
if (np->events & NVREG_IRQ_RX_ALL) {
spin_lock(&np->lock);
napi_schedule(&np->napi);
/* Disable furthur receive irq's */
np->irqmask &= ~NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
spin_unlock(&np->lock);
}
#else
if (nv_rx_process_optimized(dev, RX_WORK_PER_LOOP)) {
if (unlikely(nv_alloc_rx_optimized(dev))) {
spin_lock(&np->lock);
@@ -3575,7 +3569,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
spin_unlock(&np->lock);
}
}
#endif
if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock(&np->lock);
nv_link_irq(dev);
@@ -3622,7 +3616,7 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq.\n", dev->name, i);
break;
}
#endif
}
dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
@@ -3682,9 +3676,17 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
int pkts, retcode;
if (!nv_optimized(np)) {
spin_lock_irqsave(&np->lock, flags);
nv_tx_done(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);
pkts = nv_rx_process(dev, budget);
retcode = nv_alloc_rx(dev);
} else {
spin_lock_irqsave(&np->lock, flags);
nv_tx_done_optimized(dev, np->tx_ring_size);
spin_unlock_irqrestore(&np->lock, flags);
pkts = nv_rx_process_optimized(dev, budget);
retcode = nv_alloc_rx_optimized(dev);
}
@@ -3696,17 +3698,37 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
spin_unlock_irqrestore(&np->lock, flags);
}
if (unlikely(np->events & NVREG_IRQ_LINK)) {
spin_lock_irqsave(&np->lock, flags);
nv_link_irq(dev);
spin_unlock_irqrestore(&np->lock, flags);
}
if (unlikely(np->need_linktimer && time_after(jiffies, np->link_timeout))) {
spin_lock_irqsave(&np->lock, flags);
nv_linkchange(dev);
spin_unlock_irqrestore(&np->lock, flags);
np->link_timeout = jiffies + LINK_TIMEOUT;
}
if (unlikely(np->events & NVREG_IRQ_RECOVER_ERROR)) {
spin_lock_irqsave(&np->lock, flags);
if (!np->in_shutdown) {
np->nic_poll_irq = np->irqmask;
np->recover_error = 1;
mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
}
spin_unlock_irqrestore(&np->lock, flags);
__napi_complete(napi);
return pkts;
}
if (pkts < budget) {
/* re-enable receive interrupts */
/* re-enable interrupts
(msix not enabled in napi) */
spin_lock_irqsave(&np->lock, flags);
__napi_complete(napi);
np->irqmask |= NVREG_IRQ_RX_ALL;
if (np->msi_flags & NV_MSI_X_ENABLED)
writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
else
writel(np->irqmask, base + NvRegIrqMask);
writel(np->irqmask, base + NvRegIrqMask);
spin_unlock_irqrestore(&np->lock, flags);
}
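To round out the picture, the matching poll-side sketch below continues the illustrative names from the ISR sketch above; my_tx_done, my_rx_process and my_handle_events are hypothetical stand-ins for nv_tx_done/nv_tx_done_optimized, nv_rx_process/nv_rx_process_optimized and the link/recovery handling shown in the hunks. The poll routine now carries the whole workload and, once it finishes under budget, completes NAPI and re-enables the full interrupt mask that the ISR cleared.

/* Poll-side companion sketch; reuses struct my_priv and MY_IRQ_MASK_REG
 * from the ISR sketch above, helper names are hypothetical. */
static void my_tx_done(struct net_device *dev);
static int my_rx_process(struct net_device *dev, int budget);
static void my_handle_events(struct net_device *dev);

static int my_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *np = container_of(napi, struct my_priv, napi);
	struct net_device *dev = np->dev;
	unsigned long flags;
	int pkts;

	/* tx completion has moved from the ISR into the poll routine */
	spin_lock_irqsave(&np->lock, flags);
	my_tx_done(dev);
	spin_unlock_irqrestore(&np->lock, flags);

	/* rx work is bounded by the NAPI budget */
	pkts = my_rx_process(dev, budget);

	/* link changes and error recovery are handled here as well */
	my_handle_events(dev);

	if (pkts < budget) {
		/* all work done: leave polling and unmask every interrupt
		 * (the ISR wrote 0 to the mask register when it scheduled us) */
		spin_lock_irqsave(&np->lock, flags);
		__napi_complete(napi);
		writel(np->irqmask, np->base + MY_IRQ_MASK_REG);
		spin_unlock_irqrestore(&np->lock, flags);
	}

	return pkts;
}

In a real driver the poll routine is registered with netif_napi_add() and the handler with request_irq(); neither registration is touched by this patch. __napi_complete() matches the 2.6.29-era API used in the diff and is the variant meant to be called with interrupts already disabled, which spin_lock_irqsave() ensures here.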