netpoll: make ndo_poll_controller() optional
As diagnosed by Song Liu, ndo_poll_controller() can be very dangerous on loaded hosts, since the cpu calling ndo_poll_controller() might steal all NAPI contexts (for all RX/TX queues of the NIC). This capture can last for an unlimited amount of time, since one cpu is generally not able to drain all the queues under load.

It seems that all networking drivers that do use NAPI for their TX completions should not provide an ndo_poll_controller().

NAPI drivers have netpoll support already handled in the core networking stack, since netpoll_poll_dev() uses poll_napi(dev) to iterate through the registered NAPI contexts for a device.

This patch allows netpoll_poll_dev() to process NAPI contexts even for drivers not providing ndo_poll_controller(), allowing for following patches in NAPI drivers.

Also, we export netpoll_poll_dev() so that it can be called by bonding/team drivers in following patches.

Reported-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Tested-by: Song Liu <songliubraving@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Commit ac3d9dd034, parent 16fdf8ba98.
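For illustration only (not part of this commit): the export of netpoll_poll_dev() lets an aggregating driver such as bonding or team poll its lower devices through the core helper instead of relying on each lower driver's ndo_poll_controller(). A minimal sketch, assuming a hypothetical bond/team-like driver; the my_aggr_* names are placeholders and locking of the port list is omitted:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>

/* Hypothetical per-port and private data for a bond/team-like device. */
struct my_aggr_port {
	struct net_device *dev;
	struct list_head   list;
};

struct my_aggr_priv {
	struct list_head   ports;
};

/* Poll every lower device through the helper exported by this patch.
 * netpoll_poll_dev() drives the lower driver's NAPI contexts via
 * poll_napi(), whether or not that driver still implements
 * ndo_poll_controller().
 */
static void my_aggr_poll_controller(struct net_device *dev)
{
	struct my_aggr_priv *priv = netdev_priv(dev);
	struct my_aggr_port *port;

	list_for_each_entry(port, &priv->ports, list)
		netpoll_poll_dev(port->dev);
}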
--- a/include/linux/netpoll.h
+++ b/include/linux/netpoll.h
@@ -49,8 +49,9 @@ struct netpoll_info {
 };
 
 #ifdef CONFIG_NETPOLL
-extern void netpoll_poll_disable(struct net_device *dev);
-extern void netpoll_poll_enable(struct net_device *dev);
+void netpoll_poll_dev(struct net_device *dev);
+void netpoll_poll_disable(struct net_device *dev);
+void netpoll_poll_enable(struct net_device *dev);
 #else
 static inline void netpoll_poll_disable(struct net_device *dev) { return; }
 static inline void netpoll_poll_enable(struct net_device *dev) { return; }
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -187,16 +187,16 @@ static void poll_napi(struct net_device *dev)
 	}
 }
 
-static void netpoll_poll_dev(struct net_device *dev)
+void netpoll_poll_dev(struct net_device *dev)
 {
-	const struct net_device_ops *ops;
 	struct netpoll_info *ni = rcu_dereference_bh(dev->npinfo);
+	const struct net_device_ops *ops;
 
 	/* Don't do any rx activity if the dev_lock mutex is held
 	 * the dev_open/close paths use this to block netpoll activity
 	 * while changing device state
 	 */
-	if (down_trylock(&ni->dev_lock))
+	if (!ni || down_trylock(&ni->dev_lock))
 		return;
 
 	if (!netif_running(dev)) {
@@ -205,13 +205,8 @@ static void netpoll_poll_dev(struct net_device *dev)
 	}
 
 	ops = dev->netdev_ops;
-	if (!ops->ndo_poll_controller) {
-		up(&ni->dev_lock);
-		return;
-	}
-
-	/* Process pending work on NIC */
-	ops->ndo_poll_controller(dev);
+	if (ops->ndo_poll_controller)
+		ops->ndo_poll_controller(dev);
 
 	poll_napi(dev);
 
@@ -219,6 +214,7 @@ static void netpoll_poll_dev(struct net_device *dev)
 
 	zap_completion_queue();
 }
+EXPORT_SYMBOL(netpoll_poll_dev);
 
 void netpoll_poll_disable(struct net_device *dev)
 {
@@ -613,8 +609,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
 	strlcpy(np->dev_name, ndev->name, IFNAMSIZ);
 	INIT_WORK(&np->cleanup_work, netpoll_async_cleanup);
 
-	if ((ndev->priv_flags & IFF_DISABLE_NETPOLL) ||
-	    !ndev->netdev_ops->ndo_poll_controller) {
+	if (ndev->priv_flags & IFF_DISABLE_NETPOLL) {
 		np_err(np, "%s doesn't support polling, aborting\n",
 		       np->dev_name);
 		err = -ENOTSUPP;
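Also for illustration (not from this commit): the kind of follow-up patch this enables in NAPI drivers. Because netpoll_poll_dev() now always falls through to poll_napi(), a driver whose poll controller only scheduled its NAPI contexts can drop ndo_poll_controller from its net_device_ops entirely. The foo_* names below are placeholders:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_open(struct net_device *dev)
{
	return 0;
}

static int foo_stop(struct net_device *dev)
{
	return 0;
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

/* No .ndo_poll_controller entry: core netpoll polls the NAPI contexts
 * this driver registers with netif_napi_add(), so a poll controller
 * that merely schedules NAPI is no longer needed.
 */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,
};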