forked from luck/tmp_suning_uos_patched
netdev: Add netdev->select_queue() method.
Devices or device layers can set this to control the queue selection performed by dev_pick_tx(). This function runs under RCU protection, which allows overriding functions to have some way of synchronizing with things like dynamic ->real_num_tx_queues adjustments. This makes the spinlock prefetch in dev_queue_xmit() a little bit less effective, but that's the price right now for correctness. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
e3c50d5d25
commit
eae792b722
@@ -724,6 +724,9 @@ struct net_device
|
||||
void (*poll_controller)(struct net_device *dev);
|
||||
#endif
|
||||
|
||||
u16 (*select_queue)(struct net_device *dev,
|
||||
struct sk_buff *skb);
|
||||
|
||||
#ifdef CONFIG_NET_NS
|
||||
/* Network namespace this network device is inside */
|
||||
struct net *nd_net;
|
||||
|
@@ -1670,6 +1670,9 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
					struct sk_buff *skb)
{
	/* Ask the device's optional ->select_queue() hook for a queue;
	 * devices without one always use queue 0. Per the commit message,
	 * the hook runs under RCU protection (taken by the caller), which
	 * gives overriding functions a way to synchronize with dynamic
	 * ->real_num_tx_queues adjustments.
	 */
	u16 queue_index = dev->select_queue ? dev->select_queue(dev, skb) : 0;

	/* Record the chosen queue on the skb, then hand back the
	 * corresponding tx queue structure.
	 */
	skb_set_queue_mapping(skb, queue_index);
	return netdev_get_tx_queue(dev, queue_index);
}
@@ -1710,14 +1713,14 @@ int dev_queue_xmit(struct sk_buff *skb)
|
||||
}
|
||||
|
||||
gso:
|
||||
-	txq = dev_pick_tx(dev, skb);
-	spin_lock_prefetch(&txq->lock);
-
/* Disable soft irqs for various locks below. Also
|
||||
* stops preemption for RCU.
|
||||
*/
|
||||
rcu_read_lock_bh();
|
||||
|
||||
+	txq = dev_pick_tx(dev, skb);
+	spin_lock_prefetch(&txq->lock);
+
/* Updates of qdisc are serialized by queue->lock.
|
||||
* The struct Qdisc which is pointed to by qdisc is now a
|
||||
* rcu structure - it may be accessed without acquiring
|
||||
|
Loading…
Reference in New Issue
Block a user