Merge branch 'hv_netvsc-minor-fixes'

Stephen Hemminger says:

====================
hv_netvsc: minor fixes

These are improvements to the netvsc driver. They aren't functionality
changes, so they are not targeted at net-next; and they are not show-stopper
bugs that need to go to stable either.

v2
   - drop the irq flags patch, defer it to net-next
   - split the multicast filter flag patch out
   - change propagate rx mode patch to handle startup of vf
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller <davem@davemloft.net>, 2018-03-04 22:18:21 -05:00
commit a7f0fb1bfb
3 changed files with 79 additions and 39 deletions

drivers/net/hyperv/netvsc.c

@ -852,13 +852,6 @@ int netvsc_send(struct net_device *ndev,
if (unlikely(!net_device || net_device->destroy))
return -ENODEV;
/* We may race with netvsc_connect_vsp()/netvsc_init_buf() and get
* here before the negotiation with the host is finished and
* send_section_map may not be allocated yet.
*/
if (unlikely(!net_device->send_section_map))
return -EAGAIN;
nvchan = &net_device->chan_table[packet->q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
@ -866,10 +859,8 @@ int netvsc_send(struct net_device *ndev,
/* Send control message directly without accessing msd (Multi-Send
* Data) field which may be changed during data packet processing.
*/
if (!skb) {
cur_send = packet;
goto send_now;
}
if (!skb)
return netvsc_send_pkt(device, packet, net_device, pb, skb);
/* batch packets in send buffer if possible */
msdp = &nvchan->msd;
@ -953,7 +944,6 @@ int netvsc_send(struct net_device *ndev,
}
}
send_now:
if (cur_send)
ret = netvsc_send_pkt(device, cur_send, net_device, pb, skb);
@ -1217,9 +1207,10 @@ int netvsc_poll(struct napi_struct *napi, int budget)
if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
work_done < budget &&
napi_complete_done(napi, work_done) &&
hv_end_read(&channel->inbound)) {
hv_end_read(&channel->inbound) &&
napi_schedule_prep(napi)) {
hv_begin_read(&channel->inbound);
napi_reschedule(napi);
__napi_schedule(napi);
}
/* Driver may overshoot since multiple packets per descriptor */
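
The poll hunk above closes a re-arm race: after napi_complete_done(), the channel is rescheduled only if napi_schedule_prep() succeeds, and then via __napi_schedule() rather than napi_reschedule(), so a concurrent scheduling of the same NAPI instance is neither lost nor doubled. A minimal sketch of that generic pattern, not part of the commit; the example_ helpers are hypothetical stand-ins for the driver's ring handling (hv_begin_read()/hv_end_read() in the real code):

#include <linux/netdevice.h>

/* Hypothetical stand-ins for the driver's ring processing and its
 * "did more work arrive after completion?" check.
 */
static int example_process_ring(struct napi_struct *napi, int budget)
{
        return 0;
}

static bool example_more_work_pending(struct napi_struct *napi)
{
        return false;
}

static int example_poll(struct napi_struct *napi, int budget)
{
        int work_done = example_process_ring(napi, budget);

        if (work_done < budget &&
            napi_complete_done(napi, work_done) &&
            example_more_work_pending(napi) &&
            napi_schedule_prep(napi)) {
                /* Late work arrived and nobody else has scheduled this
                 * NAPI instance, so it is safe to schedule it directly.
                 */
                __napi_schedule(napi);
        }

        return work_done;
}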
@ -1242,7 +1233,7 @@ void netvsc_channel_cb(void *context)
/* disable interrupts from host */
hv_begin_read(rbi);
__napi_schedule(&nvchan->napi);
__napi_schedule_irqoff(&nvchan->napi);
}
}
@ -1296,7 +1287,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
netvsc_channel_cb, net_device->chan_table);
if (ret != 0) {
netif_napi_del(&net_device->chan_table[0].napi);
netdev_err(ndev, "unable to open channel: %d\n", ret);
goto cleanup;
}
@ -1306,11 +1296,6 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
napi_enable(&net_device->chan_table[0].napi);
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.
*/
rcu_assign_pointer(net_device_ctx->nvdev, net_device);
/* Connect with the NetVsp */
ret = netvsc_connect_vsp(device, net_device, device_info);
if (ret != 0) {
@ -1319,6 +1304,11 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
goto close;
}
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.
*/
rcu_assign_pointer(net_device_ctx->nvdev, net_device);
return net_device;
close:
@ -1329,6 +1319,7 @@ struct netvsc_device *netvsc_device_add(struct hv_device *device,
vmbus_close(device->channel);
cleanup:
netif_napi_del(&net_device->chan_table[0].napi);
free_netvsc_device(&net_device->rcu);
return ERR_PTR(ret);
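
The reordering above means net_device_ctx->nvdev is published with rcu_assign_pointer() only after netvsc_connect_vsp() succeeds, and the added free_netvsc_device() releases the device when the channel fails to open before it was ever published. A minimal sketch of the publish-after-init idiom involved, not part of the commit; struct example_dev, example_ptr and example_add() are hypothetical:

#include <linux/errno.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_dev {
        int ready;              /* hypothetical device state */
};

static struct example_dev __rcu *example_ptr;

static int example_add(void)
{
        struct example_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
                return -ENOMEM;

        d->ready = 1;           /* complete every step that can fail ... */

        /* ... and only then make the object visible to readers that
         * look it up with rcu_dereference(example_ptr).
         */
        rcu_assign_pointer(example_ptr, d);
        return 0;
}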

drivers/net/hyperv/netvsc_drv.c

@ -66,10 +66,36 @@ static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static void netvsc_set_multicast_list(struct net_device *net)
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
int inc;
if (!vf_netdev)
return;
if (change & IFF_PROMISC) {
inc = (net->flags & IFF_PROMISC) ? 1 : -1;
dev_set_promiscuity(vf_netdev, inc);
}
if (change & IFF_ALLMULTI) {
inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
dev_set_allmulti(vf_netdev, inc);
}
}
static void netvsc_set_rx_mode(struct net_device *net)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
if (vf_netdev) {
dev_uc_sync(vf_netdev, net);
dev_mc_sync(vf_netdev, net);
}
rndis_filter_update(nvdev);
}
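
dev_set_promiscuity() and dev_set_allmulti() keep per-device counts rather than booleans, which is why the new ndo_change_rx_flags handler above passes a +1/-1 delta on each flag transition instead of mirroring the absolute flag value. A minimal sketch of that convention, not part of the commit; example_follow_rx_flags() and the 'lower' device are hypothetical:

#include <linux/netdevice.h>

/* Propagate an IFF_PROMISC / IFF_ALLMULTI transition on 'upper' to a
 * lower device.  Each +1 passed here must eventually be balanced by a
 * -1, because the core keeps a count, not a flag.
 */
static void example_follow_rx_flags(struct net_device *upper,
                                    struct net_device *lower,
                                    int changed)
{
        if (changed & IFF_PROMISC)
                dev_set_promiscuity(lower,
                                    (upper->flags & IFF_PROMISC) ? 1 : -1);

        if (changed & IFF_ALLMULTI)
                dev_set_allmulti(lower,
                                 (upper->flags & IFF_ALLMULTI) ? 1 : -1);
}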
@ -91,12 +117,11 @@ static int netvsc_open(struct net_device *net)
return ret;
}
netif_tx_wake_all_queues(net);
rdev = nvdev->extension;
if (!rdev->link_state)
if (!rdev->link_state) {
netif_carrier_on(net);
netif_tx_wake_all_queues(net);
}
if (vf_netdev) {
/* Setting synthetic device up transparently sets
@ -299,8 +324,19 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
rcu_read_lock();
vf_netdev = rcu_dereference(ndc->vf_netdev);
if (vf_netdev) {
txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;
if (vf_ops->ndo_select_queue)
txq = vf_ops->ndo_select_queue(vf_netdev, skb,
accel_priv, fallback);
else
txq = fallback(vf_netdev, skb);
/* Record the queue selected by VF so that it can be
* used for common case where VF has more queues than
* the synthetic device.
*/
qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
} else {
txq = netvsc_pick_tx(ndev, skb);
}
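
In isolation, the delegation pattern used above — ask the lower device's own ndo_select_queue() if it provides one, otherwise use the core fallback — looks like the sketch below. Not part of the commit; example_select_lower_queue() is hypothetical, and the signatures are those of the kernel version this diff targets (they changed in later releases):

#include <linux/netdevice.h>

/* Let a lower device pick the transmit queue for an skb that will be
 * forwarded to it.
 */
static u16 example_select_lower_queue(struct net_device *lower,
                                      struct sk_buff *skb,
                                      void *accel_priv,
                                      select_queue_fallback_t fallback)
{
        const struct net_device_ops *ops = lower->netdev_ops;

        if (ops->ndo_select_queue)
                return ops->ndo_select_queue(lower, skb, accel_priv, fallback);

        return fallback(lower, skb);
}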
@ -1576,7 +1612,8 @@ static const struct net_device_ops device_ops = {
.ndo_open = netvsc_open,
.ndo_stop = netvsc_close,
.ndo_start_xmit = netvsc_start_xmit,
.ndo_set_rx_mode = netvsc_set_multicast_list,
.ndo_change_rx_flags = netvsc_change_rx_flags,
.ndo_set_rx_mode = netvsc_set_rx_mode,
.ndo_change_mtu = netvsc_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = netvsc_set_mac_addr,
@ -1807,6 +1844,11 @@ static void __netvsc_vf_setup(struct net_device *ndev,
netdev_warn(vf_netdev,
"unable to change mtu to %u\n", ndev->mtu);
/* set multicast etc flags on VF */
dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
dev_uc_sync(vf_netdev, ndev);
dev_mc_sync(vf_netdev, ndev);
if (netif_running(ndev)) {
ret = dev_open(vf_netdev);
if (ret)

drivers/net/hyperv/rndis_filter.c

@ -854,15 +854,19 @@ static void rndis_set_multicast(struct work_struct *w)
{
struct rndis_device *rdev
= container_of(w, struct rndis_device, mcast_work);
u32 filter = NDIS_PACKET_TYPE_DIRECTED;
unsigned int flags = rdev->ndev->flags;
if (rdev->ndev->flags & IFF_PROMISC)
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_PROMISCUOUS);
else
rndis_filter_set_packet_filter(rdev,
NDIS_PACKET_TYPE_BROADCAST |
NDIS_PACKET_TYPE_ALL_MULTICAST |
NDIS_PACKET_TYPE_DIRECTED);
if (flags & IFF_PROMISC) {
filter = NDIS_PACKET_TYPE_PROMISCUOUS;
} else {
if (flags & IFF_ALLMULTI)
flags |= NDIS_PACKET_TYPE_ALL_MULTICAST;
if (flags & IFF_BROADCAST)
flags |= NDIS_PACKET_TYPE_BROADCAST;
}
rndis_filter_set_packet_filter(rdev, filter);
}
void rndis_filter_update(struct netvsc_device *nvdev)
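
One detail worth flagging in the hunk above: the IFF_ALLMULTI and IFF_BROADCAST branches OR the NDIS bits into flags rather than into filter, so as written the host only ever receives NDIS_PACKET_TYPE_DIRECTED or NDIS_PACKET_TYPE_PROMISCUOUS. A sketch of the mapping the code appears to intend, not part of the commit; example_ndis_filter() is a hypothetical helper, and the NDIS_PACKET_TYPE_* constants come from the driver's hyperv_net.h:

#include <linux/netdevice.h>

/* Translate net_device flags into an NDIS packet filter, accumulating
 * the multicast/broadcast bits into 'filter'.
 */
static u32 example_ndis_filter(unsigned int flags)
{
        u32 filter = NDIS_PACKET_TYPE_DIRECTED;

        if (flags & IFF_PROMISC) {
                filter = NDIS_PACKET_TYPE_PROMISCUOUS;
        } else {
                if (flags & IFF_ALLMULTI)
                        filter |= NDIS_PACKET_TYPE_ALL_MULTICAST;
                if (flags & IFF_BROADCAST)
                        filter |= NDIS_PACKET_TYPE_BROADCAST;
        }

        return filter;
}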
@ -1340,6 +1344,9 @@ void rndis_filter_device_remove(struct hv_device *dev,
{
struct rndis_device *rndis_dev = net_dev->extension;
/* Don't try and setup sub channels if about to halt */
cancel_work_sync(&net_dev->subchan_work);
/* Halt and release the rndis device */
rndis_filter_halt_device(rndis_dev);