Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "A couple of interesting SKB fragment handling fixes, plus the usual
  small bits here and there:

   1) Fix 64-bit divide build failure on 32-bit platforms in mlx5, from
      Tim Gardner.

   2) Get rid of a stupid reimplementation of "%*phC" in our sysfs MAC
      address printing helper.

   3) Fix NETIF_F_SG capability advertisement in hyperv driver, if the
      device can't do checksumming offloads then it shouldn't say it
      can do SG either.  From Haiyang Zhang.

   4) bgmac needs to depend on PHYLIB, from Hauke Mehrtens.

   5) Don't leak DMA mappings on mapping failures, from Neil Horman.

   6) We need to reset the transport header of SKBs in ipv4 before we
      attempt to perform early socket demux, just like ipv6 does.
      From Eric Dumazet.

   7) Add missing locking on vxlan device removal, from Stephen
      Hemminger.

   8) xen-netfront has to make two passes over an SKB to prepare it for
      transfer.  One pass calculates the number of slots needed, the
      second massages the SKB and fills the slots.  Unfortunately, the
      first pass doesn't calculate the number of slots properly so we
      can end up trying to build a MAX_SKB_FRAGS + 1 SKB which doesn't
      work out so well.  Fix from Jan Beulich with help and discussion
      with several others.

   9) Fix a similar problem in tun and macvtap, which have to split up
      scatter-gather elements at PAGE_SIZE boundaries.  Don't do
      zerocopy if it would result in a > MAX_SKB_FRAGS skb.  Fixes
      from Jason Wang.

  10) On receive, once we've decoded the VLAN state completely, clear
      skb->vlan_tci.  Otherwise demuxed tunnels underneath can trigger
      the VLAN code again, corrupting the packet.  Fix from Eric
      Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  vlan: fix a race in egress prio management
  vlan: mask vlan prio bits
  macvtap: do not zerocopy if iov needs more pages than MAX_SKB_FRAGS
  tuntap: do not zerocopy if iov needs more pages than MAX_SKB_FRAGS
  pkt_sched: sch_qfq: remove a source of high packet delay/jitter
  xen-netfront: pull on receive skb may need to happen earlier
  vxlan: add necessary locking on device removal
  hyperv: Fix the NETIF_F_SG flag setting in netvsc
  net: Fix sysfs_format_mac() code duplication.
  be2net: Fix to avoid hardware workaround when not needed
  macvtap: do not assume 802.1Q when send vlan packets
  macvtap: fix the missing ret value of TUNSETQUEUE
  ipv4: set transport header earlier
  mlx5 core: Fix __udivdi3 when compiling for 32 bit arches
  bgmac: add dependency to phylib
  net/irda: fixed style issues in irlan_eth
  ethtool: fixed trailing statements in ethtool
  ndisc: bool initializations should use true and false
  atl1e: unmap partially mapped skb on dma error and free skb
commit ecb2cf1a6b
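Fixes 8 and 9 above hinge on the same arithmetic: a user buffer that does not start on a page boundary spans one more PAGE_SIZE slot (or skb fragment) than its length alone suggests, so any slot count taken without the starting offset undercounts. Below is a minimal userspace sketch of the rounding that the iov_pages() helpers added in this merge perform; the 4 KiB page size is an illustrative assumption, not something the fixes depend on.

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT 12                    /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Pages touched by 'len' bytes at address 'base': round the in-page
 * offset plus the length up to the next page boundary, then shift.
 * This is the same expression iov_pages() applies per iovec segment.
 */
static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return ((base & ~PAGE_MASK) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	/* 4096 bytes starting exactly on a page boundary: one page. */
	assert(pages_spanned(0x1000, 4096) == 1);
	/* The same 4096 bytes shifted by one byte straddle two pages,
	 * which is why counting slots from the length alone undercounts.
	 */
	assert(pages_spanned(0x1001, 4096) == 2);
	printf("ok\n");
	return 0;
}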
drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1678,6 +1678,7 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
 	u16 f;
 	int segment;
+	int ring_start = adapter->tx_ring.next_to_use;
+	int ring_end;
 
 	nr_frags = skb_shinfo(skb)->nr_frags;
 	segment = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
@@ -1721,6 +1722,15 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
 					    map_len, PCI_DMA_TODEVICE);
 
+		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+			/* We need to unwind the mappings we've done */
+			ring_end = adapter->tx_ring.next_to_use;
+			adapter->tx_ring.next_to_use = ring_start;
+			while (adapter->tx_ring.next_to_use != ring_end) {
+				tpd = atl1e_get_tpd(adapter);
+				tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+				pci_unmap_single(adapter->pdev, tx_buffer->dma,
+						 tx_buffer->length, PCI_DMA_TODEVICE);
+			}
+			/* Reset the tx rings next pointer */
+			adapter->tx_ring.next_to_use = ring_start;
+			return -ENOSPC;
+		}
@@ -1763,6 +1773,16 @@ static int atl1e_tx_map(struct atl1e_adapter *adapter,
 						  DMA_TO_DEVICE);
 
+		if (dma_mapping_error(&adapter->pdev->dev, tx_buffer->dma)) {
+			/* We need to unwind the mappings we've done */
+			ring_end = adapter->tx_ring.next_to_use;
+			adapter->tx_ring.next_to_use = ring_start;
+			while (adapter->tx_ring.next_to_use != ring_end) {
+				tpd = atl1e_get_tpd(adapter);
+				tx_buffer = atl1e_get_tx_buffer(adapter, tpd);
+				dma_unmap_page(&adapter->pdev->dev, tx_buffer->dma,
+					       tx_buffer->length, DMA_TO_DEVICE);
+			}
+
+			/* Reset the ring next to use pointer */
+			adapter->tx_ring.next_to_use = ring_start;
+			return -ENOSPC;
+		}
@@ -1853,8 +1873,10 @@ static netdev_tx_t atl1e_xmit_frame(struct sk_buff *skb,
 		return NETDEV_TX_OK;
 	}
 
-	if (atl1e_tx_map(adapter, skb, tpd))
+	if (atl1e_tx_map(adapter, skb, tpd)) {
+		dev_kfree_skb_any(skb);
 		goto out;
+	}
 
 	atl1e_tx_queue(adapter, tpd_req, tpd);
drivers/net/ethernet/broadcom/Kconfig
@@ -131,6 +131,7 @@ config BNX2X_SRIOV
 config BGMAC
 	tristate "BCMA bus GBit core support"
 	depends on BCMA_HOST_SOC && HAS_DMA
+	select PHYLIB
 	---help---
 	  This driver supports GBit MAC and BCM4706 GBit MAC cores on BCMA bus.
 	  They can be found on BCM47xx SoCs and provide gigabit ethernet.
drivers/net/ethernet/emulex/benet/be_main.c
@@ -782,16 +782,22 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
 
 	if (vlan_tx_tag_present(skb))
 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
-	else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
-		vlan_tag = adapter->pvid;
+
+	if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
+		if (!vlan_tag)
+			vlan_tag = adapter->pvid;
+		/* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
+		 * skip VLAN insertion
+		 */
+		if (skip_hw_vlan)
+			*skip_hw_vlan = true;
+	}
 
 	if (vlan_tag) {
 		skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
 		if (unlikely(!skb))
 			return skb;
 		skb->vlan_tci = 0;
-		if (skip_hw_vlan)
-			*skip_hw_vlan = true;
 	}
 
 	/* Insert the outer VLAN, if any */
drivers/net/ethernet/mellanox/mlx5/core/debugfs.c
@@ -156,7 +156,7 @@ static ssize_t average_read(struct file *filp, char __user *buf, size_t count,
 	stats = filp->private_data;
 	spin_lock(&stats->lock);
 	if (stats->n)
-		field = stats->sum / stats->n;
+		field = div64_u64(stats->sum, stats->n);
 	spin_unlock(&stats->lock);
 	ret = snprintf(tbuf, sizeof(tbuf), "%llu\n", field);
 	if (ret > 0) {
drivers/net/hyperv/netvsc_drv.c
@@ -431,8 +431,8 @@ static int netvsc_probe(struct hv_device *dev,
 	net->netdev_ops = &device_ops;
 
 	/* TODO: Add GSO and Checksum offload */
-	net->hw_features = NETIF_F_SG;
-	net->features = NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_TX;
+	net->hw_features = 0;
+	net->features = NETIF_F_HW_VLAN_CTAG_TX;
 
 	SET_ETHTOOL_OPS(net, &ethtool_ops);
 	SET_NETDEV_DEV(net, &dev->device);
drivers/net/macvtap.c
@@ -698,6 +698,28 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
 	return 0;
 }
 
+static unsigned long iov_pages(const struct iovec *iv, int offset,
+			       unsigned long nr_segs)
+{
+	unsigned long seg, base;
+	int pages = 0, len, size;
+
+	while (nr_segs && (offset >= iv->iov_len)) {
+		offset -= iv->iov_len;
+		++iv;
+		--nr_segs;
+	}
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		base = (unsigned long)iv[seg].iov_base + offset;
+		len = iv[seg].iov_len - offset;
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		pages += size;
+		offset = 0;
+	}
+
+	return pages;
+}
+
 /* Get packet from user space buffer */
 static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
@@ -744,31 +766,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 	if (unlikely(count > UIO_MAXIOV))
 		goto err;
 
-	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY))
-		zerocopy = true;
-
-	if (zerocopy) {
-		/* Userspace may produce vectors with count greater than
-		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
-		 * to let the rest of data to be fit in the frags.
-		 */
-		if (count > MAX_SKB_FRAGS) {
-			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
-			if (copylen < vnet_hdr_len)
-				copylen = 0;
-			else
-				copylen -= vnet_hdr_len;
-		}
-		/* There are 256 bytes to be copied in skb, so there is enough
-		 * room for skb expand head in case it is used.
-		 * The rest buffer is mapped from userspace.
-		 */
-		if (copylen < vnet_hdr.hdr_len)
-			copylen = vnet_hdr.hdr_len;
-		if (!copylen)
-			copylen = GOODCOPY_LEN;
-		linear = copylen;
-	} else {
+	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
+		copylen = vnet_hdr.hdr_len ? vnet_hdr.hdr_len : GOODCOPY_LEN;
+		linear = copylen;
+		if (iov_pages(iv, vnet_hdr_len + copylen, count)
+		    <= MAX_SKB_FRAGS)
+			zerocopy = true;
+	}
+
+	if (!zerocopy) {
 		copylen = len;
 		linear = vnet_hdr.hdr_len;
 	}
@@ -780,9 +786,15 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iovec(skb, iv, vnet_hdr_len, count);
-	else
+	else {
 		err = skb_copy_datagram_from_iovec(skb, 0, iv, vnet_hdr_len,
 						   len);
+		if (!err && m && m->msg_control) {
+			struct ubuf_info *uarg = m->msg_control;
+			uarg->callback(uarg, false);
+		}
+	}
 
 	if (err)
 		goto err_kfree;
@@ -873,7 +885,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
 			__be16 h_vlan_proto;
 			__be16 h_vlan_TCI;
 		} veth;
-		veth.h_vlan_proto = htons(ETH_P_8021Q);
+		veth.h_vlan_proto = skb->vlan_proto;
 		veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
 
 		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
@@ -1107,6 +1119,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
 		rtnl_lock();
 		ret = macvtap_ioctl_set_queue(file, u);
 		rtnl_unlock();
+		return ret;
 
 	case TUNGETFEATURES:
 		if (put_user(IFF_TAP | IFF_NO_PI | IFF_VNET_HDR |
drivers/net/tun.c
@@ -1035,6 +1035,29 @@ static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
 	return 0;
 }
 
+static unsigned long iov_pages(const struct iovec *iv, int offset,
+			       unsigned long nr_segs)
+{
+	unsigned long seg, base;
+	int pages = 0, len, size;
+
+	while (nr_segs && (offset >= iv->iov_len)) {
+		offset -= iv->iov_len;
+		++iv;
+		--nr_segs;
+	}
+
+	for (seg = 0; seg < nr_segs; seg++) {
+		base = (unsigned long)iv[seg].iov_base + offset;
+		len = iv[seg].iov_len - offset;
+		size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
+		pages += size;
+		offset = 0;
+	}
+
+	return pages;
+}
+
 /* Get packet from user space buffer */
 static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			    void *msg_control, const struct iovec *iv,
@@ -1082,32 +1105,18 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			return -EINVAL;
 	}
 
-	if (msg_control)
-		zerocopy = true;
-
-	if (zerocopy) {
-		/* Userspace may produce vectors with count greater than
-		 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
-		 * to let the rest of data to be fit in the frags.
-		 */
-		if (count > MAX_SKB_FRAGS) {
-			copylen = iov_length(iv, count - MAX_SKB_FRAGS);
-			if (copylen < offset)
-				copylen = 0;
-			else
-				copylen -= offset;
-		} else
-			copylen = 0;
-		/* There are 256 bytes to be copied in skb, so there is enough
-		 * room for skb expand head in case it is used.
+	if (msg_control) {
+		/* There are 256 bytes to be copied in skb, so there is
+		 * enough room for skb expand head in case it is used.
 		 * The rest of the buffer is mapped from userspace.
 		 */
-		if (copylen < gso.hdr_len)
-			copylen = gso.hdr_len;
-		if (!copylen)
-			copylen = GOODCOPY_LEN;
+		copylen = gso.hdr_len ? gso.hdr_len : GOODCOPY_LEN;
 		linear = copylen;
-	} else {
+		if (iov_pages(iv, offset + copylen, count) <= MAX_SKB_FRAGS)
+			zerocopy = true;
+	}
+
+	if (!zerocopy) {
 		copylen = len;
 		linear = gso.hdr_len;
 	}
@@ -1121,8 +1130,13 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 
 	if (zerocopy)
 		err = zerocopy_sg_from_iovec(skb, iv, offset, count);
-	else
+	else {
 		err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
+		if (!err && msg_control) {
+			struct ubuf_info *uarg = msg_control;
+			uarg->callback(uarg, false);
+		}
+	}
 
 	if (err) {
 		tun->dev->stats.rx_dropped++;
drivers/net/vxlan.c
@@ -1767,9 +1767,15 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 
 static void vxlan_dellink(struct net_device *dev, struct list_head *head)
 {
+	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
 	struct vxlan_dev *vxlan = netdev_priv(dev);
 
+	flush_workqueue(vxlan_wq);
+
+	spin_lock(&vn->sock_lock);
 	hlist_del_rcu(&vxlan->hlist);
+	spin_unlock(&vn->sock_lock);
+
 	list_del(&vxlan->next);
 	unregister_netdevice_queue(dev, head);
 }
drivers/net/xen-netfront.c
@@ -286,8 +286,7 @@ static void xennet_alloc_rx_buffers(struct net_device *dev)
 			break;
 		}
 
-		__skb_fill_page_desc(skb, 0, page, 0, 0);
-		skb_shinfo(skb)->nr_frags = 1;
+		skb_add_rx_frag(skb, 0, page, 0, 0, PAGE_SIZE);
 		__skb_queue_tail(&np->rx_batch, skb);
 	}
 
@@ -831,7 +830,6 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 			       struct sk_buff_head *list)
 {
 	struct skb_shared_info *shinfo = skb_shinfo(skb);
-	int nr_frags = shinfo->nr_frags;
 	RING_IDX cons = np->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -840,19 +838,21 @@ static RING_IDX xennet_fill_frags(struct netfront_info *np,
 			RING_GET_RESPONSE(&np->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		__skb_fill_page_desc(skb, nr_frags,
-				     skb_frag_page(nfrag),
-				     rx->offset, rx->status);
+		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		skb->data_len += rx->status;
+			BUG_ON(pull_to <= skb_headlen(skb));
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		}
+		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+
+		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
 		kfree_skb(nskb);
-
-		nr_frags++;
 	}
 
-	shinfo->nr_frags = nr_frags;
 	return cons;
 }
 
@@ -933,7 +933,8 @@ static int handle_incoming_queue(struct net_device *dev,
 	while ((skb = __skb_dequeue(rxq)) != NULL) {
 		int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
-		__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
+		if (pull_to > skb_headlen(skb))
+			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 
 		/* Ethernet work: Delayed to here as it peeks the header. */
 		skb->protocol = eth_type_trans(skb, dev);
@@ -1019,16 +1020,10 @@ static int xennet_poll(struct napi_struct *napi, int budget)
 		skb_shinfo(skb)->frags[0].page_offset = rx->offset;
 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], rx->status);
 		skb->data_len = rx->status;
+		skb->len += rx->status;
 
 		i = xennet_fill_frags(np, skb, &tmpq);
 
-		/*
-		 * Truesize is the actual allocation size, even if the
-		 * allocation is only partially used.
-		 */
-		skb->truesize += PAGE_SIZE * skb_shinfo(skb)->nr_frags;
-		skb->len += skb->data_len;
-
 		if (rx->flags & XEN_NETRXF_csum_blank)
 			skb->ip_summed = CHECKSUM_PARTIAL;
 		else if (rx->flags & XEN_NETRXF_data_validated)
include/linux/if_vlan.h
@@ -79,9 +79,8 @@ static inline int is_vlan_dev(struct net_device *dev)
 }
 
 #define vlan_tx_tag_present(__skb)	((__skb)->vlan_tci & VLAN_TAG_PRESENT)
-#define vlan_tx_nonzero_tag_present(__skb) \
-	(vlan_tx_tag_present(__skb) && ((__skb)->vlan_tci & VLAN_VID_MASK))
 #define vlan_tx_tag_get(__skb)		((__skb)->vlan_tci & ~VLAN_TAG_PRESENT)
+#define vlan_tx_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
 
 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_do_receive(struct sk_buff **skbp)
 {
 	struct sk_buff *skb = *skbp;
 	__be16 vlan_proto = skb->vlan_proto;
-	u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
+	u16 vlan_id = vlan_tx_tag_get_id(skb);
 	struct net_device *vlan_dev;
 	struct vlan_pcpu_stats *rx_stats;
 
net/8021q/vlan_dev.c
@@ -73,6 +73,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb)
 {
 	struct vlan_priority_tci_mapping *mp;
 
+	smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */
+
 	mp = vlan_dev_priv(dev)->egress_priority_map[(skb->priority & 0xF)];
 	while (mp) {
 		if (mp->priority == skb->priority) {
@@ -249,6 +251,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev,
 	np->next = mp;
 	np->priority = skb_prio;
 	np->vlan_qos = vlan_qos;
+	/* Before inserting this element in hash table, make sure all its fields
+	 * are committed to memory.
+	 * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask()
+	 */
+	smp_wmb();
 	vlan->egress_priority_map[skb_prio & 0xF] = np;
 	if (vlan_qos)
 		vlan->nr_egress_mappings++;
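The two vlan_dev.c hunks above are the classic ordered-publication pairing: the writer fully initializes the new mapping, issues smp_wmb(), and only then links it where a lockless reader can find it; the reader's smp_rmb() pairs with that write barrier. A hedged userspace analogue of the same idea, using C11 release/acquire atomics instead of the kernel primitives (the struct and function names here are invented for illustration):

#include <stdatomic.h>
#include <stddef.h>

struct mapping {
	unsigned int priority;
	unsigned int vlan_qos;
	struct mapping *next;
};

static _Atomic(struct mapping *) head;

/* Writer: commit every field, then publish the node with release
 * ordering (the role smp_wmb() plays before the pointer store). */
static void publish(struct mapping *np, unsigned int prio, unsigned int qos)
{
	np->priority = prio;
	np->vlan_qos = qos;
	np->next = atomic_load_explicit(&head, memory_order_relaxed);
	atomic_store_explicit(&head, np, memory_order_release);
}

/* Reader: acquire ordering guarantees that any node reachable from
 * 'head' is seen fully initialized (the smp_rmb() side of the pair). */
static unsigned int lookup_qos(unsigned int prio)
{
	struct mapping *mp = atomic_load_explicit(&head, memory_order_acquire);

	for (; mp; mp = mp->next)
		if (mp->priority == prio)
			return mp->vlan_qos;
	return 0;
}

int main(void)
{
	static struct mapping m;

	publish(&m, 3, 0x6000);
	return lookup_qos(3) == 0x6000 ? 0 : 1;
}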
net/core/dev.c
@@ -3580,8 +3580,15 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
 		}
 	}
 
-	if (vlan_tx_nonzero_tag_present(skb))
-		skb->pkt_type = PACKET_OTHERHOST;
+	if (unlikely(vlan_tx_tag_present(skb))) {
+		if (vlan_tx_tag_get_id(skb))
+			skb->pkt_type = PACKET_OTHERHOST;
+		/* Note: we might in the future use prio bits
+		 * and set skb->priority like in vlan_do_receive()
+		 * For the time being, just ignore Priority Code Point
+		 */
+		skb->vlan_tci = 0;
+	}
 
 	/* deliver only exact match when indicated */
 	null_or_dev = deliver_exact ? skb->dev : NULL;
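Both VLAN receive fixes come down to the 802.1Q TCI layout: the low 12 bits hold the VLAN ID, the top 3 bits the priority code point, so a priority-tagged frame (nonzero PCP, VID 0) must not be mistaken for one carrying a real VLAN ID. A quick demonstration of the masking; the mask and shift values match the if_vlan.h definitions shown above:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_SHIFT	13
#define VLAN_VID_MASK	0x0fff	/* VLAN Identifier */

int main(void)
{
	/* Priority 5, VLAN ID 0: a "priority tagged" frame. */
	uint16_t tci = 5 << VLAN_PRIO_SHIFT;

	/* The raw TCI is nonzero, so a test without masking would
	 * wrongly conclude the frame belongs to a real VLAN... */
	assert(tci != 0);
	/* ...while vlan_tx_tag_get_id()-style masking does not. */
	assert((tci & VLAN_VID_MASK) == 0);
	printf("vid=%u prio=%u\n", tci & VLAN_VID_MASK,
	       tci >> VLAN_PRIO_SHIFT);
	return 0;
}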
net/core/ethtool.c
@@ -279,11 +279,16 @@ static u32 __ethtool_get_flags(struct net_device *dev)
 {
 	u32 flags = 0;
 
-	if (dev->features & NETIF_F_LRO)	     flags |= ETH_FLAG_LRO;
-	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX) flags |= ETH_FLAG_RXVLAN;
-	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX) flags |= ETH_FLAG_TXVLAN;
-	if (dev->features & NETIF_F_NTUPLE)	     flags |= ETH_FLAG_NTUPLE;
-	if (dev->features & NETIF_F_RXHASH)	     flags |= ETH_FLAG_RXHASH;
+	if (dev->features & NETIF_F_LRO)
+		flags |= ETH_FLAG_LRO;
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		flags |= ETH_FLAG_RXVLAN;
+	if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
+		flags |= ETH_FLAG_TXVLAN;
+	if (dev->features & NETIF_F_NTUPLE)
+		flags |= ETH_FLAG_NTUPLE;
+	if (dev->features & NETIF_F_RXHASH)
+		flags |= ETH_FLAG_RXHASH;
 
 	return flags;
 }
@@ -295,11 +300,16 @@ static int __ethtool_set_flags(struct net_device *dev, u32 data)
 	if (data & ~ETH_ALL_FLAGS)
 		return -EINVAL;
 
-	if (data & ETH_FLAG_LRO)	features |= NETIF_F_LRO;
-	if (data & ETH_FLAG_RXVLAN)	features |= NETIF_F_HW_VLAN_CTAG_RX;
-	if (data & ETH_FLAG_TXVLAN)	features |= NETIF_F_HW_VLAN_CTAG_TX;
-	if (data & ETH_FLAG_NTUPLE)	features |= NETIF_F_NTUPLE;
-	if (data & ETH_FLAG_RXHASH)	features |= NETIF_F_RXHASH;
+	if (data & ETH_FLAG_LRO)
+		features |= NETIF_F_LRO;
+	if (data & ETH_FLAG_RXVLAN)
+		features |= NETIF_F_HW_VLAN_CTAG_RX;
+	if (data & ETH_FLAG_TXVLAN)
+		features |= NETIF_F_HW_VLAN_CTAG_TX;
+	if (data & ETH_FLAG_NTUPLE)
+		features |= NETIF_F_NTUPLE;
+	if (data & ETH_FLAG_RXHASH)
+		features |= NETIF_F_RXHASH;
 
 	/* allow changing only bits set in hw_features */
 	changed = (features ^ dev->features) & ETH_ALL_FEATURES;
net/ethernet/eth.c
@@ -401,27 +401,8 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
 }
 EXPORT_SYMBOL(alloc_etherdev_mqs);
 
-static size_t _format_mac_addr(char *buf, int buflen,
-			       const unsigned char *addr, int len)
-{
-	int i;
-	char *cp = buf;
-
-	for (i = 0; i < len; i++) {
-		cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
-		if (i == len - 1)
-			break;
-		cp += scnprintf(cp, buflen - (cp - buf), ":");
-	}
-	return cp - buf;
-}
-
 ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
 {
-	size_t l;
-
-	l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
-	l += scnprintf(buf + l, PAGE_SIZE - l, "\n");
-	return (ssize_t)l;
+	return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
 }
 EXPORT_SYMBOL(sysfs_format_mac);
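The sysfs_format_mac() cleanup relies on the kernel's %*ph vsnprintf extension: "%*phC" prints the given number of bytes as colon-separated hex, so the hand-rolled loop collapses into a single scnprintf() call. Userspace printf has no such specifier; a hedged equivalent of what the deleted helper (and now "%*phC") produces:

#include <stdio.h>

/* Format 'len' bytes as colon-separated hex, as kernel "%*phC" would. */
static size_t format_mac(char *buf, size_t buflen,
			 const unsigned char *addr, int len)
{
	size_t off = 0;
	int i;

	for (i = 0; i < len && off < buflen; i++)
		off += snprintf(buf + off, buflen - off,
				i ? ":%02x" : "%02x", addr[i]);
	return off;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
	char buf[32];

	format_mac(buf, sizeof(buf), mac, 6);
	printf("%s\n", buf);	/* prints 00:1b:21:aa:bb:cc */
	return 0;
}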
net/ipv4/ip_input.c
@@ -190,10 +190,7 @@ static int ip_local_deliver_finish(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
 
-	__skb_pull(skb, ip_hdrlen(skb));
-
-	/* Point into the IP datagram, just past the header. */
-	skb_reset_transport_header(skb);
+	__skb_pull(skb, skb_network_header_len(skb));
 
 	rcu_read_lock();
 	{
@@ -437,6 +434,8 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 		goto drop;
 	}
 
+	skb->transport_header = skb->network_header + iph->ihl*4;
+
 	/* Remove any debris in the socket control block */
 	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
net/ipv6/ndisc.c
@@ -479,7 +479,7 @@ static void ndisc_send_na(struct net_device *dev, struct neighbour *neigh,
 	if (ifp) {
 		src_addr = solicited_addr;
 		if (ifp->flags & IFA_F_OPTIMISTIC)
-			override = 0;
+			override = false;
 		inc_opt |= ifp->idev->cnf.force_tllao;
 		in6_ifa_put(ifp);
 	} else {
@@ -557,7 +557,7 @@ void ndisc_send_ns(struct net_device *dev, struct neighbour *neigh,
 	}
 
 	if (ipv6_addr_any(saddr))
-		inc_opt = 0;
+		inc_opt = false;
 	if (inc_opt)
 		optlen += ndisc_opt_addr_space(dev);
 
@@ -790,7 +790,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
 	    (is_router = pndisc_is_router(&msg->target, dev)) >= 0)) {
 		if (!(NEIGH_CB(skb)->flags & LOCALLY_ENQUEUED) &&
 		    skb->pkt_type != PACKET_HOST &&
-		    inc != 0 &&
+		    inc &&
 		    idev->nd_parms->proxy_delay != 0) {
 
 			/*
 			 * for anycast or proxy,
net/irda/irlan/irlan_eth.c
@@ -44,12 +44,12 @@ static int irlan_eth_open(struct net_device *dev);
 static int irlan_eth_close(struct net_device *dev);
 static netdev_tx_t irlan_eth_xmit(struct sk_buff *skb,
 				  struct net_device *dev);
-static void irlan_eth_set_multicast_list( struct net_device *dev);
+static void irlan_eth_set_multicast_list(struct net_device *dev);
 
 static const struct net_device_ops irlan_eth_netdev_ops = {
-	.ndo_open               = irlan_eth_open,
-	.ndo_stop               = irlan_eth_close,
-	.ndo_start_xmit         = irlan_eth_xmit,
+	.ndo_open		= irlan_eth_open,
+	.ndo_stop		= irlan_eth_close,
+	.ndo_start_xmit		= irlan_eth_xmit,
 	.ndo_set_rx_mode	= irlan_eth_set_multicast_list,
 	.ndo_change_mtu		= eth_change_mtu,
 	.ndo_validate_addr	= eth_validate_addr,
@@ -110,7 +110,7 @@ static int irlan_eth_open(struct net_device *dev)
 {
 	struct irlan_cb *self = netdev_priv(dev);
 
-	IRDA_DEBUG(2, "%s()\n", __func__ );
+	IRDA_DEBUG(2, "%s()\n", __func__);
 
 	/* Ready to play! */
 	netif_stop_queue(dev); /* Wait until data link is ready */
@@ -137,7 +137,7 @@ static int irlan_eth_close(struct net_device *dev)
 {
 	struct irlan_cb *self = netdev_priv(dev);
 
-	IRDA_DEBUG(2, "%s()\n", __func__ );
+	IRDA_DEBUG(2, "%s()\n", __func__);
 
 	/* Stop device */
 	netif_stop_queue(dev);
@@ -310,35 +310,32 @@ static void irlan_eth_set_multicast_list(struct net_device *dev)
 {
 	struct irlan_cb *self = netdev_priv(dev);
 
-	IRDA_DEBUG(2, "%s()\n", __func__ );
+	IRDA_DEBUG(2, "%s()\n", __func__);
 
 	/* Check if data channel has been connected yet */
 	if (self->client.state != IRLAN_DATA) {
-		IRDA_DEBUG(1, "%s(), delaying!\n", __func__ );
+		IRDA_DEBUG(1, "%s(), delaying!\n", __func__);
 		return;
 	}
 
 	if (dev->flags & IFF_PROMISC) {
 		/* Enable promiscuous mode */
 		IRDA_WARNING("Promiscuous mode not implemented by IrLAN!\n");
-	}
-	else if ((dev->flags & IFF_ALLMULTI) ||
+	} else if ((dev->flags & IFF_ALLMULTI) ||
 		 netdev_mc_count(dev) > HW_MAX_ADDRS) {
 		/* Disable promiscuous mode, use normal mode. */
-		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
+		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
 		/* hardware_set_filter(NULL); */
 
 		irlan_set_multicast_filter(self, TRUE);
-	}
-	else if (!netdev_mc_empty(dev)) {
-		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__ );
+	} else if (!netdev_mc_empty(dev)) {
+		IRDA_DEBUG(4, "%s(), Setting multicast filter\n", __func__);
 		/* Walk the address list, and load the filter */
 		/* hardware_set_filter(dev->mc_list); */
 
 		irlan_set_multicast_filter(self, TRUE);
-	}
-	else {
-		IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__ );
+	} else {
+		IRDA_DEBUG(4, "%s(), Clearing multicast filter\n", __func__);
 		irlan_set_multicast_filter(self, FALSE);
 	}
 
net/sched/sch_qfq.c
@@ -113,7 +113,6 @@
 
 #define FRAC_BITS		30	/* fixed point arithmetic */
 #define ONE_FP			(1UL << FRAC_BITS)
-#define IWSUM			(ONE_FP/QFQ_MAX_WSUM)
 
 #define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
 #define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */
@@ -189,6 +188,7 @@ struct qfq_sched {
 	struct qfq_aggregate *in_serv_agg;	/* Aggregate being served. */
 	u32 num_active_agg;	/* Num. of active aggregates */
 	u32 wsum;		/* weight sum */
+	u32 iwsum;		/* inverse weight sum */
 
 	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
 	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
@@ -314,6 +314,7 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
 
 	q->wsum +=
 		(int) agg->class_weight * (new_num_classes - agg->num_classes);
+	q->iwsum = ONE_FP / q->wsum;
 
 	agg->num_classes = new_num_classes;
 }
@@ -340,6 +341,10 @@ static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
 {
 	if (!hlist_unhashed(&agg->nonfull_next))
 		hlist_del_init(&agg->nonfull_next);
+	q->wsum -= agg->class_weight;
+	if (q->wsum != 0)
+		q->iwsum = ONE_FP / q->wsum;
+
 	if (q->in_serv_agg == agg)
 		q->in_serv_agg = qfq_choose_next_agg(q);
 	kfree(agg);
@@ -834,38 +839,60 @@ static void qfq_make_eligible(struct qfq_sched *q)
 	}
 }
 
-
 /*
- * The index of the slot in which the aggregate is to be inserted must
- * not be higher than QFQ_MAX_SLOTS-2. There is a '-2' and not a '-1'
- * because the start time of the group may be moved backward by one
- * slot after the aggregate has been inserted, and this would cause
- * non-empty slots to be right-shifted by one position.
+ * The index of the slot in which the input aggregate agg is to be
+ * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
+ * and not a '-1' because the start time of the group may be moved
+ * backward by one slot after the aggregate has been inserted, and
+ * this would cause non-empty slots to be right-shifted by one
+ * position.
  *
- * If the weight and lmax (max_pkt_size) of the classes do not change,
- * then QFQ+ does meet the above contraint according to the current
- * values of its parameters. In fact, if the weight and lmax of the
- * classes do not change, then, from the theory, QFQ+ guarantees that
- * the slot index is never higher than
- * 2 + QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
- * (QFQ_MAX_WEIGHT/QFQ_MAX_WSUM) = 2 + 8 * 128 * (1 / 64) = 18
+ * QFQ+ fully satisfies this bound to the slot index if the parameters
+ * of the classes are not changed dynamically, and if QFQ+ never
+ * happens to postpone the service of agg unjustly, i.e., it never
+ * happens that the aggregate becomes backlogged and eligible, or just
+ * eligible, while an aggregate with a higher approximated finish time
+ * is being served. In particular, in this case QFQ+ guarantees that
+ * the timestamps of agg are low enough that the slot index is never
+ * higher than 2. Unfortunately, QFQ+ cannot provide the same
+ * guarantee if it happens to unjustly postpone the service of agg, or
+ * if the parameters of some class are changed.
  *
- * When the weight of a class is increased or the lmax of the class is
- * decreased, a new aggregate with smaller slot size than the original
- * parent aggregate of the class may happen to be activated. The
- * activation of this aggregate should be properly delayed to when the
- * service of the class has finished in the ideal system tracked by
- * QFQ+. If the activation of the aggregate is not delayed to this
- * reference time instant, then this aggregate may be unjustly served
- * before other aggregates waiting for service. This may cause the
- * above bound to the slot index to be violated for some of these
- * unlucky aggregates.
+ * As for the first event, i.e., an out-of-order service, the
+ * upper bound to the slot index guaranteed by QFQ+ grows to
+ * 2 +
+ * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
+ * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
+ *
+ * The following function deals with this problem by backward-shifting
+ * the timestamps of agg, if needed, so as to guarantee that the slot
+ * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
+ * cause the service of other aggregates to be postponed, yet the
+ * worst-case guarantees of these aggregates are not violated. In
+ * fact, in case of no out-of-order service, the timestamps of agg
+ * would have been even lower than they are after the backward shift,
+ * because QFQ+ would have guaranteed a maximum value equal to 2 for
+ * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
+ * service is postponed because of the backward-shift would have
+ * however waited for the service of agg before being served.
+ *
+ * The other event that may cause the slot index to be higher than 2
+ * for agg is a recent change of the parameters of some class. If the
+ * weight of a class is increased or the lmax (max_pkt_size) of the
+ * class is decreased, then a new aggregate with smaller slot size
+ * than the original parent aggregate of the class may happen to be
+ * activated. The activation of this aggregate should be properly
+ * delayed to when the service of the class has finished in the ideal
+ * system tracked by QFQ+. If the activation of the aggregate is not
+ * delayed to this reference time instant, then this aggregate may be
+ * unjustly served before other aggregates waiting for service. This
+ * may cause the above bound to the slot index to be violated for some
+ * of these unlucky aggregates.
  *
  * Instead of delaying the activation of the new aggregate, which is
- * quite complex, the following inaccurate but simple solution is used:
- * if the slot index is higher than QFQ_MAX_SLOTS-2, then the
- * timestamps of the aggregate are shifted backward so as to let the
- * slot index become equal to QFQ_MAX_SLOTS-2.
+ * quite complex, the above-discussed capping of the slot index is
+ * used to handle also the consequences of a change of the parameters
+ * of a class.
  */
 static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
 			    u64 roundedS)
@@ -1136,7 +1163,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
 	else
 		in_serv_agg->budget -= len;
 
-	q->V += (u64)len * IWSUM;
+	q->V += (u64)len * q->iwsum;
 	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
 		 len, (unsigned long long) in_serv_agg->F,
 		 (unsigned long long) q->V);
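The sch_qfq.c change replaces the compile-time IWSUM constant, which divided ONE_FP by the largest weight sum the scheduler could ever have, with q->iwsum, recomputed whenever aggregates are created or destroyed, so the virtual time V advances at the rate implied by the weights actually in use. A small userspace sketch of the FRAC_BITS fixed-point bookkeeping; the QFQ_MAX_WSUM value below is an illustrative assumption:

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS	30			/* fixed point arithmetic */
#define ONE_FP		(1UL << FRAC_BITS)
#define QFQ_MAX_WSUM	(64 * 1024)		/* assumed worst-case weight sum */

int main(void)
{
	uint32_t wsum = 1024;		/* weights actually active */
	uint32_t iwsum = ONE_FP / wsum;	/* per-qdisc inverse, as in the patch */
	uint32_t iwsum_old = ONE_FP / QFQ_MAX_WSUM; /* the old global IWSUM */
	unsigned int len = 1500;	/* one dequeued packet */
	uint64_t v_new = (uint64_t)len * iwsum;
	uint64_t v_old = (uint64_t)len * iwsum_old;

	/* With the old constant, V advances 64x too slowly in this
	 * example (wsum is 1/64 of the assumed maximum), which skews
	 * eligibility decisions and produces the delay/jitter the
	 * patch removes. */
	printf("dV per packet: new=%llu old=%llu\n",
	       (unsigned long long)v_new, (unsigned long long)v_old);
	return 0;
}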