Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) Fix checksumming regressions, from Tom Herbert.

 2) Undo unintentional permissions changes for SCTP rto_alpha and
    rto_beta sysctl knobs, from Daniel Borkmann.

 3) VXLAN, like other IP tunnels, should advertise its encapsulation
    size using dev->needed_headroom instead of dev->hard_header_len.
    From Cong Wang.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: sctp: fix permissions for rto_alpha and rto_beta knobs
  vxlan: Checksum fixes
  net: add skb_pop_rcv_encapsulation
  udp: call __skb_checksum_complete when doing full checksum
  net: Fix save software checksum complete
  net: Fix GSO constants to match NETIF flags
  udp: ipv4: do not waste time in __udp4_lib_mcast_demux_lookup
  vxlan: use dev->needed_headroom instead of dev->hard_header_len
  MAINTAINERS: update cxgb4 maintainer
commit a9be22425e
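As background for point 3, the sketch below models why a tunnel driver advertises its encapsulation overhead through needed_headroom rather than hard_header_len: the former is only an allocation hint for extra headroom, while the latter describes the device's own link-layer header. This is a minimal userspace illustration, not kernel code; the toy struct, toy_vxlan_setup() and the VXLAN_HEADROOM value are assumptions made for the example.

/* Toy model for illustration only (not the kernel's struct net_device). */
#include <stdio.h>

#define ETH_HLEN       14
#define VXLAN_HEADROOM 50   /* stand-in for outer IP + UDP + VXLAN header size */

struct toy_net_device {
	unsigned short hard_header_len; /* size of this device's own link-layer header */
	unsigned short needed_headroom; /* extra headroom to reserve when allocating skbs */
};

/* Mirrors the idea of vxlan_setup() after the fix: the VXLAN netdev still
 * presents a plain Ethernet header, but asks allocators to leave room for
 * the encapsulation headers it will push on transmit.
 */
static void toy_vxlan_setup(struct toy_net_device *dev)
{
	dev->hard_header_len = ETH_HLEN;
	dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
}

int main(void)
{
	struct toy_net_device dev;

	toy_vxlan_setup(&dev);
	printf("hard_header_len=%u needed_headroom=%u\n",
	       dev.hard_header_len, dev.needed_headroom);
	return 0;
}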
@@ -2594,7 +2594,7 @@ S: Supported
 F:	drivers/infiniband/hw/cxgb3/
 
 CXGB4 ETHERNET DRIVER (CXGB4)
-M:	Dimitris Michailidis <dm@chelsio.com>
+M:	Hariprasad S <hariprasad@chelsio.com>
 L:	netdev@vger.kernel.org
 W:	http://www.chelsio.com
 S:	Supported

@@ -1156,15 +1156,7 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
 	if (!vs)
 		goto drop;
 
-	/* If the NIC driver gave us an encapsulated packet
-	 * with the encapsulation mark, the device checksummed it
-	 * for us. Otherwise force the upper layers to verify it.
-	 */
-	if ((skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_PARTIAL) ||
-	    !skb->encapsulation)
-		skb->ip_summed = CHECKSUM_NONE;
-
-	skb->encapsulation = 0;
+	skb_pop_rcv_encapsulation(skb);
 
 	vs->rcv(vs, skb, vxh->vx_vni);
 	return 0;

@@ -1201,6 +1193,7 @@ static void vxlan_rcv(struct vxlan_sock *vs,
 	skb_reset_mac_header(skb);
 	skb_scrub_packet(skb, !net_eq(vxlan->net, dev_net(vxlan->dev)));
 	skb->protocol = eth_type_trans(skb, vxlan->dev);
+	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
 
 	/* Ignore packet loops (and multicast echo) */
 	if (ether_addr_equal(eth_hdr(skb)->h_source, vxlan->dev->dev_addr))

@@ -2247,9 +2240,9 @@ static void vxlan_setup(struct net_device *dev)
 	eth_hw_addr_random(dev);
 	ether_setup(dev);
 	if (vxlan->default_dst.remote_ip.sa.sa_family == AF_INET6)
-		dev->hard_header_len = ETH_HLEN + VXLAN6_HEADROOM;
+		dev->needed_headroom = ETH_HLEN + VXLAN6_HEADROOM;
 	else
-		dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+		dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 
 	dev->netdev_ops = &vxlan_netdev_ops;
 	dev->destructor = free_netdev;

@@ -2646,8 +2639,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
 		if (!tb[IFLA_MTU])
 			dev->mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 
-		/* update header length based on lower device */
-		dev->hard_header_len = lowerdev->hard_header_len +
+		dev->needed_headroom = lowerdev->hard_header_len +
				       (use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
 	} else if (use_ipv6)
 		vxlan->flags |= VXLAN_F_IPV6;

@@ -117,6 +117,7 @@ enum {
 #define NETIF_F_GSO_IPIP	__NETIF_F(GSO_IPIP)
 #define NETIF_F_GSO_SIT		__NETIF_F(GSO_SIT)
 #define NETIF_F_GSO_UDP_TUNNEL	__NETIF_F(GSO_UDP_TUNNEL)
+#define NETIF_F_GSO_UDP_TUNNEL_CSUM __NETIF_F(GSO_UDP_TUNNEL_CSUM)
 #define NETIF_F_GSO_MPLS	__NETIF_F(GSO_MPLS)
 #define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
 #define NETIF_F_HW_VLAN_STAG_RX	__NETIF_F(HW_VLAN_STAG_RX)

@@ -3305,6 +3305,13 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
 	BUILD_BUG_ON(SKB_GSO_TCP_ECN != (NETIF_F_TSO_ECN >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_TCPV6 != (NETIF_F_TSO6 >> NETIF_F_GSO_SHIFT));
 	BUILD_BUG_ON(SKB_GSO_FCOE != (NETIF_F_FSO >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_GRE != (NETIF_F_GSO_GRE >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_GRE_CSUM != (NETIF_F_GSO_GRE_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_IPIP != (NETIF_F_GSO_IPIP >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_SIT != (NETIF_F_GSO_SIT >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL != (NETIF_F_GSO_UDP_TUNNEL >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_UDP_TUNNEL_CSUM != (NETIF_F_GSO_UDP_TUNNEL_CSUM >> NETIF_F_GSO_SHIFT));
+	BUILD_BUG_ON(SKB_GSO_MPLS != (NETIF_F_GSO_MPLS >> NETIF_F_GSO_SHIFT));
 
 	return (features & feature) == feature;
 }

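The BUILD_BUG_ON() additions above, together with the SKB_GSO_* renumbering in the next hunk, enforce one invariant: each SKB_GSO_* type bit must equal its NETIF_F_GSO_* feature flag shifted down by NETIF_F_GSO_SHIFT. Below is a standalone C11 sketch of that check; the TOY_* names and the shift value of 16 are illustrative assumptions, not the kernel definitions.

/* Illustration only: mimics the compile-time check in net_gso_ok(). */
#define TOY_GSO_SHIFT 16   /* assumed stand-in for NETIF_F_GSO_SHIFT */

/* GSO type bits as carried in skb_shinfo(skb)->gso_type (illustrative values). */
enum {
	TOY_SKB_GSO_GRE      = 1 << 6,
	TOY_SKB_GSO_GRE_CSUM = 1 << 7,
};

/* Matching device feature bits live TOY_GSO_SHIFT positions higher. */
#define TOY_NETIF_F_GSO_GRE      (1ULL << (6 + TOY_GSO_SHIFT))
#define TOY_NETIF_F_GSO_GRE_CSUM (1ULL << (7 + TOY_GSO_SHIFT))

/* If a GSO constant and its NETIF flag ever drift apart, this fails to build. */
_Static_assert(TOY_SKB_GSO_GRE == (TOY_NETIF_F_GSO_GRE >> TOY_GSO_SHIFT),
	       "GSO type bit must match its NETIF feature flag");
_Static_assert(TOY_SKB_GSO_GRE_CSUM == (TOY_NETIF_F_GSO_GRE_CSUM >> TOY_GSO_SHIFT),
	       "GSO type bit must match its NETIF feature flag");

int main(void) { return 0; }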
@@ -338,17 +338,18 @@ enum {
 
 	SKB_GSO_GRE = 1 << 6,
 
-	SKB_GSO_IPIP = 1 << 7,
+	SKB_GSO_GRE_CSUM = 1 << 7,
 
-	SKB_GSO_SIT = 1 << 8,
+	SKB_GSO_IPIP = 1 << 8,
 
-	SKB_GSO_UDP_TUNNEL = 1 << 9,
+	SKB_GSO_SIT = 1 << 9,
 
-	SKB_GSO_MPLS = 1 << 10,
+	SKB_GSO_UDP_TUNNEL = 1 << 10,
 
 	SKB_GSO_UDP_TUNNEL_CSUM = 1 << 11,
 
-	SKB_GSO_GRE_CSUM = 1 << 12,
+	SKB_GSO_MPLS = 1 << 12,
+
 };
 
 #if BITS_PER_LONG > 32

@@ -1853,6 +1854,18 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
 	return pskb_may_pull(skb, skb_network_offset(skb) + len);
 }
 
+static inline void skb_pop_rcv_encapsulation(struct sk_buff *skb)
+{
+	/* Only continue with checksum unnecessary if device indicated
+	 * it is valid across encapsulation (skb->encapsulation was set).
+	 */
+	if (skb->ip_summed == CHECKSUM_UNNECESSARY && !skb->encapsulation)
+		skb->ip_summed = CHECKSUM_NONE;
+
+	skb->encapsulation = 0;
+	skb->csum_valid = 0;
+}
+
 /*
  * CPUs often take a performance hit when accessing unaligned memory
  * locations. The actual performance hit varies, it can be small if the

@@ -111,7 +111,9 @@ struct sk_buff;
  */
 static inline __sum16 __udp_lib_checksum_complete(struct sk_buff *skb)
 {
-	return __skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov);
+	return (UDP_SKB_CB(skb)->cscov == skb->len ?
+		__skb_checksum_complete(skb) :
+		__skb_checksum_complete_head(skb, UDP_SKB_CB(skb)->cscov));
 }
 
 static inline int udp_lib_checksum_complete(struct sk_buff *skb)

@@ -739,22 +739,38 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
 	__sum16 sum;
 
 	sum = csum_fold(skb_checksum(skb, 0, len, skb->csum));
-	if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) && !sum &&
-	    !skb->csum_complete_sw)
-		netdev_rx_csum_fault(skb->dev);
-
-	/* Save checksum complete for later use */
-	skb->csum = sum;
-	skb->ip_summed = CHECKSUM_COMPLETE;
-	skb->csum_complete_sw = 1;
-
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev);
+	}
+	skb->csum_valid = !sum;
 	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete_head);
 
 __sum16 __skb_checksum_complete(struct sk_buff *skb)
 {
-	return __skb_checksum_complete_head(skb, skb->len);
+	__wsum csum;
+	__sum16 sum;
+
+	csum = skb_checksum(skb, 0, skb->len, 0);
+
+	/* skb->csum holds pseudo checksum */
+	sum = csum_fold(csum_add(skb->csum, csum));
+	if (likely(!sum)) {
+		if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
+		    !skb->csum_complete_sw)
+			netdev_rx_csum_fault(skb->dev);
+	}
+
+	/* Save full packet checksum */
+	skb->csum = csum;
+	skb->ip_summed = CHECKSUM_COMPLETE;
+	skb->csum_complete_sw = 1;
+	skb->csum_valid = !sum;
+
+	return sum;
 }
 EXPORT_SYMBOL(__skb_checksum_complete);
 

@@ -689,6 +689,9 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->ooo_okay = old->ooo_okay;
 	new->no_fcs = old->no_fcs;
 	new->encapsulation = old->encapsulation;
+	new->encap_hdr_csum = old->encap_hdr_csum;
+	new->csum_valid = old->csum_valid;
+	new->csum_complete_sw = old->csum_complete_sw;
 #ifdef CONFIG_XFRM
 	new->sp = secpath_get(old->sp);
 #endif

@@ -1861,6 +1861,10 @@ static struct sock *__udp4_lib_mcast_demux_lookup(struct net *net,
 	unsigned int count, slot = udp_hashfn(net, hnum, udp_table.mask);
 	struct udp_hslot *hslot = &udp_table.hash[slot];
 
+	/* Do not bother scanning a too big list */
+	if (hslot->count > 10)
+		return NULL;
+
 	rcu_read_lock();
 begin:
 	count = 0;

@@ -34,6 +34,8 @@
  *    Sridhar Samudrala <sri@us.ibm.com>
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <net/sctp/structs.h>
 #include <net/sctp/sctp.h>
 #include <linux/sysctl.h>

@@ -46,6 +48,11 @@ static int sack_timer_min = 1;
 static int sack_timer_max = 500;
 static int addr_scope_max = 3; /* check sctp_scope_policy_t in include/net/sctp/constants.h for max entries */
 static int rwnd_scale_max = 16;
+static int rto_alpha_min = 0;
+static int rto_beta_min = 0;
+static int rto_alpha_max = 1000;
+static int rto_beta_max = 1000;
+
 static unsigned long max_autoclose_min = 0;
 static unsigned long max_autoclose_max =
 	(MAX_SCHEDULE_TIMEOUT / HZ > UINT_MAX)

@@ -64,6 +71,9 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
 static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 				void __user *buffer, size_t *lenp,
 				loff_t *ppos);
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos);
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos);

@@ -126,15 +136,19 @@ static struct ctl_table sctp_net_table[] = {
 		.procname	= "rto_alpha_exp_divisor",
 		.data		= &init_net.sctp.rto_alpha,
 		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.mode		= 0644,
+		.proc_handler	= proc_sctp_do_alpha_beta,
+		.extra1		= &rto_alpha_min,
+		.extra2		= &rto_alpha_max,
 	},
 	{
 		.procname	= "rto_beta_exp_divisor",
 		.data		= &init_net.sctp.rto_beta,
 		.maxlen		= sizeof(int),
-		.mode		= 0444,
-		.proc_handler	= proc_dointvec,
+		.mode		= 0644,
+		.proc_handler	= proc_sctp_do_alpha_beta,
+		.extra1		= &rto_beta_min,
+		.extra2		= &rto_beta_max,
 	},
 	{
 		.procname	= "max_burst",

@@ -403,6 +417,16 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
 	return ret;
 }
 
+static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
+				   void __user *buffer, size_t *lenp,
+				   loff_t *ppos)
+{
+	pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+		     "suboptimal rtt/srtt estimations!\n");
+
+	return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
+}
+
 static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
 			     void __user *buffer, size_t *lenp,
 			     loff_t *ppos)