Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
 "Hopefully this is the last batch of networking fixes for 4.14

  Fingers crossed...

   1) Fix stmmac to use the proper sized OF property read, from Bhadram Varka.

   2) Fix use after free in net scheduler tc action code, from Cong Wang.

   3) Fix SKB control block mangling in tcp_make_synack().

   4) Use proper locking in fib_dump_info(), from Florian Westphal.

   5) Fix IPG encodings in systemport driver, from Florian Fainelli.

   6) Fix division by zero in NV TCP congestion control module, from Konstantin Khlebnikov.

   7) Fix use after free in nf_reject_ipv4, from Tejaswi Tanikella"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net:
  net: systemport: Correct IPG length settings
  tcp: do not mangle skb->cb[] in tcp_make_synack()
  fib: fib_dump_info can no longer use __in_dev_get_rtnl
  stmmac: use of_property_read_u32 instead of read_u8
  net_sched: hold netns refcnt for each action
  net_sched: acquire RTNL in tc_action_net_exit()
  net: vrf: correct FRA_L3MDEV encode type
  tcp_nv: fix division by zero in tcpnv_acked()
  netfilter: nf_reject_ipv4: Fix use-after-free in send_reset
  netfilter: nft_set_hash: disable fast_ops for 2-len keys
commit 7ba3ebff9c
@@ -1809,15 +1809,17 @@ static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
 
 static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
 {
-	u32 __maybe_unused reg;
+	u32 reg;
 
-	/* Include Broadcom tag in pad extension */
+	reg = gib_readl(priv, GIB_CONTROL);
+	/* Include Broadcom tag in pad extension and fix up IPG_LENGTH */
 	if (netdev_uses_dsa(priv->netdev)) {
-		reg = gib_readl(priv, GIB_CONTROL);
 		reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
 		reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
-		gib_writel(priv, reg, GIB_CONTROL);
 	}
+	reg &= ~(GIB_IPG_LEN_MASK << GIB_IPG_LEN_SHIFT);
+	reg |= 12 << GIB_IPG_LEN_SHIFT;
+	gib_writel(priv, reg, GIB_CONTROL);
 }
 
 static int bcm_sysport_open(struct net_device *dev)
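The fix above turns the helper into a single read-modify-write of GIB_CONTROL: the register is read once, the pad-extension field is only adjusted when DSA is in use, and the IPG length field is always cleared and reprogrammed to 12 before the value is written back. A minimal userspace sketch of the same mask-and-shift arithmetic, using made-up field definitions rather than the real GIB_* macros:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical field layout, not the real GIB_CONTROL definitions. */
#define IPG_LEN_MASK	0x3f
#define IPG_LEN_SHIFT	8

int main(void)
{
	uint32_t reg = 0xdeadbe00;	/* pretend this came from gib_readl() */

	reg &= ~(IPG_LEN_MASK << IPG_LEN_SHIFT);	/* clear the old IPG field */
	reg |= 12 << IPG_LEN_SHIFT;			/* program an IPG of 12    */

	printf("reg = 0x%08x, ipg = %u\n",
	       reg, (reg >> IPG_LEN_SHIFT) & IPG_LEN_MASK);
	return 0;
}
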
@@ -168,8 +168,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 	}
 
 	/* Processing RX queues common config */
-	if (of_property_read_u8(rx_node, "snps,rx-queues-to-use",
-				&plat->rx_queues_to_use))
+	if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+				 &plat->rx_queues_to_use))
 		plat->rx_queues_to_use = 1;
 
 	if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
@@ -191,8 +191,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 		else
 			plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
 
-		if (of_property_read_u8(q_node, "snps,map-to-dma-channel",
-					&plat->rx_queues_cfg[queue].chan))
+		if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
+					 &plat->rx_queues_cfg[queue].chan))
 			plat->rx_queues_cfg[queue].chan = queue;
 		/* TODO: Dynamic mapping to be included in the future */
 
@@ -222,8 +222,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 	}
 
 	/* Processing TX queues common config */
-	if (of_property_read_u8(tx_node, "snps,tx-queues-to-use",
-				&plat->tx_queues_to_use))
+	if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+				 &plat->tx_queues_to_use))
 		plat->tx_queues_to_use = 1;
 
 	if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
@@ -244,8 +244,8 @@ static void stmmac_mtl_setup(struct platform_device *pdev,
 		if (queue >= plat->tx_queues_to_use)
 			break;
 
-		if (of_property_read_u8(q_node, "snps,weight",
-					&plat->tx_queues_cfg[queue].weight))
+		if (of_property_read_u32(q_node, "snps,weight",
+					 &plat->tx_queues_cfg[queue].weight))
 			plat->tx_queues_cfg[queue].weight = 0x10 + queue;
 
 		if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
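Note on the change above: device-tree integer cells are stored as 32-bit big-endian values, so reading only one byte of such a cell cannot return the intended value (the first byte of a small big-endian number is 0). The fix reads the properties as u32, and the companion header change further down widens the destination fields to match. A userspace illustration of the mismatch, with the property name taken from the diff and the decoding hand-rolled for the example:

#include <stdio.h>
#include <stdint.h>

/* "snps,rx-queues-to-use = <4>;" as it sits in the flattened device tree:
 * one 32-bit big-endian cell.
 */
static const uint8_t cell[4] = { 0x00, 0x00, 0x00, 0x04 };

int main(void)
{
	uint8_t  as_u8  = cell[0];		/* a one-byte read sees 0 */
	uint32_t as_u32 = (uint32_t)cell[0] << 24 |
			  (uint32_t)cell[1] << 16 |
			  (uint32_t)cell[2] <<  8 |
			  (uint32_t)cell[3];	/* full 32-bit decode: 4  */

	printf("u8 read: %u, u32 read: %u\n", as_u8, as_u32);
	return 0;
}
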
@@ -1165,7 +1165,7 @@ static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
 	frh->family = family;
 	frh->action = FR_ACT_TO_TBL;
 
-	if (nla_put_u32(skb, FRA_L3MDEV, 1))
+	if (nla_put_u8(skb, FRA_L3MDEV, 1))
 		goto nla_put_failure;
 
 	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
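FRA_L3MDEV is handled by the FIB rule code as a single-byte attribute, so encoding it with nla_put_u32() produced a netlink TLV whose length and payload layout did not match what the receiver expects; nla_put_u8() keeps sender and receiver in agreement. A simplified sketch of the length difference (the struct and the type value are illustrative, not the real uapi definitions):

#include <stdio.h>

/* Simplified netlink TLV header; layout mirrors struct nlattr. */
struct tlv {
	unsigned short len;	/* header + payload, before padding */
	unsigned short type;
};

#define HDRLEN 4

int main(void)
{
	struct tlv as_u8  = { HDRLEN + 1, 1 };	/* what nla_put_u8() emits  */
	struct tlv as_u32 = { HDRLEN + 4, 1 };	/* what nla_put_u32() emits */

	printf("u8 attribute:  len=%hu\n", as_u8.len);
	printf("u32 attribute: len=%hu\n", as_u32.len);
	return 0;
}
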
@@ -126,14 +126,14 @@ struct stmmac_axi {
 
 struct stmmac_rxq_cfg {
 	u8 mode_to_use;
-	u8 chan;
+	u32 chan;
 	u8 pkt_route;
 	bool use_prio;
 	u32 prio;
 };
 
 struct stmmac_txq_cfg {
-	u8 weight;
+	u32 weight;
 	u8 mode_to_use;
 	/* Credit Base Shaper parameters */
 	u32 send_slope;
@@ -168,8 +168,8 @@ struct plat_stmmacenet_data {
 	int unicast_filter_entries;
 	int tx_fifo_size;
 	int rx_fifo_size;
-	u8 rx_queues_to_use;
-	u8 tx_queues_to_use;
+	u32 rx_queues_to_use;
+	u32 tx_queues_to_use;
 	u8 rx_sched_algorithm;
 	u8 tx_sched_algorithm;
 	struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
@@ -14,6 +14,7 @@
 struct tcf_idrinfo {
 	spinlock_t lock;
 	struct idr action_idr;
+	struct net *net;
 };
 
 struct tc_action_ops;
@@ -105,7 +106,7 @@ struct tc_action_net {
 
 static inline
 int tc_action_net_init(struct tc_action_net *tn,
-		       const struct tc_action_ops *ops)
+		       const struct tc_action_ops *ops, struct net *net)
 {
 	int err = 0;
 
@@ -113,6 +114,7 @@ int tc_action_net_init(struct tc_action_net *tn,
 	if (!tn->idrinfo)
 		return -ENOMEM;
 	tn->ops = ops;
+	tn->idrinfo->net = net;
 	spin_lock_init(&tn->idrinfo->lock);
 	idr_init(&tn->idrinfo->action_idr);
 	return err;
@@ -123,7 +125,9 @@ void tcf_idrinfo_destroy(const struct tc_action_ops *ops,
 
 static inline void tc_action_net_exit(struct tc_action_net *tn)
 {
+	rtnl_lock();
 	tcf_idrinfo_destroy(tn->ops, tn->idrinfo);
+	rtnl_unlock();
 	kfree(tn->idrinfo);
 }
 
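tcf_idrinfo_destroy() walks and frees actions that every other code path only touches while holding RTNL, so the namespace-exit path now takes RTNL around the teardown as well. As a generic userspace analogy (hypothetical names, a pthread mutex standing in for RTNL):

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical table whose users all hold 'lock', like tc actions and RTNL. */
struct table {
	pthread_mutex_t lock;
	void **slots;
	size_t nslots;
};

static void table_exit(struct table *t)
{
	/* Free the entries under the same lock the other users take,
	 * mirroring rtnl_lock()/rtnl_unlock() around tcf_idrinfo_destroy().
	 */
	pthread_mutex_lock(&t->lock);
	for (size_t i = 0; i < t->nslots; i++)
		free(t->slots[i]);
	free(t->slots);
	t->slots = NULL;
	pthread_mutex_unlock(&t->lock);
}
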
@@ -1365,8 +1365,6 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 	    nla_put_in_addr(skb, RTA_PREFSRC, fi->fib_prefsrc))
 		goto nla_put_failure;
 	if (fi->fib_nhs == 1) {
-		struct in_device *in_dev;
-
 		if (fi->fib_nh->nh_gw &&
 		    nla_put_in_addr(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
 			goto nla_put_failure;
@@ -1374,10 +1372,14 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 		    nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
 			goto nla_put_failure;
 		if (fi->fib_nh->nh_flags & RTNH_F_LINKDOWN) {
-			in_dev = __in_dev_get_rtnl(fi->fib_nh->nh_dev);
+			struct in_device *in_dev;
+
+			rcu_read_lock();
+			in_dev = __in_dev_get_rcu(fi->fib_nh->nh_dev);
 			if (in_dev &&
 			    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
 				rtm->rtm_flags |= RTNH_F_DEAD;
+			rcu_read_unlock();
 		}
 		if (fi->fib_nh->nh_flags & RTNH_F_OFFLOAD)
 			rtm->rtm_flags |= RTNH_F_OFFLOAD;
@@ -1400,18 +1402,20 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
 			goto nla_put_failure;
 
 		for_nexthops(fi) {
-			struct in_device *in_dev;
-
 			rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
 			if (!rtnh)
 				goto nla_put_failure;
 
 			rtnh->rtnh_flags = nh->nh_flags & 0xFF;
 			if (nh->nh_flags & RTNH_F_LINKDOWN) {
-				in_dev = __in_dev_get_rtnl(nh->nh_dev);
+				struct in_device *in_dev;
+
+				rcu_read_lock();
+				in_dev = __in_dev_get_rcu(nh->nh_dev);
 				if (in_dev &&
 				    IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
 					rtnh->rtnh_flags |= RTNH_F_DEAD;
+				rcu_read_unlock();
 			}
 			rtnh->rtnh_hops = nh->nh_weight - 1;
 			rtnh->rtnh_ifindex = nh->nh_oif;
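The two hunks above replace __in_dev_get_rtnl(), which is only legal while RTNL is held, with the RCU accessor bracketed by an explicit read-side critical section, since fib_dump_info() can run without RTNL. The shape of the pattern as a sketch (kernel primitives, not a complete function):

/* Sketch only: dereference an RCU-protected pointer inside a read-side
 * critical section and do not let it escape.
 */
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);		/* may be NULL */
if (in_dev && IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev))
	flags |= RTNH_F_DEAD;		/* use it only inside the section */
rcu_read_unlock();			/* in_dev must not be used after this */
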
@@ -132,6 +132,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
 	if (ip_route_me_harder(net, nskb, RTN_UNSPEC))
 		goto free_nskb;
 
+	niph = ip_hdr(nskb);
+
 	/* "Never happens" */
 	if (nskb->len > dst_mtu(skb_dst(nskb)))
 		goto free_nskb;
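The use-after-free fixed above comes from caching niph before ip_route_me_harder(), which may reallocate the skb's data; the pointer must be re-derived with ip_hdr(nskb) after the call. The same hazard in miniature, with realloc() standing in for the skb reallocation and arbitrary sizes:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(16);

	if (!buf)
		return 1;
	strcpy(buf, "IPv4");
	char *hdr = buf;		/* like niph taken before the call     */

	buf = realloc(buf, 4096);	/* may move the data, the way
					 * ip_route_me_harder() may reallocate
					 * the skb header                      */
	if (!buf)
		return 1;

	hdr = buf;			/* the fix: re-derive the pointer after
					 * any call that may have moved it     */
	printf("%s\n", hdr);
	free(buf);
	return 0;
}
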
@@ -252,7 +252,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 
 	/* rate in 100's bits per second */
 	rate64 = ((u64)sample->in_flight) * 8000000;
-	rate = (u32)div64_u64(rate64, (u64)(avg_rtt * 100));
+	rate = (u32)div64_u64(rate64, (u64)(avg_rtt ?: 1) * 100);
 
 	/* Remember the maximum rate seen during this RTT
 	 * Note: It may be more than one RTT. This function should be
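avg_rtt can legitimately be zero here and div64_u64() does not guard its divisor, so the old expression could divide by zero; (avg_rtt ?: 1) uses the GCC/Clang a ?: b extension, equivalent to a ? a : 1, to force a non-zero divisor. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate64  = 12345ULL * 8000000;	/* in_flight scaled, as above */
	uint32_t avg_rtt = 0;			/* the problematic input      */

	/* (avg_rtt ?: 1) is shorthand for (avg_rtt ? avg_rtt : 1), so the
	 * divisor is at least 100 even when avg_rtt is 0.
	 */
	uint64_t rate = rate64 / ((uint64_t)(avg_rtt ?: 1) * 100);

	printf("rate = %llu\n", (unsigned long long)rate);
	return 0;
}
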
@@ -3180,13 +3180,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	th->source = htons(ireq->ir_num);
 	th->dest = ireq->ir_rmt_port;
 	skb->mark = ireq->ir_mark;
-	/* Setting of flags are superfluous here for callers (and ECE is
-	 * not even correctly set)
-	 */
-	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-			     TCPHDR_SYN | TCPHDR_ACK);
-
-	th->seq = htonl(TCP_SKB_CB(skb)->seq);
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	th->seq = htonl(tcp_rsk(req)->snt_isn);
 	/* XXX data is queued and acked as is. No buffer/window check */
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
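The removed tcp_init_nondata_skb() call wrote sequence numbers and flags into TCP_SKB_CB(skb), i.e. into skb->cb[], which at this point still belongs to the caller; the fix fills the TCP header fields directly and leaves the control block untouched. A generic userspace illustration of the same rule, with hypothetical types:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical packet with a caller-owned scratch area, like skb->cb[]. */
struct pkt {
	char cb[48];		/* owned by whoever queued the packet */
	uint32_t seq;		/* the on-the-wire header field       */
};

/* Bad: stages the value in the caller's scratch area first. */
static void build_reply_bad(struct pkt *p, uint32_t isn)
{
	memcpy(p->cb, &isn, sizeof(isn));	/* clobbers caller state */
	memcpy(&p->seq, p->cb, sizeof(p->seq));
}

/* Good: write the header field directly, as the fixed code does. */
static void build_reply_good(struct pkt *p, uint32_t isn)
{
	p->seq = isn;
}

int main(void)
{
	struct pkt p = { .cb = "caller state" };

	build_reply_good(&p, 42);
	printf("seq=%u, cb=\"%s\" (intact)\n", p.seq, p.cb);

	build_reply_bad(&p, 42);
	printf("seq=%u, cb no longer holds the caller's data\n", p.seq);
	return 0;
}
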
@@ -643,7 +643,6 @@ nft_hash_select_ops(const struct nft_ctx *ctx, const struct nft_set_desc *desc,
 {
 	if (desc->size) {
 		switch (desc->klen) {
-		case 2:
 		case 4:
 			return &nft_hash_fast_ops;
 		default:
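The likely rationale for dropping the 2-byte case: nft_hash_fast_ops hashes the key as a single 32-bit word, and for a key that is really only 2 bytes wide that does not produce the same hash the generic insert/lookup path computes over the actual key length, so elements would never be found. A userspace sketch of that kind of mismatch, with a trivial byte-wise hash standing in for jhash:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

/* Stand-in for jhash: FNV-1a over an arbitrary number of bytes. */
static uint32_t hash_bytes(const void *data, size_t len)
{
	const uint8_t *p = data;
	uint32_t h = 2166136261u;

	while (len--) {
		h ^= *p++;
		h *= 16777619u;
	}
	return h;
}

int main(void)
{
	uint16_t key  = 0xbeef;		/* a 2-byte set key            */
	uint32_t word = key;		/* the same key widened to u32 */

	/* Hashing the real 2-byte key and hashing it as one 32-bit word
	 * give different results, so a lookup that used the widened form
	 * would miss entries inserted with the exact-length form.
	 */
	printf("hash(key, 2)  = %#x\n", hash_bytes(&key, sizeof(key)));
	printf("hash(word, 4) = %#x\n", hash_bytes(&word, sizeof(word)));
	return 0;
}
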
@@ -78,6 +78,8 @@ static void tcf_idr_remove(struct tcf_idrinfo *idrinfo, struct tc_action *p)
 	spin_lock_bh(&idrinfo->lock);
 	idr_remove_ext(&idrinfo->action_idr, p->tcfa_index);
 	spin_unlock_bh(&idrinfo->lock);
+	put_net(idrinfo->net);
 	gen_kill_estimator(&p->tcfa_rate_est);
 	free_tcf(p);
 }
@@ -86,6 +87,8 @@ int __tcf_idr_release(struct tc_action *p, bool bind, bool strict)
 {
 	int ret = 0;
 
+	ASSERT_RTNL();
+
 	if (p) {
 		if (bind)
 			p->tcfa_bindcnt--;
@@ -334,6 +337,7 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
 	p->idrinfo = idrinfo;
 	p->ops = ops;
 	INIT_LIST_HEAD(&p->list);
+	get_net(idrinfo->net);
 	*a = p;
 	return 0;
 }
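With these hunks each action pins its network namespace for its whole lifetime: tcf_idr_create() takes a reference with get_net() and tcf_idr_remove() drops it with put_net(), so idrinfo->net cannot go away underneath a live action. The same ownership rule in a generic userspace form (hypothetical names, a plain counter instead of struct net's refcount):

#include <stdlib.h>

/* Hypothetical refcounted environment standing in for struct net. */
struct env {
	int refcount;
};

struct action {
	struct env *env;	/* each action pins the environment it lives in */
};

static struct action *action_create(struct env *e)
{
	struct action *a = calloc(1, sizeof(*a));

	if (!a)
		return NULL;
	a->env = e;
	e->refcount++;		/* mirrors get_net(idrinfo->net) */
	return a;
}

static void action_destroy(struct action *a)
{
	a->env->refcount--;	/* mirrors put_net(idrinfo->net) */
	free(a);
}
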
@@ -398,7 +398,7 @@ static __net_init int bpf_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, bpf_net_id);
 
-	return tc_action_net_init(tn, &act_bpf_ops);
+	return tc_action_net_init(tn, &act_bpf_ops, net);
 }
 
 static void __net_exit bpf_exit_net(struct net *net)
@@ -206,7 +206,7 @@ static __net_init int connmark_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, connmark_net_id);
 
-	return tc_action_net_init(tn, &act_connmark_ops);
+	return tc_action_net_init(tn, &act_connmark_ops, net);
 }
 
 static void __net_exit connmark_exit_net(struct net *net)
@@ -626,7 +626,7 @@ static __net_init int csum_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, csum_net_id);
 
-	return tc_action_net_init(tn, &act_csum_ops);
+	return tc_action_net_init(tn, &act_csum_ops, net);
 }
 
 static void __net_exit csum_exit_net(struct net *net)
@@ -232,7 +232,7 @@ static __net_init int gact_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, gact_net_id);
 
-	return tc_action_net_init(tn, &act_gact_ops);
+	return tc_action_net_init(tn, &act_gact_ops, net);
 }
 
 static void __net_exit gact_exit_net(struct net *net)
@@ -818,7 +818,7 @@ static __net_init int ife_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, ife_net_id);
 
-	return tc_action_net_init(tn, &act_ife_ops);
+	return tc_action_net_init(tn, &act_ife_ops, net);
 }
 
 static void __net_exit ife_exit_net(struct net *net)
@@ -334,7 +334,7 @@ static __net_init int ipt_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, ipt_net_id);
 
-	return tc_action_net_init(tn, &act_ipt_ops);
+	return tc_action_net_init(tn, &act_ipt_ops, net);
 }
 
 static void __net_exit ipt_exit_net(struct net *net)
@@ -384,7 +384,7 @@ static __net_init int xt_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, xt_net_id);
 
-	return tc_action_net_init(tn, &act_xt_ops);
+	return tc_action_net_init(tn, &act_xt_ops, net);
 }
 
 static void __net_exit xt_exit_net(struct net *net)
@@ -343,7 +343,7 @@ static __net_init int mirred_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, mirred_net_id);
 
-	return tc_action_net_init(tn, &act_mirred_ops);
+	return tc_action_net_init(tn, &act_mirred_ops, net);
 }
 
 static void __net_exit mirred_exit_net(struct net *net)
@@ -307,7 +307,7 @@ static __net_init int nat_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, nat_net_id);
 
-	return tc_action_net_init(tn, &act_nat_ops);
+	return tc_action_net_init(tn, &act_nat_ops, net);
 }
 
 static void __net_exit nat_exit_net(struct net *net)
@@ -450,7 +450,7 @@ static __net_init int pedit_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, pedit_net_id);
 
-	return tc_action_net_init(tn, &act_pedit_ops);
+	return tc_action_net_init(tn, &act_pedit_ops, net);
 }
 
 static void __net_exit pedit_exit_net(struct net *net)
@@ -331,7 +331,7 @@ static __net_init int police_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, police_net_id);
 
-	return tc_action_net_init(tn, &act_police_ops);
+	return tc_action_net_init(tn, &act_police_ops, net);
 }
 
 static void __net_exit police_exit_net(struct net *net)
@@ -240,7 +240,7 @@ static __net_init int sample_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, sample_net_id);
 
-	return tc_action_net_init(tn, &act_sample_ops);
+	return tc_action_net_init(tn, &act_sample_ops, net);
 }
 
 static void __net_exit sample_exit_net(struct net *net)
@@ -201,7 +201,7 @@ static __net_init int simp_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, simp_net_id);
 
-	return tc_action_net_init(tn, &act_simp_ops);
+	return tc_action_net_init(tn, &act_simp_ops, net);
 }
 
 static void __net_exit simp_exit_net(struct net *net)
@@ -238,7 +238,7 @@ static __net_init int skbedit_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
 
-	return tc_action_net_init(tn, &act_skbedit_ops);
+	return tc_action_net_init(tn, &act_skbedit_ops, net);
 }
 
 static void __net_exit skbedit_exit_net(struct net *net)
@@ -263,7 +263,7 @@ static __net_init int skbmod_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, skbmod_net_id);
 
-	return tc_action_net_init(tn, &act_skbmod_ops);
+	return tc_action_net_init(tn, &act_skbmod_ops, net);
 }
 
 static void __net_exit skbmod_exit_net(struct net *net)
@@ -322,7 +322,7 @@ static __net_init int tunnel_key_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, tunnel_key_net_id);
 
-	return tc_action_net_init(tn, &act_tunnel_key_ops);
+	return tc_action_net_init(tn, &act_tunnel_key_ops, net);
 }
 
 static void __net_exit tunnel_key_exit_net(struct net *net)
@@ -269,7 +269,7 @@ static __net_init int vlan_init_net(struct net *net)
 {
 	struct tc_action_net *tn = net_generic(net, vlan_net_id);
 
-	return tc_action_net_init(tn, &act_vlan_ops);
+	return tc_action_net_init(tn, &act_vlan_ops, net);
 }
 
 static void __net_exit vlan_exit_net(struct net *net)