Magellan Linux

Annotation of /trunk/kernel-magellan/patches-4.16/0109-4.16.10-all-fixes.patch



Revision 3113 - Tue May 22 11:46:30 2018 UTC by niro
File size: 61509 bytes
-linux-4.16.10
1 niro 3113 diff --git a/Makefile b/Makefile
2     index ea3cb221d7c5..33f3c94f02ca 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 16
9     -SUBLEVEL = 9
10     +SUBLEVEL = 10
11     EXTRAVERSION =
12     NAME = Fearless Coyote
13    
14     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
15     index 7f8bda3a2005..0881f7907848 100644
16     --- a/drivers/infiniband/hw/mlx5/main.c
17     +++ b/drivers/infiniband/hw/mlx5/main.c
18     @@ -4303,7 +4303,7 @@ mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
19     {
20     struct mlx5_ib_dev *dev = to_mdev(ibdev);
21    
22     - return mlx5_get_vector_affinity(dev->mdev, comp_vector);
23     + return mlx5_get_vector_affinity_hint(dev->mdev, comp_vector);
24     }
25    
26     /* The mlx5_ib_multiport_mutex should be held when calling this function */
27     diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
28     index 1ed9529e7bd1..5eb0df2e5464 100644
29     --- a/drivers/net/bonding/bond_alb.c
30     +++ b/drivers/net/bonding/bond_alb.c
31     @@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
32     {
33     int i;
34    
35     - if (!client_info->slave)
36     + if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
37     return;
38    
39     for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
40     @@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
41     skb->priority = TC_PRIO_CONTROL;
42     skb->dev = slave->dev;
43    
44     + netdev_dbg(slave->bond->dev,
45     + "Send learning packet: dev %s mac %pM vlan %d\n",
46     + slave->dev->name, mac_addr, vid);
47     +
48     if (vid)
49     __vlan_hwaccel_put_tag(skb, vlan_proto, vid);
50    
51     @@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
52     u8 *mac_addr = data->mac_addr;
53     struct bond_vlan_tag *tags;
54    
55     - if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
56     - if (strict_match &&
57     - ether_addr_equal_64bits(mac_addr,
58     - upper->dev_addr)) {
59     + if (is_vlan_dev(upper) &&
60     + bond->nest_level == vlan_get_encap_level(upper) - 1) {
61     + if (upper->addr_assign_type == NET_ADDR_STOLEN) {
62     alb_send_lp_vid(slave, mac_addr,
63     vlan_dev_vlan_proto(upper),
64     vlan_dev_vlan_id(upper));
65     - } else if (!strict_match) {
66     + } else {
67     alb_send_lp_vid(slave, upper->dev_addr,
68     vlan_dev_vlan_proto(upper),
69     vlan_dev_vlan_id(upper));
70     diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
71     index 718e4914e3a0..1f1e97b26f95 100644
72     --- a/drivers/net/bonding/bond_main.c
73     +++ b/drivers/net/bonding/bond_main.c
74     @@ -1738,6 +1738,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
75     if (bond_mode_uses_xmit_hash(bond))
76     bond_update_slave_arr(bond, NULL);
77    
78     + bond->nest_level = dev_get_nest_level(bond_dev);
79     +
80     netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
81     slave_dev->name,
82     bond_is_active_slave(new_slave) ? "an active" : "a backup",
83     diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
84     index 3fc549b88c43..d61e51ebca51 100644
85     --- a/drivers/net/ethernet/broadcom/bcmsysport.c
86     +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
87     @@ -2052,14 +2052,21 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
88     .ndo_select_queue = bcm_sysport_select_queue,
89     };
90    
91     -static int bcm_sysport_map_queues(struct net_device *dev,
92     +static int bcm_sysport_map_queues(struct notifier_block *nb,
93     struct dsa_notifier_register_info *info)
94     {
95     - struct bcm_sysport_priv *priv = netdev_priv(dev);
96     struct bcm_sysport_tx_ring *ring;
97     + struct bcm_sysport_priv *priv;
98     struct net_device *slave_dev;
99     unsigned int num_tx_queues;
100     unsigned int q, start, port;
101     + struct net_device *dev;
102     +
103     + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier);
104     + if (priv->netdev != info->master)
105     + return 0;
106     +
107     + dev = info->master;
108    
109     /* We can't be setting up queue inspection for non directly attached
110     * switches
111     @@ -2082,6 +2089,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
112     if (priv->is_lite)
113     netif_set_real_num_tx_queues(slave_dev,
114     slave_dev->num_tx_queues / 2);
115     +
116     num_tx_queues = slave_dev->real_num_tx_queues;
117    
118     if (priv->per_port_num_tx_queues &&
119     @@ -2109,7 +2117,7 @@ static int bcm_sysport_map_queues(struct net_device *dev,
120     return 0;
121     }
122    
123     -static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
124     +static int bcm_sysport_dsa_notifier(struct notifier_block *nb,
125     unsigned long event, void *ptr)
126     {
127     struct dsa_notifier_register_info *info;
128     @@ -2119,7 +2127,7 @@ static int bcm_sysport_dsa_notifier(struct notifier_block *unused,
129    
130     info = ptr;
131    
132     - return notifier_from_errno(bcm_sysport_map_queues(info->master, info));
133     + return notifier_from_errno(bcm_sysport_map_queues(nb, info));
134     }
135    
136     #define REV_FMT "v%2x.%02x"
137     diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
138     index f2593978ae75..bde98a994e96 100644
139     --- a/drivers/net/ethernet/broadcom/tg3.c
140     +++ b/drivers/net/ethernet/broadcom/tg3.c
141     @@ -8733,14 +8733,15 @@ static void tg3_free_consistent(struct tg3 *tp)
142     tg3_mem_rx_release(tp);
143     tg3_mem_tx_release(tp);
144    
145     - /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
146     - tg3_full_lock(tp, 0);
147     + /* tp->hw_stats can be referenced safely:
148     + * 1. under rtnl_lock
149     + * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
150     + */
151     if (tp->hw_stats) {
152     dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
153     tp->hw_stats, tp->stats_mapping);
154     tp->hw_stats = NULL;
155     }
156     - tg3_full_unlock(tp);
157     }
158    
159     /*
160     @@ -14178,7 +14179,7 @@ static void tg3_get_stats64(struct net_device *dev,
161     struct tg3 *tp = netdev_priv(dev);
162    
163     spin_lock_bh(&tp->lock);
164     - if (!tp->hw_stats) {
165     + if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
166     *stats = tp->net_stats_prev;
167     spin_unlock_bh(&tp->lock);
168     return;
169     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
170     index f3302edba8b4..7f87db9734b8 100644
171     --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
172     +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
173     @@ -1013,6 +1013,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
174     if (!coal->tx_max_coalesced_frames_irq)
175     return -EINVAL;
176    
177     + if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
178     + coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
179     + coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
180     + coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
181     + netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
182     + __func__, MLX4_EN_MAX_COAL_TIME);
183     + return -ERANGE;
184     + }
185     +
186     + if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
187     + coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
188     + netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
189     + __func__, MLX4_EN_MAX_COAL_PKTS);
190     + return -ERANGE;
191     + }
192     +
193     priv->rx_frames = (coal->rx_max_coalesced_frames ==
194     MLX4_EN_AUTO_CONF) ?
195     MLX4_EN_RX_COAL_TARGET :
196     diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
197     index 8fc51bc29003..41f8fbced11d 100644
198     --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
199     +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
200     @@ -3320,12 +3320,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
201     MAX_TX_RINGS, GFP_KERNEL);
202     if (!priv->tx_ring[t]) {
203     err = -ENOMEM;
204     - goto err_free_tx;
205     + goto out;
206     }
207     priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
208     MAX_TX_RINGS, GFP_KERNEL);
209     if (!priv->tx_cq[t]) {
210     - kfree(priv->tx_ring[t]);
211     err = -ENOMEM;
212     goto out;
213     }
214     @@ -3578,11 +3577,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
215    
216     return 0;
217    
218     -err_free_tx:
219     - while (t--) {
220     - kfree(priv->tx_ring[t]);
221     - kfree(priv->tx_cq[t]);
222     - }
223     out:
224     mlx4_en_destroy_netdev(dev);
225     return err;
226     diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
227     index f470ae37d937..4dabaf025b12 100644
228     --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
229     +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
230     @@ -132,6 +132,9 @@
231     #define MLX4_EN_TX_COAL_PKTS 16
232     #define MLX4_EN_TX_COAL_TIME 0x10
233    
234     +#define MLX4_EN_MAX_COAL_PKTS U16_MAX
235     +#define MLX4_EN_MAX_COAL_TIME U16_MAX
236     +
237     #define MLX4_EN_RX_RATE_LOW 400000
238     #define MLX4_EN_RX_COAL_TIME_LOW 0
239     #define MLX4_EN_RX_RATE_HIGH 450000
240     @@ -552,8 +555,8 @@ struct mlx4_en_priv {
241     u16 rx_usecs_low;
242     u32 pkt_rate_high;
243     u16 rx_usecs_high;
244     - u16 sample_interval;
245     - u16 adaptive_rx_coal;
246     + u32 sample_interval;
247     + u32 adaptive_rx_coal;
248     u32 msg_enable;
249     u32 loopback_ok;
250     u32 validate_loopback;
251     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
252     index 3d46ef48d5b8..c641d5656b2d 100644
253     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
254     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
255     @@ -1007,12 +1007,14 @@ static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
256    
257     mutex_lock(&priv->state_lock);
258    
259     - if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
260     - goto out;
261     -
262     new_channels.params = priv->channels.params;
263     mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
264    
265     + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
266     + priv->channels.params = new_channels.params;
267     + goto out;
268     + }
269     +
270     /* Skip if tx_min_inline is the same */
271     if (new_channels.params.tx_min_inline_mode ==
272     priv->channels.params.tx_min_inline_mode)
273     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
274     index 43234cabf444..8665670fddbc 100644
275     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
276     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
277     @@ -1260,6 +1260,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
278     f->mask);
279     addr_type = key->addr_type;
280    
281     + /* the HW doesn't support frag first/later */
282     + if (mask->flags & FLOW_DIS_FIRST_FRAG)
283     + return -EOPNOTSUPP;
284     +
285     if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
286     MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
287     MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
288     @@ -1863,7 +1867,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
289     }
290    
291     ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
292     - if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
293     + if (modify_ip_header && ip_proto != IPPROTO_TCP &&
294     + ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
295     pr_info("can't offload re-write of ip proto %d\n", ip_proto);
296     return false;
297     }
298     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
299     index 11b4f1089d1c..ea725664f4f2 100644
300     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
301     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
302     @@ -255,7 +255,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
303     dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
304     DMA_TO_DEVICE);
305     if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
306     - return -ENOMEM;
307     + goto dma_unmap_wqe_err;
308    
309     dseg->addr = cpu_to_be64(dma_addr);
310     dseg->lkey = sq->mkey_be;
311     @@ -273,7 +273,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
312     dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
313     DMA_TO_DEVICE);
314     if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
315     - return -ENOMEM;
316     + goto dma_unmap_wqe_err;
317    
318     dseg->addr = cpu_to_be64(dma_addr);
319     dseg->lkey = sq->mkey_be;
320     @@ -285,6 +285,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
321     }
322    
323     return num_dma;
324     +
325     +dma_unmap_wqe_err:
326     + mlx5e_dma_unmap_wqe_err(sq, num_dma);
327     + return -ENOMEM;
328     }
329    
330     static inline void
331     @@ -380,17 +384,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
332     num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
333     (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
334     if (unlikely(num_dma < 0))
335     - goto dma_unmap_wqe_err;
336     + goto err_drop;
337    
338     mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
339     num_bytes, num_dma, wi, cseg);
340    
341     return NETDEV_TX_OK;
342    
343     -dma_unmap_wqe_err:
344     +err_drop:
345     sq->stats.dropped++;
346     - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
347     -
348     dev_kfree_skb_any(skb);
349    
350     return NETDEV_TX_OK;
351     @@ -620,17 +622,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
352     num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
353     (struct mlx5_wqe_data_seg *)cseg + ds_cnt);
354     if (unlikely(num_dma < 0))
355     - goto dma_unmap_wqe_err;
356     + goto err_drop;
357    
358     mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
359     num_bytes, num_dma, wi, cseg);
360    
361     return NETDEV_TX_OK;
362    
363     -dma_unmap_wqe_err:
364     +err_drop:
365     sq->stats.dropped++;
366     - mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
367     -
368     dev_kfree_skb_any(skb);
369    
370     return NETDEV_TX_OK;
371     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
372     index c2b1d7d351fc..0f745df1506c 100644
373     --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
374     +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
375     @@ -2143,26 +2143,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
376     memset(vf_stats, 0, sizeof(*vf_stats));
377     vf_stats->rx_packets =
378     MLX5_GET_CTR(out, received_eth_unicast.packets) +
379     + MLX5_GET_CTR(out, received_ib_unicast.packets) +
380     MLX5_GET_CTR(out, received_eth_multicast.packets) +
381     + MLX5_GET_CTR(out, received_ib_multicast.packets) +
382     MLX5_GET_CTR(out, received_eth_broadcast.packets);
383    
384     vf_stats->rx_bytes =
385     MLX5_GET_CTR(out, received_eth_unicast.octets) +
386     + MLX5_GET_CTR(out, received_ib_unicast.octets) +
387     MLX5_GET_CTR(out, received_eth_multicast.octets) +
388     + MLX5_GET_CTR(out, received_ib_multicast.octets) +
389     MLX5_GET_CTR(out, received_eth_broadcast.octets);
390    
391     vf_stats->tx_packets =
392     MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
393     + MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
394     MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
395     + MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
396     MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
397    
398     vf_stats->tx_bytes =
399     MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
400     + MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
401     MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
402     + MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
403     MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
404    
405     vf_stats->multicast =
406     - MLX5_GET_CTR(out, received_eth_multicast.packets);
407     + MLX5_GET_CTR(out, received_eth_multicast.packets) +
408     + MLX5_GET_CTR(out, received_ib_multicast.packets);
409    
410     vf_stats->broadcast =
411     MLX5_GET_CTR(out, received_eth_broadcast.packets);
412     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
413     index 31fc2cfac3b3..4d5b87e0d472 100644
414     --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
415     +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
416     @@ -185,6 +185,7 @@ static void del_sw_ns(struct fs_node *node);
417     static void del_sw_hw_rule(struct fs_node *node);
418     static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
419     struct mlx5_flow_destination *d2);
420     +static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
421     static struct mlx5_flow_rule *
422     find_flow_rule(struct fs_fte *fte,
423     struct mlx5_flow_destination *dest);
424     @@ -2329,23 +2330,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)
425    
426     static int init_root_ns(struct mlx5_flow_steering *steering)
427     {
428     + int err;
429     +
430     steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
431     if (!steering->root_ns)
432     - goto cleanup;
433     + return -ENOMEM;
434    
435     - if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
436     - goto cleanup;
437     + err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
438     + if (err)
439     + goto out_err;
440    
441     set_prio_attrs(steering->root_ns);
442     -
443     - if (create_anchor_flow_table(steering))
444     - goto cleanup;
445     + err = create_anchor_flow_table(steering);
446     + if (err)
447     + goto out_err;
448    
449     return 0;
450    
451     -cleanup:
452     - mlx5_cleanup_fs(steering->dev);
453     - return -ENOMEM;
454     +out_err:
455     + cleanup_root_ns(steering->root_ns);
456     + steering->root_ns = NULL;
457     + return err;
458     }
459    
460     static void clean_tree(struct fs_node *node)
461     diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
462     index 3529b545675d..1c09a274c637 100644
463     --- a/drivers/net/ethernet/mellanox/mlxsw/core.c
464     +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
465     @@ -1099,11 +1099,11 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
466     err_alloc_lag_mapping:
467     mlxsw_ports_fini(mlxsw_core);
468     err_ports_init:
469     - mlxsw_bus->fini(bus_priv);
470     -err_bus_init:
471     if (!reload)
472     devlink_resources_unregister(devlink, NULL);
473     err_register_resources:
474     + mlxsw_bus->fini(bus_priv);
475     +err_bus_init:
476     if (!reload)
477     devlink_free(devlink);
478     err_devlink_alloc:
479     diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
480     index 161bcdc012f0..fd6b86892595 100644
481     --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
482     +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
483     @@ -1718,13 +1718,11 @@ __mlxsw_sp_port_mdb_del(struct mlxsw_sp_port *mlxsw_sp_port,
484     struct net_device *dev = mlxsw_sp_port->dev;
485     int err;
486    
487     - if (bridge_port->bridge_device->multicast_enabled) {
488     - if (bridge_port->bridge_device->multicast_enabled) {
489     - err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid,
490     - false);
491     - if (err)
492     - netdev_err(dev, "Unable to remove port from SMID\n");
493     - }
494     + if (bridge_port->bridge_device->multicast_enabled &&
495     + !bridge_port->mrouter) {
496     + err = mlxsw_sp_port_smid_set(mlxsw_sp_port, mid->mid, false);
497     + if (err)
498     + netdev_err(dev, "Unable to remove port from SMID\n");
499     }
500    
501     err = mlxsw_sp_port_remove_from_mid(mlxsw_sp_port, mid);
502     diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
503     index b3567a596fc1..80df9a5d4217 100644
504     --- a/drivers/net/ethernet/netronome/nfp/flower/action.c
505     +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
506     @@ -183,17 +183,21 @@ static int
507     nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
508     const struct tc_action *action,
509     struct nfp_fl_pre_tunnel *pre_tun,
510     - enum nfp_flower_tun_type tun_type)
511     + enum nfp_flower_tun_type tun_type,
512     + struct net_device *netdev)
513     {
514     size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
515     struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
516     u32 tmp_set_ip_tun_type_index = 0;
517     /* Currently support one pre-tunnel so index is always 0. */
518     int pretun_idx = 0;
519     + struct net *net;
520    
521     if (ip_tun->options_len)
522     return -EOPNOTSUPP;
523    
524     + net = dev_net(netdev);
525     +
526     set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
527     set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;
528    
529     @@ -204,6 +208,7 @@ nfp_fl_set_ipv4_udp_tun(struct nfp_fl_set_ipv4_udp_tun *set_tun,
530    
531     set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
532     set_tun->tun_id = ip_tun->key.tun_id;
533     + set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
534    
535     /* Complete pre_tunnel action. */
536     pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;
537     @@ -511,7 +516,8 @@ nfp_flower_loop_action(const struct tc_action *a,
538     *a_len += sizeof(struct nfp_fl_pre_tunnel);
539    
540     set_tun = (void *)&nfp_fl->action_data[*a_len];
541     - err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type);
542     + err = nfp_fl_set_ipv4_udp_tun(set_tun, a, pre_tun, *tun_type,
543     + netdev);
544     if (err)
545     return err;
546     *a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
547     diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
548     index adfe474c2cf0..329a9b6d453a 100644
549     --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
550     +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h
551     @@ -178,7 +178,10 @@ struct nfp_fl_set_ipv4_udp_tun {
552     __be16 reserved;
553     __be64 tun_id __packed;
554     __be32 tun_type_index;
555     - __be32 extra[3];
556     + __be16 reserved2;
557     + u8 ttl;
558     + u8 reserved3;
559     + __be32 extra[2];
560     };
561    
562     /* Metadata with L2 (1W/4B)
563     diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
564     index d24b47b8e0b2..d118da5a10a2 100644
565     --- a/drivers/net/ethernet/realtek/8139too.c
566     +++ b/drivers/net/ethernet/realtek/8139too.c
567     @@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
568     struct rtl8139_private *tp = netdev_priv(dev);
569     const int irq = tp->pci_dev->irq;
570    
571     - disable_irq(irq);
572     + disable_irq_nosync(irq);
573     rtl8139_interrupt(irq, dev);
574     enable_irq(irq);
575     }
576     diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
577     index b4779acb6b5c..18bb1e226e6d 100644
578     --- a/drivers/net/ethernet/realtek/r8169.c
579     +++ b/drivers/net/ethernet/realtek/r8169.c
580     @@ -5087,6 +5087,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
581     static void rtl_pll_power_up(struct rtl8169_private *tp)
582     {
583     rtl_generic_op(tp, tp->pll_power_ops.up);
584     +
585     + /* give MAC/PHY some time to resume */
586     + msleep(20);
587     }
588    
589     static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
590     diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
591     index 8dd545fed30d..6c94af263be8 100644
592     --- a/drivers/net/ethernet/sun/niu.c
593     +++ b/drivers/net/ethernet/sun/niu.c
594     @@ -3443,7 +3443,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
595    
596     len = (val & RCR_ENTRY_L2_LEN) >>
597     RCR_ENTRY_L2_LEN_SHIFT;
598     - len -= ETH_FCS_LEN;
599     + append_size = len + ETH_HLEN + ETH_FCS_LEN;
600    
601     addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
602     RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
603     @@ -3453,7 +3453,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
604     RCR_ENTRY_PKTBUFSZ_SHIFT];
605    
606     off = addr & ~PAGE_MASK;
607     - append_size = rcr_size;
608     if (num_rcr == 1) {
609     int ptype;
610    
611     @@ -3466,7 +3465,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
612     else
613     skb_checksum_none_assert(skb);
614     } else if (!(val & RCR_ENTRY_MULTI))
615     - append_size = len - skb->len;
616     + append_size = append_size - skb->len;
617    
618     niu_rx_skb_append(skb, page, off, append_size, rcr_size);
619     if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
620     diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
621     index 33c35b2df7d5..5490c7d09c16 100644
622     --- a/drivers/net/ethernet/ti/cpsw.c
623     +++ b/drivers/net/ethernet/ti/cpsw.c
624     @@ -1278,6 +1278,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
625     cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
626     HOST_PORT_NUM, ALE_VLAN |
627     ALE_SECURE, slave->port_vlan);
628     + cpsw_ale_control_set(cpsw->ale, slave_port,
629     + ALE_PORT_DROP_UNKNOWN_VLAN, 1);
630     }
631    
632     static void soft_reset_slave(struct cpsw_slave *slave)
633     diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
634     index f28c85d212ce..4774766fe20d 100644
635     --- a/drivers/net/hyperv/netvsc_drv.c
636     +++ b/drivers/net/hyperv/netvsc_drv.c
637     @@ -1840,7 +1840,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
638     goto rx_handler_failed;
639     }
640    
641     - ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
642     + ret = netdev_master_upper_dev_link(vf_netdev, ndev,
643     + NULL, NULL, NULL);
644     if (ret != 0) {
645     netdev_err(vf_netdev,
646     "can not set master device %s (err = %d)\n",
647     diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
648     index 465c42e30508..95846f0321f3 100644
649     --- a/drivers/net/hyperv/rndis_filter.c
650     +++ b/drivers/net/hyperv/rndis_filter.c
651     @@ -1282,7 +1282,7 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
652     rndis_device->link_state ? "down" : "up");
653    
654     if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_5)
655     - return net_device;
656     + goto out;
657    
658     rndis_filter_query_link_speed(rndis_device, net_device);
659    
660     diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
661     index 8961209ee949..a386138c7255 100644
662     --- a/drivers/net/phy/sfp-bus.c
663     +++ b/drivers/net/phy/sfp-bus.c
664     @@ -190,7 +190,7 @@ void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
665     if (id->base.br_nominal) {
666     if (id->base.br_nominal != 255) {
667     br_nom = id->base.br_nominal * 100;
668     - br_min = br_nom + id->base.br_nominal * id->ext.br_min;
669     + br_min = br_nom - id->base.br_nominal * id->ext.br_min;
670     br_max = br_nom + id->base.br_nominal * id->ext.br_max;
671     } else if (id->ext.br_max) {
672     br_nom = 250 * id->ext.br_max;
673     diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
674     index 51c68fc416fa..42565dd33aa6 100644
675     --- a/drivers/net/usb/qmi_wwan.c
676     +++ b/drivers/net/usb/qmi_wwan.c
677     @@ -1344,6 +1344,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
678     id->driver_info = (unsigned long)&qmi_wwan_info;
679     }
680    
681     + /* There are devices where the same interface number can be
682     + * configured as different functions. We should only bind to
683     + * vendor specific functions when matching on interface number
684     + */
685     + if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
686     + desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
687     + dev_dbg(&intf->dev,
688     + "Rejecting interface number match for class %02x\n",
689     + desc->bInterfaceClass);
690     + return -ENODEV;
691     + }
692     +
693     /* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
694     if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
695     dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
696     diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
697     index 84858d5c8257..d9f2229664ad 100644
698     --- a/drivers/scsi/aacraid/commsup.c
699     +++ b/drivers/scsi/aacraid/commsup.c
700     @@ -724,6 +724,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
701     int wait;
702     unsigned long flags = 0;
703     unsigned long mflags = 0;
704     + struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
705     + fibptr->hw_fib_va;
706    
707     fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
708     if (callback) {
709     @@ -734,11 +736,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
710     wait = 1;
711    
712    
713     - if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
714     - struct aac_hba_cmd_req *hbacmd =
715     - (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
716     + hbacmd->iu_type = command;
717    
718     - hbacmd->iu_type = command;
719     + if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
720     /* bit1 of request_id must be 0 */
721     hbacmd->request_id =
722     cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
723     diff --git a/fs/proc/base.c b/fs/proc/base.c
724     index 9298324325ed..f034eccd8616 100644
725     --- a/fs/proc/base.c
726     +++ b/fs/proc/base.c
727     @@ -264,7 +264,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
728     * Inherently racy -- command line shares address space
729     * with code and data.
730     */
731     - rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
732     + rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
733     if (rv <= 0)
734     goto out_free_page;
735    
736     @@ -282,7 +282,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
737     int nr_read;
738    
739     _count = min3(count, len, PAGE_SIZE);
740     - nr_read = access_remote_vm(mm, p, page, _count, 0);
741     + nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
742     if (nr_read < 0)
743     rv = nr_read;
744     if (nr_read <= 0)
745     @@ -328,7 +328,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
746     bool final;
747    
748     _count = min3(count, len, PAGE_SIZE);
749     - nr_read = access_remote_vm(mm, p, page, _count, 0);
750     + nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
751     if (nr_read < 0)
752     rv = nr_read;
753     if (nr_read <= 0)
754     @@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
755     max_len = min_t(size_t, PAGE_SIZE, count);
756     this_len = min(max_len, this_len);
757    
758     - retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
759     + retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);
760    
761     if (retval <= 0) {
762     ret = retval;
763     diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
764     index 9d3a03364e6e..1352b1b990a7 100644
765     --- a/include/linux/mlx5/driver.h
766     +++ b/include/linux/mlx5/driver.h
767     @@ -1269,25 +1269,19 @@ enum {
768     };
769    
770     static inline const struct cpumask *
771     -mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector)
772     +mlx5_get_vector_affinity_hint(struct mlx5_core_dev *dev, int vector)
773     {
774     - const struct cpumask *mask;
775     struct irq_desc *desc;
776     unsigned int irq;
777     int eqn;
778     int err;
779    
780     - err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq);
781     + err = mlx5_vector2eqn(dev, vector, &eqn, &irq);
782     if (err)
783     return NULL;
784    
785     desc = irq_to_desc(irq);
786     -#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
787     - mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
788     -#else
789     - mask = desc->irq_common_data.affinity;
790     -#endif
791     - return mask;
792     + return desc->affinity_hint;
793     }
794    
795     #endif /* MLX5_DRIVER_H */
796     diff --git a/include/linux/mm.h b/include/linux/mm.h
797     index 95a2d748e978..a4e9bdbec490 100644
798     --- a/include/linux/mm.h
799     +++ b/include/linux/mm.h
800     @@ -2441,6 +2441,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
801     #define FOLL_MLOCK 0x1000 /* lock present pages */
802     #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
803     #define FOLL_COW 0x4000 /* internal GUP flag */
804     +#define FOLL_ANON 0x8000 /* don't do file mappings */
805    
806     static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
807     {
808     diff --git a/include/net/bonding.h b/include/net/bonding.h
809     index f801fc940b29..b52235158836 100644
810     --- a/include/net/bonding.h
811     +++ b/include/net/bonding.h
812     @@ -198,6 +198,7 @@ struct bonding {
813     struct slave __rcu *primary_slave;
814     struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
815     bool force_primary;
816     + u32 nest_level;
817     s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
818     int (*recv_probe)(const struct sk_buff *, struct bonding *,
819     struct slave *);
820     diff --git a/include/net/tls.h b/include/net/tls.h
821     index 4913430ab807..f5c928a76994 100644
822     --- a/include/net/tls.h
823     +++ b/include/net/tls.h
824     @@ -102,6 +102,7 @@ struct tls_context {
825     struct scatterlist *partially_sent_record;
826     u16 partially_sent_offset;
827     unsigned long flags;
828     + bool in_tcp_sendpages;
829    
830     u16 pending_open_record_frags;
831     int (*push_pending_record)(struct sock *sk, int flags);
832     diff --git a/mm/gup.c b/mm/gup.c
833     index 8f3a06408e28..f5f83c2e6c83 100644
834     --- a/mm/gup.c
835     +++ b/mm/gup.c
836     @@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
837     if (vm_flags & (VM_IO | VM_PFNMAP))
838     return -EFAULT;
839    
840     + if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
841     + return -EFAULT;
842     +
843     if (write) {
844     if (!(vm_flags & VM_WRITE)) {
845     if (!(gup_flags & FOLL_FORCE))
846     diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
847     index 9ba4ed65c52b..4be5335407c5 100644
848     --- a/net/bridge/br_if.c
849     +++ b/net/bridge/br_if.c
850     @@ -509,8 +509,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev,
851     return -ELOOP;
852     }
853    
854     - /* Device is already being bridged */
855     - if (br_port_exists(dev))
856     + /* Device has master upper dev */
857     + if (netdev_master_upper_dev_get(dev))
858     return -EBUSY;
859    
860     /* No bridging devices that dislike that (e.g. wireless) */
861     diff --git a/net/compat.c b/net/compat.c
862     index 22381719718c..32ed993588d6 100644
863     --- a/net/compat.c
864     +++ b/net/compat.c
865     @@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
866     optname == SO_ATTACH_REUSEPORT_CBPF)
867     return do_set_attach_filter(sock, level, optname,
868     optval, optlen);
869     - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
870     + if (!COMPAT_USE_64BIT_TIME &&
871     + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
872     return do_set_sock_timeout(sock, level, optname, optval, optlen);
873    
874     return sock_setsockopt(sock, level, optname, optval, optlen);
875     @@ -442,7 +443,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
876     static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
877     char __user *optval, int __user *optlen)
878     {
879     - if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
880     + if (!COMPAT_USE_64BIT_TIME &&
881     + (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
882     return do_get_sock_timeout(sock, level, optname, optval, optlen);
883     return sock_getsockopt(sock, level, optname, optval, optlen);
884     }
885     diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
886     index 92d016e87816..385f153fe031 100644
887     --- a/net/dccp/ccids/ccid2.c
888     +++ b/net/dccp/ccids/ccid2.c
889     @@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
890     DCCPF_SEQ_WMAX));
891     }
892    
893     +static void dccp_tasklet_schedule(struct sock *sk)
894     +{
895     + struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
896     +
897     + if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
898     + sock_hold(sk);
899     + __tasklet_schedule(t);
900     + }
901     +}
902     +
903     static void ccid2_hc_tx_rto_expire(struct timer_list *t)
904     {
905     struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
906     @@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(struct timer_list *t)
907    
908     /* if we were blocked before, we may now send cwnd=1 packet */
909     if (sender_was_blocked)
910     - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
911     + dccp_tasklet_schedule(sk);
912     /* restart backed-off timer */
913     sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
914     out:
915     @@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
916     done:
917     /* check if incoming Acks allow pending packets to be sent */
918     if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
919     - tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
920     + dccp_tasklet_schedule(sk);
921     dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
922     }
923    
924     diff --git a/net/dccp/timer.c b/net/dccp/timer.c
925     index b50a8732ff43..1501a20a94ca 100644
926     --- a/net/dccp/timer.c
927     +++ b/net/dccp/timer.c
928     @@ -232,6 +232,7 @@ static void dccp_write_xmitlet(unsigned long data)
929     else
930     dccp_write_xmit(sk);
931     bh_unlock_sock(sk);
932     + sock_put(sk);
933     }
934    
935     static void dccp_write_xmit_timer(struct timer_list *t)
936     @@ -240,7 +241,6 @@ static void dccp_write_xmit_timer(struct timer_list *t)
937     struct sock *sk = &dp->dccps_inet_connection.icsk_inet.sk;
938    
939     dccp_write_xmitlet((unsigned long)sk);
940     - sock_put(sk);
941     }
942    
943     void dccp_init_xmit_timers(struct sock *sk)
944     diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
945     index b8f0db54b197..16226d49263d 100644
946     --- a/net/ipv4/ping.c
947     +++ b/net/ipv4/ping.c
948     @@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
949     ipc.addr = faddr = daddr;
950    
951     if (ipc.opt && ipc.opt->opt.srr) {
952     - if (!daddr)
953     - return -EINVAL;
954     + if (!daddr) {
955     + err = -EINVAL;
956     + goto out_free;
957     + }
958     faddr = ipc.opt->opt.faddr;
959     }
960     tos = get_rttos(&ipc, inet);
961     @@ -842,6 +844,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
962    
963     out:
964     ip_rt_put(rt);
965     +out_free:
966     if (free)
967     kfree(ipc.opt);
968     if (!err) {
969     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
970     index 9d9b8358a898..df1c04d75f93 100644
971     --- a/net/ipv4/route.c
972     +++ b/net/ipv4/route.c
973     @@ -710,7 +710,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
974     fnhe->fnhe_gw = gw;
975     fnhe->fnhe_pmtu = pmtu;
976     fnhe->fnhe_mtu_locked = lock;
977     - fnhe->fnhe_expires = expires;
978     + fnhe->fnhe_expires = max(1UL, expires);
979    
980     /* Exception created; mark the cached routes for the nexthop
981     * stale, so anyone caching it rechecks if this exception
982     @@ -1298,6 +1298,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
983     return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
984     }
985    
986     +static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
987     +{
988     + struct fnhe_hash_bucket *hash;
989     + struct fib_nh_exception *fnhe, __rcu **fnhe_p;
990     + u32 hval = fnhe_hashfun(daddr);
991     +
992     + spin_lock_bh(&fnhe_lock);
993     +
994     + hash = rcu_dereference_protected(nh->nh_exceptions,
995     + lockdep_is_held(&fnhe_lock));
996     + hash += hval;
997     +
998     + fnhe_p = &hash->chain;
999     + fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1000     + while (fnhe) {
1001     + if (fnhe->fnhe_daddr == daddr) {
1002     + rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1003     + fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1004     + fnhe_flush_routes(fnhe);
1005     + kfree_rcu(fnhe, rcu);
1006     + break;
1007     + }
1008     + fnhe_p = &fnhe->fnhe_next;
1009     + fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1010     + lockdep_is_held(&fnhe_lock));
1011     + }
1012     +
1013     + spin_unlock_bh(&fnhe_lock);
1014     +}
1015     +
1016     static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1017     {
1018     struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1019     @@ -1311,8 +1341,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1020    
1021     for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1022     fnhe = rcu_dereference(fnhe->fnhe_next)) {
1023     - if (fnhe->fnhe_daddr == daddr)
1024     + if (fnhe->fnhe_daddr == daddr) {
1025     + if (fnhe->fnhe_expires &&
1026     + time_after(jiffies, fnhe->fnhe_expires)) {
1027     + ip_del_fnhe(nh, daddr);
1028     + break;
1029     + }
1030     return fnhe;
1031     + }
1032     }
1033     return NULL;
1034     }
1035     @@ -1340,6 +1376,7 @@ static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1036     fnhe->fnhe_gw = 0;
1037     fnhe->fnhe_pmtu = 0;
1038     fnhe->fnhe_expires = 0;
1039     + fnhe->fnhe_mtu_locked = false;
1040     fnhe_flush_routes(fnhe);
1041     orig = NULL;
1042     }
1043     @@ -1638,36 +1675,6 @@ static void ip_handle_martian_source(struct net_device *dev,
1044     #endif
1045     }
1046    
1047     -static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
1048     -{
1049     - struct fnhe_hash_bucket *hash;
1050     - struct fib_nh_exception *fnhe, __rcu **fnhe_p;
1051     - u32 hval = fnhe_hashfun(daddr);
1052     -
1053     - spin_lock_bh(&fnhe_lock);
1054     -
1055     - hash = rcu_dereference_protected(nh->nh_exceptions,
1056     - lockdep_is_held(&fnhe_lock));
1057     - hash += hval;
1058     -
1059     - fnhe_p = &hash->chain;
1060     - fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
1061     - while (fnhe) {
1062     - if (fnhe->fnhe_daddr == daddr) {
1063     - rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
1064     - fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
1065     - fnhe_flush_routes(fnhe);
1066     - kfree_rcu(fnhe, rcu);
1067     - break;
1068     - }
1069     - fnhe_p = &fnhe->fnhe_next;
1070     - fnhe = rcu_dereference_protected(fnhe->fnhe_next,
1071     - lockdep_is_held(&fnhe_lock));
1072     - }
1073     -
1074     - spin_unlock_bh(&fnhe_lock);
1075     -}
1076     -
1077     static void set_lwt_redirect(struct rtable *rth)
1078     {
1079     if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
1080     @@ -1734,20 +1741,10 @@ static int __mkroute_input(struct sk_buff *skb,
1081    
1082     fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1083     if (do_cache) {
1084     - if (fnhe) {
1085     + if (fnhe)
1086     rth = rcu_dereference(fnhe->fnhe_rth_input);
1087     - if (rth && rth->dst.expires &&
1088     - time_after(jiffies, rth->dst.expires)) {
1089     - ip_del_fnhe(&FIB_RES_NH(*res), daddr);
1090     - fnhe = NULL;
1091     - } else {
1092     - goto rt_cache;
1093     - }
1094     - }
1095     -
1096     - rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1097     -
1098     -rt_cache:
1099     + else
1100     + rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1101     if (rt_cache_valid(rth)) {
1102     skb_dst_set_noref(skb, &rth->dst);
1103     goto out;
1104     @@ -2224,39 +2221,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
1105     * the loopback interface and the IP_PKTINFO ipi_ifindex will
1106     * be set to the loopback interface as well.
1107     */
1108     - fi = NULL;
1109     + do_cache = false;
1110     }
1111    
1112     fnhe = NULL;
1113     do_cache &= fi != NULL;
1114     - if (do_cache) {
1115     + if (fi) {
1116     struct rtable __rcu **prth;
1117     struct fib_nh *nh = &FIB_RES_NH(*res);
1118    
1119     fnhe = find_exception(nh, fl4->daddr);
1120     + if (!do_cache)
1121     + goto add;
1122     if (fnhe) {
1123     prth = &fnhe->fnhe_rth_output;
1124     - rth = rcu_dereference(*prth);
1125     - if (rth && rth->dst.expires &&
1126     - time_after(jiffies, rth->dst.expires)) {
1127     - ip_del_fnhe(nh, fl4->daddr);
1128     - fnhe = NULL;
1129     - } else {
1130     - goto rt_cache;
1131     + } else {
1132     + if (unlikely(fl4->flowi4_flags &
1133     + FLOWI_FLAG_KNOWN_NH &&
1134     + !(nh->nh_gw &&
1135     + nh->nh_scope == RT_SCOPE_LINK))) {
1136     + do_cache = false;
1137     + goto add;
1138     }
1139     + prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
1140     }
1141     -
1142     - if (unlikely(fl4->flowi4_flags &
1143     - FLOWI_FLAG_KNOWN_NH &&
1144     - !(nh->nh_gw &&
1145     - nh->nh_scope == RT_SCOPE_LINK))) {
1146     - do_cache = false;
1147     - goto add;
1148     - }
1149     - prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
1150     rth = rcu_dereference(*prth);
1151     -
1152     -rt_cache:
1153     if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
1154     return rth;
1155     }
1156     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1157     index c92fd253fc46..80e39d9a0423 100644
1158     --- a/net/ipv4/tcp.c
1159     +++ b/net/ipv4/tcp.c
1160     @@ -688,7 +688,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
1161     {
1162     return skb->len < size_goal &&
1163     sock_net(sk)->ipv4.sysctl_tcp_autocorking &&
1164     - skb != tcp_write_queue_head(sk) &&
1165     + !tcp_rtx_queue_empty(sk) &&
1166     refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
1167     }
1168    
1169     @@ -1210,7 +1210,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
1170     uarg->zerocopy = 0;
1171     }
1172    
1173     - if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
1174     + if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
1175     + !tp->repair) {
1176     err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1177     if (err == -EINPROGRESS && copied_syn > 0)
1178     goto out;
1179     diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
1180     index a471f696e13c..29478454b527 100644
1181     --- a/net/ipv4/tcp_bbr.c
1182     +++ b/net/ipv4/tcp_bbr.c
1183     @@ -803,7 +803,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
1184     }
1185     }
1186     }
1187     - bbr->idle_restart = 0;
1188     + /* Restart after idle ends only once we process a new S/ACK for data */
1189     + if (rs->delivered > 0)
1190     + bbr->idle_restart = 0;
1191     }
1192    
1193     static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
1194     diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
1195     index e5ef7c38c934..b6ba51536b37 100644
1196     --- a/net/ipv4/udp.c
1197     +++ b/net/ipv4/udp.c
1198     @@ -407,9 +407,9 @@ static int compute_score(struct sock *sk, struct net *net,
1199     bool dev_match = (sk->sk_bound_dev_if == dif ||
1200     sk->sk_bound_dev_if == sdif);
1201    
1202     - if (exact_dif && !dev_match)
1203     + if (!dev_match)
1204     return -1;
1205     - if (sk->sk_bound_dev_if && dev_match)
1206     + if (sk->sk_bound_dev_if)
1207     score += 4;
1208     }
1209    
1210     @@ -958,8 +958,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1211     sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);
1212    
1213     if (ipc.opt && ipc.opt->opt.srr) {
1214     - if (!daddr)
1215     - return -EINVAL;
1216     + if (!daddr) {
1217     + err = -EINVAL;
1218     + goto out_free;
1219     + }
1220     faddr = ipc.opt->opt.faddr;
1221     connected = 0;
1222     }
1223     @@ -1080,6 +1082,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
1224    
1225     out:
1226     ip_rt_put(rt);
1227     +out_free:
1228     if (free)
1229     kfree(ipc.opt);
1230     if (!err)
1231     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1232     index 74a2e37412b2..1aee1a537cb1 100644
1233     --- a/net/ipv6/route.c
1234     +++ b/net/ipv6/route.c
1235     @@ -1822,11 +1822,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
1236     const struct ipv6hdr *inner_iph;
1237     const struct icmp6hdr *icmph;
1238     struct ipv6hdr _inner_iph;
1239     + struct icmp6hdr _icmph;
1240    
1241     if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
1242     goto out;
1243    
1244     - icmph = icmp6_hdr(skb);
1245     + icmph = skb_header_pointer(skb, skb_transport_offset(skb),
1246     + sizeof(_icmph), &_icmph);
1247     + if (!icmph)
1248     + goto out;
1249     +
1250     if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
1251     icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
1252     icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
1253     diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
1254     index 52e3ea0e6f50..68d589f8d2b2 100644
1255     --- a/net/ipv6/udp.c
1256     +++ b/net/ipv6/udp.c
1257     @@ -148,9 +148,9 @@ static int compute_score(struct sock *sk, struct net *net,
1258     bool dev_match = (sk->sk_bound_dev_if == dif ||
1259     sk->sk_bound_dev_if == sdif);
1260    
1261     - if (exact_dif && !dev_match)
1262     + if (!dev_match)
1263     return -1;
1264     - if (sk->sk_bound_dev_if && dev_match)
1265     + if (sk->sk_bound_dev_if)
1266     score++;
1267     }
1268    
1269     diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
1270     index cf41d9b4a0b8..b49f5afab405 100644
1271     --- a/net/llc/af_llc.c
1272     +++ b/net/llc/af_llc.c
1273     @@ -930,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
1274     if (size > llc->dev->mtu)
1275     size = llc->dev->mtu;
1276     copied = size - hdrlen;
1277     + rc = -EINVAL;
1278     + if (copied < 0)
1279     + goto release;
1280     release_sock(sk);
1281     skb = sock_alloc_send_skb(sk, size, noblock, &rc);
1282     lock_sock(sk);
1283     diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
1284     index d7da99a0b0b8..9696ef96b719 100644
1285     --- a/net/nsh/nsh.c
1286     +++ b/net/nsh/nsh.c
1287     @@ -57,6 +57,8 @@ int nsh_pop(struct sk_buff *skb)
1288     return -ENOMEM;
1289     nh = (struct nshhdr *)(skb->data);
1290     length = nsh_hdr_len(nh);
1291     + if (length < NSH_BASE_HDR_LEN)
1292     + return -EINVAL;
1293     inner_proto = tun_p_to_eth_p(nh->np);
1294     if (!pskb_may_pull(skb, length))
1295     return -ENOMEM;
1296     @@ -90,6 +92,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
1297     if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
1298     goto out;
1299     nsh_len = nsh_hdr_len(nsh_hdr(skb));
1300     + if (nsh_len < NSH_BASE_HDR_LEN)
1301     + goto out;
1302     if (unlikely(!pskb_may_pull(skb, nsh_len)))
1303     goto out;
1304    
1305     diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
1306     index 7322aa1e382e..492ab0c36f7c 100644
1307     --- a/net/openvswitch/flow_netlink.c
1308     +++ b/net/openvswitch/flow_netlink.c
1309     @@ -1712,13 +1712,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,
1310    
1311     /* The nlattr stream should already have been validated */
1312     nla_for_each_nested(nla, attr, rem) {
1313     - if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
1314     - if (tbl[nla_type(nla)].next)
1315     - tbl = tbl[nla_type(nla)].next;
1316     - nlattr_set(nla, val, tbl);
1317     - } else {
1318     + if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
1319     + nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
1320     + else
1321     memset(nla_data(nla), val, nla_len(nla));
1322     - }
1323    
1324     if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
1325     *(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
1326     diff --git a/net/rds/recv.c b/net/rds/recv.c
1327     index b25bcfe411ca..555f07ccf0dc 100644
1328     --- a/net/rds/recv.c
1329     +++ b/net/rds/recv.c
1330     @@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
1331     struct rds_cmsg_rx_trace t;
1332     int i, j;
1333    
1334     + memset(&t, 0, sizeof(t));
1335     inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
1336     t.rx_traces = rs->rs_rx_traces;
1337     for (i = 0; i < rs->rs_rx_traces; i++) {
1338     diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
1339     index 7b0700f52b50..d87b611046bb 100644
1340     --- a/net/sched/act_skbmod.c
1341     +++ b/net/sched/act_skbmod.c
1342     @@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
1343     if (exists && bind)
1344     return 0;
1345    
1346     - if (!lflags)
1347     + if (!lflags) {
1348     + if (exists)
1349     + tcf_idr_release(*a, bind);
1350     return -EINVAL;
1351     + }
1352    
1353     if (!exists) {
1354     ret = tcf_idr_create(tn, parm->index, est, a,
1355     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
1356     index 247b7cc20c13..c2c732aad87c 100644
1357     --- a/net/sched/cls_api.c
1358     +++ b/net/sched/cls_api.c
1359     @@ -152,8 +152,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
1360     NL_SET_ERR_MSG(extack, "TC classifier not found");
1361     err = -ENOENT;
1362     }
1363     - goto errout;
1364     #endif
1365     + goto errout;
1366     }
1367     tp->classify = tp->ops->classify;
1368     tp->protocol = protocol;
1369     diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
1370     index a366e4c9413a..4808713c73b9 100644
1371     --- a/net/sched/sch_fq.c
1372     +++ b/net/sched/sch_fq.c
1373     @@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
1374     return f->next == &detached;
1375     }
1376    
1377     +static bool fq_flow_is_throttled(const struct fq_flow *f)
1378     +{
1379     + return f->next == &throttled;
1380     +}
1381     +
1382     +static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
1383     +{
1384     + if (head->first)
1385     + head->last->next = flow;
1386     + else
1387     + head->first = flow;
1388     + head->last = flow;
1389     + flow->next = NULL;
1390     +}
1391     +
1392     +static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
1393     +{
1394     + rb_erase(&f->rate_node, &q->delayed);
1395     + q->throttled_flows--;
1396     + fq_flow_add_tail(&q->old_flows, f);
1397     +}
1398     +
1399     static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
1400     {
1401     struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
1402     @@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
1403    
1404     static struct kmem_cache *fq_flow_cachep __read_mostly;
1405    
1406     -static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
1407     -{
1408     - if (head->first)
1409     - head->last->next = flow;
1410     - else
1411     - head->first = flow;
1412     - head->last = flow;
1413     - flow->next = NULL;
1414     -}
1415    
1416     /* limit number of collected flows per round */
1417     #define FQ_GC_MAX 8
1418     @@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
1419     f->socket_hash != sk->sk_hash)) {
1420     f->credit = q->initial_quantum;
1421     f->socket_hash = sk->sk_hash;
1422     + if (fq_flow_is_throttled(f))
1423     + fq_flow_unset_throttled(q, f);
1424     f->time_next_packet = 0ULL;
1425     }
1426     return f;
1427     @@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
1428     q->time_next_delayed_flow = f->time_next_packet;
1429     break;
1430     }
1431     - rb_erase(p, &q->delayed);
1432     - q->throttled_flows--;
1433     - fq_flow_add_tail(&q->old_flows, f);
1434     + fq_flow_unset_throttled(q, f);
1435     }
1436     }
1437    
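The sch_fq refactor above exists so that fq_classify() can pull a reused flow out of the throttled rbtree the moment its owning socket changes; otherwise the flow could sit in `delayed` with a stale deadline. fq_flow_add_tail() is only moved earlier, not changed. A userspace sketch of the O(1) first/last tail-append list it relies on (types trimmed down from the kernel's):

#include <stdio.h>
#include <stddef.h>

/* Trimmed-down fq types: a NULL-terminated singly linked list with
 * first/last pointers for O(1) tail appends. */
struct flow { struct flow *next; int id; };
struct flow_head { struct flow *first, *last; };

static void flow_add_tail(struct flow_head *head, struct flow *f)
{
    if (head->first)
        head->last->next = f;   /* non-empty: chain after current last */
    else
        head->first = f;        /* empty: f becomes the head */
    head->last = f;
    f->next = NULL;
}

int main(void)
{
    struct flow_head old_flows = { NULL, NULL };
    struct flow a = { .id = 1 }, b = { .id = 2 };

    /* In the kernel, fq_flow_unset_throttled() pairs rb_erase() from
     * the 'delayed' rbtree with exactly this append onto old_flows. */
    flow_add_tail(&old_flows, &a);
    flow_add_tail(&old_flows, &b);
    for (struct flow *f = old_flows.first; f; f = f->next)
        printf("flow %d\n", f->id);
    return 0;
}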
1438     diff --git a/net/sctp/associola.c b/net/sctp/associola.c
1439     index 837806dd5799..a47179da24e6 100644
1440     --- a/net/sctp/associola.c
1441     +++ b/net/sctp/associola.c
1442     @@ -1024,8 +1024,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1443     struct sctp_endpoint *ep;
1444     struct sctp_chunk *chunk;
1445     struct sctp_inq *inqueue;
1446     - int state;
1447     + int first_time = 1; /* is this the first time through the loop */
1448     int error = 0;
1449     + int state;
1450    
1451     /* The association should be held so we should be safe. */
1452     ep = asoc->ep;
1453     @@ -1036,6 +1037,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1454     state = asoc->state;
1455     subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);
1456    
1457     + /* If the first chunk in the packet is AUTH, do special
1458     + * processing specified in Section 6.3 of SCTP-AUTH spec
1459     + */
1460     + if (first_time && subtype.chunk == SCTP_CID_AUTH) {
1461     + struct sctp_chunkhdr *next_hdr;
1462     +
1463     + next_hdr = sctp_inq_peek(inqueue);
1464     + if (!next_hdr)
1465     + goto normal;
1466     +
1467     + /* If the next chunk is COOKIE-ECHO, skip the AUTH
1468     + * chunk while saving a pointer to it so we can do
1469     + * Authentication later (during cookie-echo
1470     + * processing).
1471     + */
1472     + if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
1473     + chunk->auth_chunk = skb_clone(chunk->skb,
1474     + GFP_ATOMIC);
1475     + chunk->auth = 1;
1476     + continue;
1477     + }
1478     + }
1479     +
1480     +normal:
1481     /* SCTP-AUTH, Section 6.3:
1482     * The receiver has a list of chunk types which it expects
1483     * to be received only after an AUTH-chunk. This list has
1484     @@ -1074,6 +1099,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
1485     /* If there is an error on chunk, discard this packet. */
1486     if (error && chunk)
1487     chunk->pdiscard = 1;
1488     +
1489     + if (first_time)
1490     + first_time = 0;
1491     }
1492     sctp_association_put(asoc);
1493     }
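The sctp_assoc_bh_rcv() change above restores deferred authentication for the backlog path: when AUTH immediately precedes COOKIE-ECHO, the AUTH chunk's skb is cloned and stashed on the COOKIE-ECHO chunk so it can be verified later, after the cookie has recreated the association. A rough userspace sketch of the stash-and-defer shape (types invented; strdup() stands in for skb_clone()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct chunk {
    const char *data;
    char *auth_chunk;   /* stashed copy of the preceding AUTH chunk */
    bool  auth;
};

static void stash_auth(struct chunk *cookie_echo, const char *auth_data)
{
    cookie_echo->auth_chunk = strdup(auth_data);
    cookie_echo->auth = true;   /* verified later, during cookie processing */
}

int main(void)
{
    struct chunk ce = { .data = "cookie-echo" };

    stash_auth(&ce, "auth-bytes");
    printf("deferred auth: %d (%s)\n", ce.auth, ce.auth_chunk);
    free(ce.auth_chunk);
    return 0;
}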
1494     diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
1495     index 23ebc5318edc..eb93ffe2408b 100644
1496     --- a/net/sctp/inqueue.c
1497     +++ b/net/sctp/inqueue.c
1498     @@ -217,7 +217,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
1499     skb_pull(chunk->skb, sizeof(*ch));
1500     chunk->subh.v = NULL; /* Subheader is no longer valid. */
1501    
1502     - if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
1503     + if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
1504     /* This is not a singleton */
1505     chunk->singleton = 0;
1506     } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
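The one-character inqueue change fixes an off-by-one: a following chunk header that ends exactly at the skb tail is still a complete header, so the old `<` misclassified such a packet as a singleton. A tiny boundary-comparison sketch (the 4-byte header length mirrors the on-wire sctp_chunkhdr, but the code is illustrative):

#include <stdint.h>
#include <stdio.h>

#define CHUNK_HDR_LEN 4   /* on-wire size of an SCTP chunk header */

/* Is there a complete next chunk header between 'end' and 'tail'?
 * With '<', the exact-fit case (end + CHUNK_HDR_LEN == tail) was
 * wrongly treated as "no". */
static int has_next_chunk(const uint8_t *end, const uint8_t *tail)
{
    return end + CHUNK_HDR_LEN <= tail;
}

int main(void)
{
    uint8_t buf[8];

    printf("exact fit: %d\n", has_next_chunk(buf + 4, buf + 8)); /* 1 */
    printf("no room:   %d\n", has_next_chunk(buf + 6, buf + 8)); /* 0 */
    return 0;
}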
1507     diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
1508     index 07b64719d1bc..351e80c59211 100644
1509     --- a/net/sctp/ipv6.c
1510     +++ b/net/sctp/ipv6.c
1511     @@ -866,6 +866,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
1512     if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
1513     return 1;
1514    
1515     + if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
1516     + return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
1517     +
1518     return __sctp_v6_cmp_addr(addr1, addr2);
1519     }
1520    
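The sctp_inet6_cmp_addr() hunk handles the case where an AF_INET6 socket carries plain AF_INET addresses: those must be compared as IPv4 rather than being handed to the IPv6 helper, which does not understand them. A standalone sketch of the dispatch (the union layout mimics, but is not, the kernel's union sctp_addr):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

union addr {
    struct sockaddr     sa;
    struct sockaddr_in  v4;
    struct sockaddr_in6 v6;
};

static int cmp_addr(const union addr *a1, const union addr *a2)
{
    /* the added branch: two IPv4 addresses on a v6 socket are compared
     * as IPv4 instead of being fed to the IPv6 helper */
    if (a1->sa.sa_family == AF_INET && a2->sa.sa_family == AF_INET)
        return a1->v4.sin_addr.s_addr == a2->v4.sin_addr.s_addr;
    return 0;   /* the kernel falls back to __sctp_v6_cmp_addr() here */
}

int main(void)
{
    union addr a = { .v4 = { .sin_family = AF_INET } };
    union addr b = { .v4 = { .sin_family = AF_INET } };

    a.v4.sin_addr.s_addr = htonl(0x7f000001);   /* 127.0.0.1 */
    b.v4.sin_addr.s_addr = htonl(0x7f000001);
    printf("equal: %d\n", cmp_addr(&a, &b));    /* 1 */
    return 0;
}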
1521     diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
1522     index eb7905ffe5f2..88573c57e106 100644
1523     --- a/net/sctp/sm_statefuns.c
1524     +++ b/net/sctp/sm_statefuns.c
1525     @@ -153,10 +153,7 @@ static enum sctp_disposition sctp_sf_violation_chunk(
1526     struct sctp_cmd_seq *commands);
1527    
1528     static enum sctp_ierror sctp_sf_authenticate(
1529     - struct net *net,
1530     - const struct sctp_endpoint *ep,
1531     const struct sctp_association *asoc,
1532     - const union sctp_subtype type,
1533     struct sctp_chunk *chunk);
1534    
1535     static enum sctp_disposition __sctp_sf_do_9_1_abort(
1536     @@ -621,6 +618,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
1537     return SCTP_DISPOSITION_CONSUME;
1538     }
1539    
1540     +static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
1541     + const struct sctp_association *asoc)
1542     +{
1543     + struct sctp_chunk auth;
1544     +
1545     + if (!chunk->auth_chunk)
1546     + return true;
1547     +
1548     + /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
1549     + * is supposed to be authenticated and we have to do delayed
1550     + * authentication. We've just recreated the association using
1551     + * the information in the cookie and now it's much easier to
1552     + * do the authentication.
1553     + */
1554     +
1555     + /* Make sure that we and the peer are AUTH capable */
1556     + if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
1557     + return false;
1558     +
1559     + /* set-up our fake chunk so that we can process it */
1560     + auth.skb = chunk->auth_chunk;
1561     + auth.asoc = chunk->asoc;
1562     + auth.sctp_hdr = chunk->sctp_hdr;
1563     + auth.chunk_hdr = (struct sctp_chunkhdr *)
1564     + skb_push(chunk->auth_chunk,
1565     + sizeof(struct sctp_chunkhdr));
1566     + skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
1567     + auth.transport = chunk->transport;
1568     +
1569     + return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
1570     +}
1571     +
1572     /*
1573     * Respond to a normal COOKIE ECHO chunk.
1574     * We are the side that is being asked for an association.
1575     @@ -758,37 +787,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
1576     if (error)
1577     goto nomem_init;
1578    
1579     - /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
1580     - * is supposed to be authenticated and we have to do delayed
1581     - * authentication. We've just recreated the association using
1582     - * the information in the cookie and now it's much easier to
1583     - * do the authentication.
1584     - */
1585     - if (chunk->auth_chunk) {
1586     - struct sctp_chunk auth;
1587     - enum sctp_ierror ret;
1588     -
1589     - /* Make sure that we and the peer are AUTH capable */
1590     - if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
1591     - sctp_association_free(new_asoc);
1592     - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1593     - }
1594     -
1595     - /* set-up our fake chunk so that we can process it */
1596     - auth.skb = chunk->auth_chunk;
1597     - auth.asoc = chunk->asoc;
1598     - auth.sctp_hdr = chunk->sctp_hdr;
1599     - auth.chunk_hdr = (struct sctp_chunkhdr *)
1600     - skb_push(chunk->auth_chunk,
1601     - sizeof(struct sctp_chunkhdr));
1602     - skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
1603     - auth.transport = chunk->transport;
1604     -
1605     - ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
1606     - if (ret != SCTP_IERROR_NO_ERROR) {
1607     - sctp_association_free(new_asoc);
1608     - return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1609     - }
1610     + if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
1611     + sctp_association_free(new_asoc);
1612     + return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
1613     }
1614    
1615     repl = sctp_make_cookie_ack(new_asoc, chunk);
1616     @@ -1758,13 +1759,15 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
1617     GFP_ATOMIC))
1618     goto nomem;
1619    
1620     + if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
1621     + return SCTP_DISPOSITION_DISCARD;
1622     +
1623     /* Make sure no new addresses are being added during the
1624     * restart. Though this is a pretty complicated attack
1625     * since you'd have to get inside the cookie.
1626     */
1627     - if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
1628     + if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
1629     return SCTP_DISPOSITION_CONSUME;
1630     - }
1631    
1632     /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
1633     * the peer has restarted (Action A), it MUST NOT setup a new
1634     @@ -1870,6 +1873,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
1635     GFP_ATOMIC))
1636     goto nomem;
1637    
1638     + if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
1639     + return SCTP_DISPOSITION_DISCARD;
1640     +
1641     /* Update the content of current association. */
1642     sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
1643     sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
1644     @@ -1964,6 +1970,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
1645     * a COOKIE ACK.
1646     */
1647    
1648     + if (!sctp_auth_chunk_verify(net, chunk, asoc))
1649     + return SCTP_DISPOSITION_DISCARD;
1650     +
1651     /* Don't accidentally move back into established state. */
1652     if (asoc->state < SCTP_STATE_ESTABLISHED) {
1653     sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
1654     @@ -2003,7 +2012,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
1655     }
1656     }
1657    
1658     - repl = sctp_make_cookie_ack(new_asoc, chunk);
1659     + repl = sctp_make_cookie_ack(asoc, chunk);
1660     if (!repl)
1661     goto nomem;
1662    
1663     @@ -4108,10 +4117,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
1664     * The return value is the disposition of the chunk.
1665     */
1666     static enum sctp_ierror sctp_sf_authenticate(
1667     - struct net *net,
1668     - const struct sctp_endpoint *ep,
1669     const struct sctp_association *asoc,
1670     - const union sctp_subtype type,
1671     struct sctp_chunk *chunk)
1672     {
1673     struct sctp_authhdr *auth_hdr;
1674     @@ -4209,7 +4215,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
1675     commands);
1676    
1677     auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
1678     - error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
1679     + error = sctp_sf_authenticate(asoc, chunk);
1680     switch (error) {
1681     case SCTP_IERROR_AUTH_BAD_HMAC:
1682     /* Generate the ERROR chunk and discard the rest
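Across the sm_statefuns.c hunks above, the duplicated delayed-AUTH block from sctp_sf_do_5_1D_ce() becomes sctp_auth_chunk_verify(), which is vacuously true when no AUTH chunk was deferred; that lets every duplicate-cookie path (A, B, D) call it unconditionally, closing the paths that previously skipped verification. A sketch of the helper's shape (types and the capability flag are invented; `auth_capable` stands in for the net->sctp.auth_enable && asoc->peer.auth_capable test):

#include <stdbool.h>
#include <stdio.h>

struct chunk { const void *auth_chunk; };

static bool auth_chunk_verify(const struct chunk *c, bool auth_capable)
{
    if (!c->auth_chunk)
        return true;        /* nothing was deferred: vacuously OK */
    if (!auth_capable)
        return false;       /* deferred AUTH but AUTH unsupported: reject */
    /* the kernel rebuilds a fake chunk here and runs sctp_sf_authenticate() */
    return true;
}

int main(void)
{
    struct chunk plain = { NULL }, authed = { "auth-bytes" };

    printf("%d %d %d\n",
           auth_chunk_verify(&plain, false),    /* 1 */
           auth_chunk_verify(&authed, false),   /* 0 */
           auth_chunk_verify(&authed, true));   /* 1 */
    return 0;
}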
1683     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
1684     index f799043abec9..f1f1d1b232ba 100644
1685     --- a/net/sctp/stream.c
1686     +++ b/net/sctp/stream.c
1687     @@ -240,6 +240,8 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
1688    
1689     new->out = NULL;
1690     new->in = NULL;
1691     + new->outcnt = 0;
1692     + new->incnt = 0;
1693     }
1694    
1695     static int sctp_send_reconf(struct sctp_association *asoc,
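The sctp_stream_update() fix completes an ownership hand-off: the function already stole `new`'s in/out arrays by NULLing the pointers, but left the counts behind, so `new`'s teardown path still believed it owned outcnt/incnt entries. A userspace sketch of pointers and counts moving together (invented types; calloc/free stand in for the kernel allocators):

#include <stdio.h>
#include <stdlib.h>

struct stream { int *out, *in; size_t outcnt, incnt; };

static void stream_update(struct stream *dst, struct stream *src)
{
    free(dst->out);
    free(dst->in);
    *dst = *src;        /* take the arrays and the counts */

    src->out = NULL;    /* the old code already cleared the pointers... */
    src->in = NULL;
    src->outcnt = 0;    /* ...the fix clears the counts as well, so the */
    src->incnt = 0;     /* donor's teardown no longer walks stolen arrays */
}

int main(void)
{
    struct stream a = { 0 }, b = { 0 };

    b.out = calloc(4, sizeof(int)); b.outcnt = 4;
    b.in  = calloc(2, sizeof(int)); b.incnt  = 2;
    stream_update(&a, &b);
    printf("donor: out=%p outcnt=%zu\n", (void *)b.out, b.outcnt);
    free(a.out);
    free(a.in);
    return 0;
}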
1696     diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
1697     index 84207ad33e8e..8cb7d9858270 100644
1698     --- a/net/sctp/ulpevent.c
1699     +++ b/net/sctp/ulpevent.c
1700     @@ -715,7 +715,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
1701     return event;
1702    
1703     fail_mark:
1704     - sctp_chunk_put(chunk);
1705     kfree_skb(skb);
1706     fail:
1707     return NULL;
1708     diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
1709     index afd5a935bbcb..5a983c9bea53 100644
1710     --- a/net/smc/af_smc.c
1711     +++ b/net/smc/af_smc.c
1712     @@ -973,10 +973,6 @@ static void smc_tcp_listen_work(struct work_struct *work)
1713     }
1714    
1715     out:
1716     - if (lsmc->clcsock) {
1717     - sock_release(lsmc->clcsock);
1718     - lsmc->clcsock = NULL;
1719     - }
1720     release_sock(lsk);
1721     sock_put(&lsmc->sk); /* sock_hold in smc_listen */
1722     }
1723     @@ -1165,13 +1161,15 @@ static __poll_t smc_poll(struct file *file, struct socket *sock,
1724     /* delegate to CLC child sock */
1725     release_sock(sk);
1726     mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
1727     - /* if non-blocking connect finished ... */
1728     lock_sock(sk);
1729     - if ((sk->sk_state == SMC_INIT) && (mask & EPOLLOUT)) {
1730     - sk->sk_err = smc->clcsock->sk->sk_err;
1731     - if (sk->sk_err) {
1732     - mask |= EPOLLERR;
1733     - } else {
1734     + sk->sk_err = smc->clcsock->sk->sk_err;
1735     + if (sk->sk_err) {
1736     + mask |= EPOLLERR;
1737     + } else {
1738     + /* if non-blocking connect finished ... */
1739     + if (sk->sk_state == SMC_INIT &&
1740     + mask & EPOLLOUT &&
1741     + smc->clcsock->sk->sk_state != TCP_CLOSE) {
1742     rc = smc_connect_rdma(smc);
1743     if (rc < 0)
1744     mask |= EPOLLERR;
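The smc_poll() rework above does two things: it mirrors the CLC child socket's error into the parent on every poll, not only in SMC_INIT, and it refuses to kick off the deferred smc_connect_rdma() when the underlying TCP socket has already reached TCP_CLOSE. A much-simplified userspace sketch of that decision (all states, mask bits, and names are stand-ins, not the kernel's):

#include <stdio.h>

#define EPOLLOUT 0x004
#define EPOLLERR 0x008

enum smc_state { SMC_INIT, SMC_ACTIVE };
enum tcp_state { TCP_ESTABLISHED, TCP_CLOSE };

static int smc_poll_sketch(int child_mask, int child_err,
                           enum tcp_state child_state,
                           enum smc_state *state, int *err)
{
    int mask = child_mask;

    *err = child_err;                       /* propagated on every poll now */
    if (*err) {
        mask |= EPOLLERR;
    } else if (*state == SMC_INIT &&
               (mask & EPOLLOUT) &&
               child_state != TCP_CLOSE) {  /* never connect over a dead sock */
        *state = SMC_ACTIVE;                /* stands in for smc_connect_rdma() */
    }
    return mask;
}

int main(void)
{
    enum smc_state state = SMC_INIT;
    int err = 0;
    int mask = smc_poll_sketch(EPOLLOUT, 0, TCP_ESTABLISHED, &state, &err);

    printf("mask=%#x connected=%d\n", mask, state == SMC_ACTIVE);
    return 0;
}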
1745     diff --git a/net/tipc/socket.c b/net/tipc/socket.c
1746     index 7dfa9fc99ec3..df4f504b1fef 100644
1747     --- a/net/tipc/socket.c
1748     +++ b/net/tipc/socket.c
1749     @@ -1518,10 +1518,10 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1750    
1751     srcaddr->sock.family = AF_TIPC;
1752     srcaddr->sock.addrtype = TIPC_ADDR_ID;
1753     + srcaddr->sock.scope = 0;
1754     srcaddr->sock.addr.id.ref = msg_origport(hdr);
1755     srcaddr->sock.addr.id.node = msg_orignode(hdr);
1756     srcaddr->sock.addr.name.domain = 0;
1757     - srcaddr->sock.scope = 0;
1758     m->msg_namelen = sizeof(struct sockaddr_tipc);
1759    
1760     if (!msg_in_group(hdr))
1761     @@ -1530,6 +1530,7 @@ static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
1762     /* Group message users may also want to know sending member's id */
1763     srcaddr->member.family = AF_TIPC;
1764     srcaddr->member.addrtype = TIPC_ADDR_NAME;
1765     + srcaddr->member.scope = 0;
1766     srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1767     srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1768     srcaddr->member.addr.name.domain = 0;
1769     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
1770     index d824d548447e..b51180c1479a 100644
1771     --- a/net/tls/tls_main.c
1772     +++ b/net/tls/tls_main.c
1773     @@ -107,6 +107,7 @@ int tls_push_sg(struct sock *sk,
1774     size = sg->length - offset;
1775     offset += sg->offset;
1776    
1777     + ctx->in_tcp_sendpages = true;
1778     while (1) {
1779     if (sg_is_last(sg))
1780     sendpage_flags = flags;
1781     @@ -127,6 +128,7 @@ int tls_push_sg(struct sock *sk,
1782     offset -= sg->offset;
1783     ctx->partially_sent_offset = offset;
1784     ctx->partially_sent_record = (void *)sg;
1785     + ctx->in_tcp_sendpages = false;
1786     return ret;
1787     }
1788    
1789     @@ -141,6 +143,8 @@ int tls_push_sg(struct sock *sk,
1790     }
1791    
1792     clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
1793     + ctx->in_tcp_sendpages = false;
1794     + ctx->sk_write_space(sk);
1795    
1796     return 0;
1797     }
1798     @@ -210,6 +214,10 @@ static void tls_write_space(struct sock *sk)
1799     {
1800     struct tls_context *ctx = tls_get_ctx(sk);
1801    
1802     + /* We are already sending pages, ignore notification */
1803     + if (ctx->in_tcp_sendpages)
1804     + return;
1805     +
1806     if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
1807     gfp_t sk_allocation = sk->sk_allocation;
1808     int rc;