Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0141-4.14.42-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 51305 bytes
-added up to patches-4.14.79
diff --git a/Makefile b/Makefile
index c23d0b0c6c45..777f5685a36b 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 41
+SUBLEVEL = 42
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index c02cc817a490..60666db31886 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -450,7 +450,7 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;

- if (!client_info->slave)
+ if (!client_info->slave || !is_valid_ether_addr(client_info->mac_dst))
return;

for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
@@ -943,6 +943,10 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
skb->priority = TC_PRIO_CONTROL;
skb->dev = slave->dev;

+ netdev_dbg(slave->bond->dev,
+ "Send learning packet: dev %s mac %pM vlan %d\n",
+ slave->dev->name, mac_addr, vid);
+
if (vid)
__vlan_hwaccel_put_tag(skb, vlan_proto, vid);

@@ -965,14 +969,13 @@ static int alb_upper_dev_walk(struct net_device *upper, void *_data)
u8 *mac_addr = data->mac_addr;
struct bond_vlan_tag *tags;

- if (is_vlan_dev(upper) && vlan_get_encap_level(upper) == 0) {
- if (strict_match &&
- ether_addr_equal_64bits(mac_addr,
- upper->dev_addr)) {
+ if (is_vlan_dev(upper) &&
+ bond->nest_level == vlan_get_encap_level(upper) - 1) {
+ if (upper->addr_assign_type == NET_ADDR_STOLEN) {
alb_send_lp_vid(slave, mac_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
- } else if (!strict_match) {
+ } else {
alb_send_lp_vid(slave, upper->dev_addr,
vlan_dev_vlan_proto(upper),
vlan_dev_vlan_id(upper));
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index bf3be2e6d4a8..f0aa57222f17 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1734,6 +1734,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (bond_mode_uses_xmit_hash(bond))
bond_update_slave_arr(bond, NULL);

+ bond->nest_level = dev_get_nest_level(bond_dev);
+
netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
slave_dev->name,
bond_is_active_slave(new_slave) ? "an active" : "a backup",
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 48738eb27806..9a8ef630466f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8723,14 +8723,15 @@ static void tg3_free_consistent(struct tg3 *tp)
tg3_mem_rx_release(tp);
tg3_mem_tx_release(tp);

- /* Protect tg3_get_stats64() from reading freed tp->hw_stats. */
- tg3_full_lock(tp, 0);
+ /* tp->hw_stats can be referenced safely:
+ * 1. under rtnl_lock
+ * 2. or under tp->lock if TG3_FLAG_INIT_COMPLETE is set.
+ */
if (tp->hw_stats) {
dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
- tg3_full_unlock(tp);
}

/*
@@ -14167,7 +14168,7 @@ static void tg3_get_stats64(struct net_device *dev,
struct tg3 *tp = netdev_priv(dev);

spin_lock_bh(&tp->lock);
- if (!tp->hw_stats) {
+ if (!tp->hw_stats || !tg3_flag(tp, INIT_COMPLETE)) {
*stats = tp->net_stats_prev;
spin_unlock_bh(&tp->lock);
return;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 67f74fcb265e..5fe56dc4cfae 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1013,6 +1013,22 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
if (!coal->tx_max_coalesced_frames_irq)
return -EINVAL;

+ if (coal->tx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_low > MLX4_EN_MAX_COAL_TIME ||
+ coal->rx_coalesce_usecs_high > MLX4_EN_MAX_COAL_TIME) {
+ netdev_info(dev, "%s: maximum coalesce time supported is %d usecs\n",
+ __func__, MLX4_EN_MAX_COAL_TIME);
+ return -ERANGE;
+ }
+
+ if (coal->tx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS ||
+ coal->rx_max_coalesced_frames > MLX4_EN_MAX_COAL_PKTS) {
+ netdev_info(dev, "%s: maximum coalesced frames supported is %d\n",
+ __func__, MLX4_EN_MAX_COAL_PKTS);
+ return -ERANGE;
+ }
+
priv->rx_frames = (coal->rx_max_coalesced_frames ==
MLX4_EN_AUTO_CONF) ?
MLX4_EN_RX_COAL_TARGET :
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index c097eef41a9c..faa4bd21f148 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -3318,12 +3318,11 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
MAX_TX_RINGS, GFP_KERNEL);
if (!priv->tx_ring[t]) {
err = -ENOMEM;
- goto err_free_tx;
+ goto out;
}
priv->tx_cq[t] = kzalloc(sizeof(struct mlx4_en_cq *) *
MAX_TX_RINGS, GFP_KERNEL);
if (!priv->tx_cq[t]) {
- kfree(priv->tx_ring[t]);
err = -ENOMEM;
goto out;
}
@@ -3576,11 +3575,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,

return 0;

-err_free_tx:
- while (t--) {
- kfree(priv->tx_ring[t]);
- kfree(priv->tx_cq[t]);
- }
out:
mlx4_en_destroy_netdev(dev);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2c1a5ff6acfa..09f4764a3f39 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -131,6 +131,9 @@
#define MLX4_EN_TX_COAL_PKTS 16
#define MLX4_EN_TX_COAL_TIME 0x10

+#define MLX4_EN_MAX_COAL_PKTS U16_MAX
+#define MLX4_EN_MAX_COAL_TIME U16_MAX
+
#define MLX4_EN_RX_RATE_LOW 400000
#define MLX4_EN_RX_COAL_TIME_LOW 0
#define MLX4_EN_RX_RATE_HIGH 450000
@@ -547,8 +550,8 @@ struct mlx4_en_priv {
u16 rx_usecs_low;
u32 pkt_rate_high;
u16 rx_usecs_high;
- u16 sample_interval;
- u16 adaptive_rx_coal;
+ u32 sample_interval;
+ u32 adaptive_rx_coal;
u32 msg_enable;
u32 loopback_ok;
u32 validate_loopback;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 42bab73a9f40..ede66e6af786 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -780,6 +780,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
f->mask);
addr_type = key->addr_type;

+ /* the HW doesn't support frag first/later */
+ if (mask->flags & FLOW_DIS_FIRST_FRAG)
+ return -EOPNOTSUPP;
+
if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
@@ -1383,7 +1387,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
}

ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
- if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
+ if (modify_ip_header && ip_proto != IPPROTO_TCP &&
+ ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
pr_info("can't offload re-write of ip proto %d\n", ip_proto);
return false;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index eea7f931cad3..d560047c0bf9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -234,7 +234,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dma_addr = dma_map_single(sq->pdev, skb_data, headlen,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- return -ENOMEM;
+ goto dma_unmap_wqe_err;

dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
@@ -252,7 +252,7 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
- return -ENOMEM;
+ goto dma_unmap_wqe_err;

dseg->addr = cpu_to_be64(dma_addr);
dseg->lkey = sq->mkey_be;
@@ -264,6 +264,10 @@ mlx5e_txwqe_build_dsegs(struct mlx5e_txqsq *sq, struct sk_buff *skb,
}

return num_dma;
+
+dma_unmap_wqe_err:
+ mlx5e_dma_unmap_wqe_err(sq, num_dma);
+ return -ENOMEM;
}

static inline void
@@ -355,17 +359,15 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
(struct mlx5_wqe_data_seg *)cseg + ds_cnt);
if (unlikely(num_dma < 0))
- goto dma_unmap_wqe_err;
+ goto err_drop;

mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
num_bytes, num_dma, wi, cseg);

return NETDEV_TX_OK;

-dma_unmap_wqe_err:
+err_drop:
sq->stats.dropped++;
- mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
dev_kfree_skb_any(skb);

return NETDEV_TX_OK;
@@ -594,17 +596,15 @@ netdev_tx_t mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
num_dma = mlx5e_txwqe_build_dsegs(sq, skb, skb_data, headlen,
(struct mlx5_wqe_data_seg *)cseg + ds_cnt);
if (unlikely(num_dma < 0))
- goto dma_unmap_wqe_err;
+ goto err_drop;

mlx5e_txwqe_complete(sq, skb, opcode, ds_cnt + num_dma,
num_bytes, num_dma, wi, cseg);

return NETDEV_TX_OK;

-dma_unmap_wqe_err:
+err_drop:
sq->stats.dropped++;
- mlx5e_dma_unmap_wqe_err(sq, wi->num_dma);
-
dev_kfree_skb_any(skb);

return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c77f4c0c7769..82e37250ed01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -2054,26 +2054,35 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
memset(vf_stats, 0, sizeof(*vf_stats));
vf_stats->rx_packets =
MLX5_GET_CTR(out, received_eth_unicast.packets) +
+ MLX5_GET_CTR(out, received_ib_unicast.packets) +
MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets) +
MLX5_GET_CTR(out, received_eth_broadcast.packets);

vf_stats->rx_bytes =
MLX5_GET_CTR(out, received_eth_unicast.octets) +
+ MLX5_GET_CTR(out, received_ib_unicast.octets) +
MLX5_GET_CTR(out, received_eth_multicast.octets) +
+ MLX5_GET_CTR(out, received_ib_multicast.octets) +
MLX5_GET_CTR(out, received_eth_broadcast.octets);

vf_stats->tx_packets =
MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

vf_stats->tx_bytes =
MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
+ MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

vf_stats->multicast =
- MLX5_GET_CTR(out, received_eth_multicast.packets);
+ MLX5_GET_CTR(out, received_eth_multicast.packets) +
+ MLX5_GET_CTR(out, received_ib_multicast.packets);

vf_stats->broadcast =
MLX5_GET_CTR(out, received_eth_broadcast.packets);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a7bea688ec8..33e5ff081e36 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -174,6 +174,7 @@ static void del_flow_group(struct fs_node *node);
static void del_fte(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2);
+static void cleanup_root_ns(struct mlx5_flow_root_namespace *root_ns);
static struct mlx5_flow_rule *
find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest);
@@ -2041,23 +2042,27 @@ static int create_anchor_flow_table(struct mlx5_flow_steering *steering)

static int init_root_ns(struct mlx5_flow_steering *steering)
{
+ int err;
+
steering->root_ns = create_root_ns(steering, FS_FT_NIC_RX);
if (!steering->root_ns)
- goto cleanup;
+ return -ENOMEM;

- if (init_root_tree(steering, &root_fs, &steering->root_ns->ns.node))
- goto cleanup;
+ err = init_root_tree(steering, &root_fs, &steering->root_ns->ns.node);
+ if (err)
+ goto out_err;

set_prio_attrs(steering->root_ns);
-
- if (create_anchor_flow_table(steering))
- goto cleanup;
+ err = create_anchor_flow_table(steering);
+ if (err)
+ goto out_err;

return 0;

-cleanup:
- mlx5_cleanup_fs(steering->dev);
- return -ENOMEM;
+out_err:
+ cleanup_root_ns(steering->root_ns);
+ steering->root_ns = NULL;
+ return err;
}

static void clean_tree(struct fs_node *node)
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index d24b47b8e0b2..d118da5a10a2 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -2224,7 +2224,7 @@ static void rtl8139_poll_controller(struct net_device *dev)
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;

- disable_irq(irq);
+ disable_irq_nosync(irq);
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index db553d4e8d22..b98fcc9e93e5 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4886,6 +4886,9 @@ static void rtl_pll_power_down(struct rtl8169_private *tp)
static void rtl_pll_power_up(struct rtl8169_private *tp)
{
rtl_generic_op(tp, tp->pll_power_ops.up);
+
+ /* give MAC/PHY some time to resume */
+ msleep(20);
}

static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 6a4e8e1bbd90..e92f41d20a2c 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3442,7 +3442,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,

len = (val & RCR_ENTRY_L2_LEN) >>
RCR_ENTRY_L2_LEN_SHIFT;
- len -= ETH_FCS_LEN;
+ append_size = len + ETH_HLEN + ETH_FCS_LEN;

addr = (val & RCR_ENTRY_PKT_BUF_ADDR) <<
RCR_ENTRY_PKT_BUF_ADDR_SHIFT;
@@ -3452,7 +3452,6 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
RCR_ENTRY_PKTBUFSZ_SHIFT];

off = addr & ~PAGE_MASK;
- append_size = rcr_size;
if (num_rcr == 1) {
int ptype;

@@ -3465,7 +3464,7 @@ static int niu_process_rx_pkt(struct napi_struct *napi, struct niu *np,
else
skb_checksum_none_assert(skb);
} else if (!(val & RCR_ENTRY_MULTI))
- append_size = len - skb->len;
+ append_size = append_size - skb->len;

niu_rx_skb_append(skb, page, off, append_size, rcr_size);
if ((page->index + rp->rbr_block_size) - rcr_size == addr) {
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 992c43b1868f..8cb44eabc283 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1260,6 +1260,8 @@ static inline void cpsw_add_dual_emac_def_ale_entries(
cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN |
ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
}

static void soft_reset_slave(struct cpsw_slave *slave)
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c849de3cb046..444e560d928b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1742,7 +1742,8 @@ static int netvsc_vf_join(struct net_device *vf_netdev,
goto rx_handler_failed;
}

- ret = netdev_upper_dev_link(vf_netdev, ndev);
+ ret = netdev_master_upper_dev_link(vf_netdev, ndev,
+ NULL, NULL);
if (ret != 0) {
netdev_err(vf_netdev,
"can not set master device %s (err = %d)\n",
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 1aad0568dcc6..2f828eb9ace6 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1338,6 +1338,18 @@ static int qmi_wwan_probe(struct usb_interface *intf,
id->driver_info = (unsigned long)&qmi_wwan_info;
}

+ /* There are devices where the same interface number can be
+ * configured as different functions. We should only bind to
+ * vendor specific functions when matching on interface number
+ */
+ if (id->match_flags & USB_DEVICE_ID_MATCH_INT_NUMBER &&
+ desc->bInterfaceClass != USB_CLASS_VENDOR_SPEC) {
+ dev_dbg(&intf->dev,
+ "Rejecting interface number match for class %02x\n",
+ desc->bInterfaceClass);
+ return -ENODEV;
+ }
+
/* Quectel EC20 quirk where we've QMI on interface 4 instead of 0 */
if (quectel_ec20_detected(intf) && desc->bInterfaceNumber == 0) {
dev_dbg(&intf->dev, "Quectel EC20 quirk, skipping interface 0\n");
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index c0a4fcb7fd0a..3696f9ded252 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -752,6 +752,8 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
int wait;
unsigned long flags = 0;
unsigned long mflags = 0;
+ struct aac_hba_cmd_req *hbacmd = (struct aac_hba_cmd_req *)
+ fibptr->hw_fib_va;

fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
if (callback) {
@@ -762,11 +764,9 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
wait = 1;


- if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
- struct aac_hba_cmd_req *hbacmd =
- (struct aac_hba_cmd_req *)fibptr->hw_fib_va;
+ hbacmd->iu_type = command;

- hbacmd->iu_type = command;
+ if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
/* bit1 of request_id must be 0 */
hbacmd->request_id =
cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d227d8514b25..1bc62294fe6b 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3171,7 +3171,11 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
struct btrfs_transaction *cur_trans;
int ret = 0;

+ spin_lock(&root->fs_info->trans_lock);
cur_trans = root->fs_info->running_transaction;
+ if (cur_trans)
+ refcount_inc(&cur_trans->use_count);
+ spin_unlock(&root->fs_info->trans_lock);
if (!cur_trans)
return 0;

@@ -3180,6 +3184,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);
if (!head) {
spin_unlock(&delayed_refs->lock);
+ btrfs_put_transaction(cur_trans);
return 0;
}

@@ -3196,6 +3201,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
+ btrfs_put_transaction(cur_trans);
return -EAGAIN;
}
spin_unlock(&delayed_refs->lock);
@@ -3223,6 +3229,7 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
}
spin_unlock(&head->lock);
mutex_unlock(&head->mutex);
+ btrfs_put_transaction(cur_trans);
return ret;
}

diff --git a/fs/proc/base.c b/fs/proc/base.c
index 2ff11a693360..dd9d4d3a2e39 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -263,7 +263,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
* Inherently racy -- command line shares address space
* with code and data.
*/
- rv = access_remote_vm(mm, arg_end - 1, &c, 1, 0);
+ rv = access_remote_vm(mm, arg_end - 1, &c, 1, FOLL_ANON);
if (rv <= 0)
goto out_free_page;

@@ -281,7 +281,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
int nr_read;

_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -327,7 +327,7 @@ static ssize_t proc_pid_cmdline_read(struct file *file, char __user *buf,
bool final;

_count = min3(count, len, PAGE_SIZE);
- nr_read = access_remote_vm(mm, p, page, _count, 0);
+ nr_read = access_remote_vm(mm, p, page, _count, FOLL_ANON);
if (nr_read < 0)
rv = nr_read;
if (nr_read <= 0)
@@ -946,7 +946,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
max_len = min_t(size_t, PAGE_SIZE, count);
this_len = min(max_len, this_len);

- retval = access_remote_vm(mm, (env_start + src), page, this_len, 0);
+ retval = access_remote_vm(mm, (env_start + src), page, this_len, FOLL_ANON);

if (retval <= 0) {
ret = retval;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f50deada0f5c..f23215854c80 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2383,6 +2383,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
#define FOLL_COW 0x4000 /* internal GUP flag */
+#define FOLL_ANON 0x8000 /* don't do file mappings */

static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
{
diff --git a/include/net/bonding.h b/include/net/bonding.h
index b2e68657a216..73799da57400 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -198,6 +198,7 @@ struct bonding {
struct slave __rcu *primary_slave;
struct bond_up_slave __rcu *slave_arr; /* Array of usable slaves */
bool force_primary;
+ u32 nest_level;
s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
int (*recv_probe)(const struct sk_buff *, struct bonding *,
struct slave *);
diff --git a/include/net/tls.h b/include/net/tls.h
index df950383b8c1..48940a883d9a 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -98,6 +98,7 @@ struct tls_context {
struct scatterlist *partially_sent_record;
u16 partially_sent_offset;
unsigned long flags;
+ bool in_tcp_sendpages;

u16 pending_open_record_frags;
int (*push_pending_record)(struct sock *sk, int flags);
diff --git a/mm/gup.c b/mm/gup.c
index 8fc23a60487d..d2ba0be71441 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -544,6 +544,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
if (vm_flags & (VM_IO | VM_PFNMAP))
return -EFAULT;

+ if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+ return -EFAULT;
+
if (write) {
if (!(vm_flags & VM_WRITE)) {
if (!(gup_flags & FOLL_FORCE))
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index f3aef22931ab..55a73ef388bf 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -503,8 +503,8 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (dev->netdev_ops->ndo_start_xmit == br_dev_xmit)
return -ELOOP;

- /* Device is already being bridged */
- if (br_port_exists(dev))
+ /* Device has master upper dev */
+ if (netdev_master_upper_dev_get(dev))
return -EBUSY;

/* No bridging devices that dislike that (e.g. wireless) */
diff --git a/net/compat.c b/net/compat.c
index 22381719718c..32ed993588d6 100644
--- a/net/compat.c
+++ b/net/compat.c
@@ -377,7 +377,8 @@ static int compat_sock_setsockopt(struct socket *sock, int level, int optname,
optname == SO_ATTACH_REUSEPORT_CBPF)
return do_set_attach_filter(sock, level, optname,
optval, optlen);
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_set_sock_timeout(sock, level, optname, optval, optlen);

return sock_setsockopt(sock, level, optname, optval, optlen);
@@ -442,7 +443,8 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
static int compat_sock_getsockopt(struct socket *sock, int level, int optname,
char __user *optval, int __user *optlen)
{
- if (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO)
+ if (!COMPAT_USE_64BIT_TIME &&
+ (optname == SO_RCVTIMEO || optname == SO_SNDTIMEO))
return do_get_sock_timeout(sock, level, optname, optval, optlen);
return sock_getsockopt(sock, level, optname, optval, optlen);
}
diff --git a/net/dccp/ccids/ccid2.c b/net/dccp/ccids/ccid2.c
index 97791b0b1b51..3887bc115762 100644
--- a/net/dccp/ccids/ccid2.c
+++ b/net/dccp/ccids/ccid2.c
@@ -126,6 +126,16 @@ static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
DCCPF_SEQ_WMAX));
}

+static void dccp_tasklet_schedule(struct sock *sk)
+{
+ struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
+
+ if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+ sock_hold(sk);
+ __tasklet_schedule(t);
+ }
+}
+
static void ccid2_hc_tx_rto_expire(unsigned long data)
{
struct sock *sk = (struct sock *)data;
@@ -166,7 +176,7 @@ static void ccid2_hc_tx_rto_expire(unsigned long data)

/* if we were blocked before, we may now send cwnd=1 packet */
if (sender_was_blocked)
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
/* restart backed-off timer */
sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
out:
@@ -706,7 +716,7 @@ static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
done:
/* check if incoming Acks allow pending packets to be sent */
if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
- tasklet_schedule(&dccp_sk(sk)->dccps_xmitlet);
+ dccp_tasklet_schedule(sk);
dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
}

diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 3a2c34027758..2a952cbd6efa 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -230,12 +230,12 @@ static void dccp_write_xmitlet(unsigned long data)
else
dccp_write_xmit(sk);
bh_unlock_sock(sk);
+ sock_put(sk);
}

static void dccp_write_xmit_timer(unsigned long data)
{
dccp_write_xmitlet(data);
- sock_put((struct sock *)data);
}

void dccp_init_xmit_timers(struct sock *sk)
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index b8f0db54b197..16226d49263d 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -775,8 +775,10 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipc.addr = faddr = daddr;

if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
}
tos = get_rttos(&ipc, inet);
@@ -842,6 +844,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 5ea559f8c456..28bc3a98adc7 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -711,7 +711,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
fnhe->fnhe_daddr = daddr;
fnhe->fnhe_gw = gw;
fnhe->fnhe_pmtu = pmtu;
- fnhe->fnhe_expires = expires;
+ fnhe->fnhe_expires = max(1UL, expires);

/* Exception created; mark the cached routes for the nexthop
* stale, so anyone caching it rechecks if this exception
@@ -1286,6 +1286,36 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+ struct fnhe_hash_bucket *hash;
+ struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+ u32 hval = fnhe_hashfun(daddr);
+
+ spin_lock_bh(&fnhe_lock);
+
+ hash = rcu_dereference_protected(nh->nh_exceptions,
+ lockdep_is_held(&fnhe_lock));
+ hash += hval;
+
+ fnhe_p = &hash->chain;
+ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+ while (fnhe) {
+ if (fnhe->fnhe_daddr == daddr) {
+ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+ fnhe_flush_routes(fnhe);
+ kfree_rcu(fnhe, rcu);
+ break;
+ }
+ fnhe_p = &fnhe->fnhe_next;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+ lockdep_is_held(&fnhe_lock));
+ }
+
+ spin_unlock_bh(&fnhe_lock);
+}
+
static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
{
struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
@@ -1299,8 +1329,14 @@ static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)

for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
- if (fnhe->fnhe_daddr == daddr)
+ if (fnhe->fnhe_daddr == daddr) {
+ if (fnhe->fnhe_expires &&
+ time_after(jiffies, fnhe->fnhe_expires)) {
+ ip_del_fnhe(nh, daddr);
+ break;
+ }
return fnhe;
+ }
}
return NULL;
}
@@ -1620,36 +1656,6 @@ static void ip_handle_martian_source(struct net_device *dev,
#endif
}

-static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
-{
- struct fnhe_hash_bucket *hash;
- struct fib_nh_exception *fnhe, __rcu **fnhe_p;
- u32 hval = fnhe_hashfun(daddr);
-
- spin_lock_bh(&fnhe_lock);
-
- hash = rcu_dereference_protected(nh->nh_exceptions,
- lockdep_is_held(&fnhe_lock));
- hash += hval;
-
- fnhe_p = &hash->chain;
- fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
- while (fnhe) {
- if (fnhe->fnhe_daddr == daddr) {
- rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
- fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
- fnhe_flush_routes(fnhe);
- kfree_rcu(fnhe, rcu);
- break;
- }
- fnhe_p = &fnhe->fnhe_next;
- fnhe = rcu_dereference_protected(fnhe->fnhe_next,
- lockdep_is_held(&fnhe_lock));
- }
-
- spin_unlock_bh(&fnhe_lock);
-}
-
static void set_lwt_redirect(struct rtable *rth)
{
if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
@@ -1716,20 +1722,10 @@ static int __mkroute_input(struct sk_buff *skb,

fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
- if (fnhe) {
+ if (fnhe)
rth = rcu_dereference(fnhe->fnhe_rth_input);
- if (rth && rth->dst.expires &&
- time_after(jiffies, rth->dst.expires)) {
- ip_del_fnhe(&FIB_RES_NH(*res), daddr);
- fnhe = NULL;
- } else {
- goto rt_cache;
- }
- }
-
- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
-
-rt_cache:
+ else
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
@@ -2206,39 +2202,31 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
* the loopback interface and the IP_PKTINFO ipi_ifindex will
* be set to the loopback interface as well.
*/
- fi = NULL;
+ do_cache = false;
}

fnhe = NULL;
do_cache &= fi != NULL;
- if (do_cache) {
+ if (fi) {
struct rtable __rcu **prth;
struct fib_nh *nh = &FIB_RES_NH(*res);

fnhe = find_exception(nh, fl4->daddr);
+ if (!do_cache)
+ goto add;
if (fnhe) {
prth = &fnhe->fnhe_rth_output;
- rth = rcu_dereference(*prth);
- if (rth && rth->dst.expires &&
- time_after(jiffies, rth->dst.expires)) {
- ip_del_fnhe(nh, fl4->daddr);
- fnhe = NULL;
- } else {
- goto rt_cache;
+ } else {
+ if (unlikely(fl4->flowi4_flags &
+ FLOWI_FLAG_KNOWN_NH &&
+ !(nh->nh_gw &&
+ nh->nh_scope == RT_SCOPE_LINK))) {
+ do_cache = false;
+ goto add;
}
+ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
-
- if (unlikely(fl4->flowi4_flags &
- FLOWI_FLAG_KNOWN_NH &&
- !(nh->nh_gw &&
- nh->nh_scope == RT_SCOPE_LINK))) {
- do_cache = false;
- goto add;
- }
- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
rth = rcu_dereference(*prth);
-
-rt_cache:
if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
return rth;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index b694fbf44a35..e3ece12f0250 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1194,7 +1194,8 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
uarg->zerocopy = 0;
}

- if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
+ if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect) &&
+ !tp->repair) {
err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
diff --git a/net/ipv4/tcp_bbr.c b/net/ipv4/tcp_bbr.c
index 25c5a0b60cfc..9a0b952dd09b 100644
--- a/net/ipv4/tcp_bbr.c
+++ b/net/ipv4/tcp_bbr.c
@@ -802,7 +802,9 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
}
}
}
- bbr->idle_restart = 0;
+ /* Restart after idle ends only once we process a new S/ACK for data */
+ if (rs->delivered > 0)
+ bbr->idle_restart = 0;
}

static void bbr_update_model(struct sock *sk, const struct rate_sample *rs)
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index c79fa6f6b758..b0ad62bd38f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -413,9 +413,9 @@ static int compute_score(struct sock *sk, struct net *net,
bool dev_match = (sk->sk_bound_dev_if == dif ||
sk->sk_bound_dev_if == sdif);

- if (exact_dif && !dev_match)
+ if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if && dev_match)
+ if (sk->sk_bound_dev_if)
score += 4;
}

@@ -978,8 +978,10 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
sock_tx_timestamp(sk, ipc.sockc.tsflags, &ipc.tx_flags);

if (ipc.opt && ipc.opt->opt.srr) {
- if (!daddr)
- return -EINVAL;
+ if (!daddr) {
+ err = -EINVAL;
+ goto out_free;
+ }
faddr = ipc.opt->opt.faddr;
connected = 0;
}
@@ -1087,6 +1089,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)

out:
ip_rt_put(rt);
+out_free:
if (free)
kfree(ipc.opt);
if (!err)
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index e04c534b573e..7d50d889ab6e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1222,11 +1222,16 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
const struct ipv6hdr *inner_iph;
const struct icmp6hdr *icmph;
struct ipv6hdr _inner_iph;
+ struct icmp6hdr _icmph;

if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
goto out;

- icmph = icmp6_hdr(skb);
+ icmph = skb_header_pointer(skb, skb_transport_offset(skb),
+ sizeof(_icmph), &_icmph);
+ if (!icmph)
+ goto out;
+
if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 40d7234c27b9..0146dcdc5c40 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -164,9 +164,9 @@ static int compute_score(struct sock *sk, struct net *net,
bool dev_match = (sk->sk_bound_dev_if == dif ||
sk->sk_bound_dev_if == sdif);

- if (exact_dif && !dev_match)
+ if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if && dev_match)
+ if (sk->sk_bound_dev_if)
score++;
}

diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index fca69c3771f5..c28223d8092b 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -765,8 +765,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl

if ((session->ifname[0] &&
nla_put_string(skb, L2TP_ATTR_IFNAME, session->ifname)) ||
- (session->offset &&
- nla_put_u16(skb, L2TP_ATTR_OFFSET, session->offset)) ||
(session->cookie_len &&
nla_put(skb, L2TP_ATTR_COOKIE, session->cookie_len,
&session->cookie[0])) ||
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index cf41d9b4a0b8..b49f5afab405 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -930,6 +930,9 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
if (size > llc->dev->mtu)
size = llc->dev->mtu;
copied = size - hdrlen;
+ rc = -EINVAL;
+ if (copied < 0)
+ goto release;
release_sock(sk);
skb = sock_alloc_send_skb(sk, size, noblock, &rc);
lock_sock(sk);
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index 58fb827439a8..6df6f58a8103 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -30,6 +30,8 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN)))
goto out;
nsh_len = nsh_hdr_len(nsh_hdr(skb));
+ if (nsh_len < NSH_BASE_HDR_LEN)
+ goto out;
if (unlikely(!pskb_may_pull(skb, nsh_len)))
goto out;

diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 0d9f6afa266c..4c9c9458374a 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1404,13 +1404,10 @@ static void nlattr_set(struct nlattr *attr, u8 val,

/* The nlattr stream should already have been validated */
nla_for_each_nested(nla, attr, rem) {
- if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED) {
- if (tbl[nla_type(nla)].next)
- tbl = tbl[nla_type(nla)].next;
- nlattr_set(nla, val, tbl);
- } else {
+ if (tbl[nla_type(nla)].len == OVS_ATTR_NESTED)
+ nlattr_set(nla, val, tbl[nla_type(nla)].next ? : tbl);
+ else
memset(nla_data(nla), val, nla_len(nla));
- }

if (nla_type(nla) == OVS_KEY_ATTR_CT_STATE)
*(u32 *)nla_data(nla) &= CT_SUPPORTED_MASK;
diff --git a/net/rds/recv.c b/net/rds/recv.c
index b25bcfe411ca..555f07ccf0dc 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -558,6 +558,7 @@ static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg,
struct rds_cmsg_rx_trace t;
int i, j;

+ memset(&t, 0, sizeof(t));
inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock();
t.rx_traces = rs->rs_rx_traces;
for (i = 0; i < rs->rs_rx_traces; i++) {
diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c
index 6d10b3af479b..821823b2518a 100644
--- a/net/sched/act_skbmod.c
+++ b/net/sched/act_skbmod.c
@@ -131,8 +131,11 @@ static int tcf_skbmod_init(struct net *net, struct nlattr *nla,
if (exists && bind)
return 0;

- if (!lflags)
+ if (!lflags) {
+ if (exists)
+ tcf_idr_release(*a, bind);
return -EINVAL;
+ }

if (!exists) {
ret = tcf_idr_create(tn, parm->index, est, a,
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index c2fab4bcb8be..2f4e1483aced 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -151,8 +151,8 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
} else {
err = -ENOENT;
}
- goto errout;
#endif
+ goto errout;
}
tp->classify = tp->ops->classify;
tp->protocol = protocol;
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 263d16e3219e..f50eb87cfe79 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -128,6 +128,28 @@ static bool fq_flow_is_detached(const struct fq_flow *f)
return f->next == &detached;
}

+static bool fq_flow_is_throttled(const struct fq_flow *f)
+{
+ return f->next == &throttled;
+}
+
+static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
+{
+ if (head->first)
+ head->last->next = flow;
+ else
+ head->first = flow;
+ head->last = flow;
+ flow->next = NULL;
+}
+
+static void fq_flow_unset_throttled(struct fq_sched_data *q, struct fq_flow *f)
+{
+ rb_erase(&f->rate_node, &q->delayed);
+ q->throttled_flows--;
+ fq_flow_add_tail(&q->old_flows, f);
+}
+
static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)
{
struct rb_node **p = &q->delayed.rb_node, *parent = NULL;
@@ -155,15 +177,6 @@ static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f)

static struct kmem_cache *fq_flow_cachep __read_mostly;

-static void fq_flow_add_tail(struct fq_flow_head *head, struct fq_flow *flow)
-{
- if (head->first)
- head->last->next = flow;
- else
- head->first = flow;
- head->last = flow;
- flow->next = NULL;
-}

/* limit number of collected flows per round */
#define FQ_GC_MAX 8
@@ -267,6 +280,8 @@ static struct fq_flow *fq_classify(struct sk_buff *skb, struct fq_sched_data *q)
f->socket_hash != sk->sk_hash)) {
f->credit = q->initial_quantum;
f->socket_hash = sk->sk_hash;
+ if (fq_flow_is_throttled(f))
+ fq_flow_unset_throttled(q, f);
f->time_next_packet = 0ULL;
}
return f;
@@ -438,9 +453,7 @@ static void fq_check_throttled(struct fq_sched_data *q, u64 now)
q->time_next_delayed_flow = f->time_next_packet;
break;
}
- rb_erase(p, &q->delayed);
- q->throttled_flows--;
- fq_flow_add_tail(&q->old_flows, f);
+ fq_flow_unset_throttled(q, f);
}
}

diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index dfb9651e818b..58f7d8cfd748 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1025,8 +1025,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
struct sctp_endpoint *ep;
struct sctp_chunk *chunk;
struct sctp_inq *inqueue;
- int state;
+ int first_time = 1; /* is this the first time through the loop */
int error = 0;
+ int state;

/* The association should be held so we should be safe. */
ep = asoc->ep;
@@ -1037,6 +1038,30 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
state = asoc->state;
subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type);

+ /* If the first chunk in the packet is AUTH, do special
+ * processing specified in Section 6.3 of SCTP-AUTH spec
+ */
+ if (first_time && subtype.chunk == SCTP_CID_AUTH) {
+ struct sctp_chunkhdr *next_hdr;
+
+ next_hdr = sctp_inq_peek(inqueue);
+ if (!next_hdr)
+ goto normal;
+
+ /* If the next chunk is COOKIE-ECHO, skip the AUTH
+ * chunk while saving a pointer to it so we can do
+ * Authentication later (during cookie-echo
+ * processing).
+ */
+ if (next_hdr->type == SCTP_CID_COOKIE_ECHO) {
+ chunk->auth_chunk = skb_clone(chunk->skb,
+ GFP_ATOMIC);
+ chunk->auth = 1;
+ continue;
+ }
+ }
+
+normal:
/* SCTP-AUTH, Section 6.3:
* The receiver has a list of chunk types which it expects
* to be received only after an AUTH-chunk. This list has
@@ -1075,6 +1100,9 @@ static void sctp_assoc_bh_rcv(struct work_struct *work)
/* If there is an error on chunk, discard this packet. */
if (error && chunk)
chunk->pdiscard = 1;
+
+ if (first_time)
+ first_time = 0;
}
sctp_association_put(asoc);
}
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c
index 48392552ee7c..1aa89d4682f4 100644
--- a/net/sctp/inqueue.c
+++ b/net/sctp/inqueue.c
@@ -217,7 +217,7 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
skb_pull(chunk->skb, sizeof(*ch));
chunk->subh.v = NULL; /* Subheader is no longer valid. */

- if (chunk->chunk_end + sizeof(*ch) < skb_tail_pointer(chunk->skb)) {
+ if (chunk->chunk_end + sizeof(*ch) <= skb_tail_pointer(chunk->skb)) {
/* This is not a singleton */
chunk->singleton = 0;
} else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 7219a1c041f7..853fecdf6374 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -865,6 +865,9 @@ static int sctp_inet6_cmp_addr(const union sctp_addr *addr1,
if (sctp_is_any(sk, addr1) || sctp_is_any(sk, addr2))
return 1;

+ if (addr1->sa.sa_family == AF_INET && addr2->sa.sa_family == AF_INET)
+ return addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr;
+
return __sctp_v6_cmp_addr(addr1, addr2);
}

diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 8f8ccded13e4..01b078172306 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -150,10 +150,7 @@ static enum sctp_disposition sctp_sf_violation_chunk(
struct sctp_cmd_seq *commands);

static enum sctp_ierror sctp_sf_authenticate(
- struct net *net,
- const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
- const union sctp_subtype type,
struct sctp_chunk *chunk);

static enum sctp_disposition __sctp_sf_do_9_1_abort(
@@ -618,6 +615,38 @@ enum sctp_disposition sctp_sf_do_5_1C_ack(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}

+static bool sctp_auth_chunk_verify(struct net *net, struct sctp_chunk *chunk,
+ const struct sctp_association *asoc)
+{
+ struct sctp_chunk auth;
+
+ if (!chunk->auth_chunk)
+ return true;
+
+ /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
+ * is supposed to be authenticated and we have to do delayed
+ * authentication. We've just recreated the association using
+ * the information in the cookie and now it's much easier to
+ * do the authentication.
+ */
+
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
+ return false;
+
+ /* set-up our fake chunk so that we can process it */
+ auth.skb = chunk->auth_chunk;
+ auth.asoc = chunk->asoc;
+ auth.sctp_hdr = chunk->sctp_hdr;
+ auth.chunk_hdr = (struct sctp_chunkhdr *)
+ skb_push(chunk->auth_chunk,
+ sizeof(struct sctp_chunkhdr));
+ skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
+ auth.transport = chunk->transport;
+
+ return sctp_sf_authenticate(asoc, &auth) == SCTP_IERROR_NO_ERROR;
+}
+
/*
* Respond to a normal COOKIE ECHO chunk.
* We are the side that is being asked for an association.
@@ -755,37 +784,9 @@ enum sctp_disposition sctp_sf_do_5_1D_ce(struct net *net,
if (error)
goto nomem_init;

- /* SCTP-AUTH: auth_chunk pointer is only set when the cookie-echo
- * is supposed to be authenticated and we have to do delayed
- * authentication. We've just recreated the association using
- * the information in the cookie and now it's much easier to
- * do the authentication.
- */
- if (chunk->auth_chunk) {
- struct sctp_chunk auth;
- enum sctp_ierror ret;
-
- /* Make sure that we and the peer are AUTH capable */
- if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
-
- /* set-up our fake chunk so that we can process it */
- auth.skb = chunk->auth_chunk;
- auth.asoc = chunk->asoc;
- auth.sctp_hdr = chunk->sctp_hdr;
- auth.chunk_hdr = (struct sctp_chunkhdr *)
- skb_push(chunk->auth_chunk,
- sizeof(struct sctp_chunkhdr));
- skb_pull(chunk->auth_chunk, sizeof(struct sctp_chunkhdr));
- auth.transport = chunk->transport;
-
- ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
- if (ret != SCTP_IERROR_NO_ERROR) {
- sctp_association_free(new_asoc);
- return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
- }
+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc)) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
}

repl = sctp_make_cookie_ack(new_asoc, chunk);
@@ -1755,13 +1756,15 @@ static enum sctp_disposition sctp_sf_do_dupcook_a(
GFP_ATOMIC))
goto nomem;

+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Make sure no new addresses are being added during the
* restart. Though this is a pretty complicated attack
* since you'd have to get inside the cookie.
*/
- if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) {
+ if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands))
return SCTP_DISPOSITION_CONSUME;
- }

/* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes
* the peer has restarted (Action A), it MUST NOT setup a new
@@ -1867,6 +1870,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_b(
GFP_ATOMIC))
goto nomem;

+ if (!sctp_auth_chunk_verify(net, chunk, new_asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Update the content of current association. */
sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc));
sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE,
@@ -1961,6 +1967,9 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
* a COOKIE ACK.
*/

+ if (!sctp_auth_chunk_verify(net, chunk, asoc))
+ return SCTP_DISPOSITION_DISCARD;
+
/* Don't accidentally move back into established state. */
if (asoc->state < SCTP_STATE_ESTABLISHED) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
@@ -2000,7 +2009,7 @@ static enum sctp_disposition sctp_sf_do_dupcook_d(
}
}

- repl = sctp_make_cookie_ack(new_asoc, chunk);
+ repl = sctp_make_cookie_ack(asoc, chunk);
if (!repl)
goto nomem;

@@ -4111,10 +4120,7 @@ enum sctp_disposition sctp_sf_eat_fwd_tsn_fast(
* The return value is the disposition of the chunk.
*/
static enum sctp_ierror sctp_sf_authenticate(
- struct net *net,
- const struct sctp_endpoint *ep,
const struct sctp_association *asoc,
- const union sctp_subtype type,
struct sctp_chunk *chunk)
{
struct sctp_authhdr *auth_hdr;
@@ -4212,7 +4218,7 @@ enum sctp_disposition sctp_sf_eat_auth(struct net *net,
commands);

auth_hdr = (struct sctp_authhdr *)chunk->skb->data;
- error = sctp_sf_authenticate(net, ep, asoc, type, chunk);
+ error = sctp_sf_authenticate(asoc, chunk);
switch (error) {
case SCTP_IERROR_AUTH_BAD_HMAC:
/* Generate the ERROR chunk and discard the rest
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 5447228bf1a0..8538c96c96c1 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -717,7 +717,6 @@ struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc,
return event;

fail_mark:
- sctp_chunk_put(chunk);
kfree_skb(skb);
fail:
return NULL;
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index 282361ac0263..dfef930d1e50 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -87,6 +87,7 @@ int tls_push_sg(struct sock *sk,
size = sg->length - offset;
offset += sg->offset;

+ ctx->in_tcp_sendpages = true;
while (1) {
if (sg_is_last(sg))
sendpage_flags = flags;
@@ -107,6 +108,7 @@ int tls_push_sg(struct sock *sk,
offset -= sg->offset;
ctx->partially_sent_offset = offset;
ctx->partially_sent_record = (void *)sg;
+ ctx->in_tcp_sendpages = false;
return ret;
}

@@ -121,6 +123,8 @@ int tls_push_sg(struct sock *sk,
}

clear_bit(TLS_PENDING_CLOSED_RECORD, &ctx->flags);
+ ctx->in_tcp_sendpages = false;
+ ctx->sk_write_space(sk);

return 0;
}
@@ -190,6 +194,10 @@ static void tls_write_space(struct sock *sk)
{
struct tls_context *ctx = tls_get_ctx(sk);

+ /* We are already sending pages, ignore notification */
+ if (ctx->in_tcp_sendpages)
+ return;
+
if (!sk->sk_write_pending && tls_is_pending_closed_record(ctx)) {
gfp_t sk_allocation = sk->sk_allocation;
int rc;
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 3f6f6f8c9fa5..5b2409746ae0 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -518,7 +518,7 @@ int xfrm_trans_queue(struct sk_buff *skb,
return -ENOBUFS;

XFRM_TRANS_SKB_CB(skb)->finish = finish;
- skb_queue_tail(&trans->queue, skb);
+ __skb_queue_tail(&trans->queue, skb);
tasklet_schedule(&trans->tasklet);
return 0;
}
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 8f13fb57eab5..6c4ec69e11a0 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1345,6 +1345,7 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig,

if (orig->aead) {
x->aead = xfrm_algo_aead_clone(orig->aead);
+ x->geniv = orig->geniv;
if (!x->aead)
goto error;
}