Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0148-4.14.49-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 44641 byte(s)
-added up to patches-4.14.79
1 niro 3238 diff --git a/Documentation/networking/netdev-FAQ.txt b/Documentation/networking/netdev-FAQ.txt
2     index cfc66ea72329..a365656e4873 100644
3     --- a/Documentation/networking/netdev-FAQ.txt
4     +++ b/Documentation/networking/netdev-FAQ.txt
5     @@ -176,6 +176,15 @@ A: No. See above answer. In short, if you think it really belongs in
6     dash marker line as described in Documentation/process/submitting-patches.rst to
7     temporarily embed that information into the patch that you send.
8    
9     +Q: Are all networking bug fixes backported to all stable releases?
10     +
11     +A: Due to capacity, Dave could only take care of the backports for the last
12     + 2 stable releases. For earlier stable releases, each stable branch maintainer
13     + is supposed to take care of them. If you find any patch is missing from an
14     + earlier stable branch, please notify stable@vger.kernel.org with either a
15     + commit ID or a formal patch backported, and CC Dave and other relevant
16     + networking developers.
17     +
18     Q: Someone said that the comment style and coding convention is different
19     for the networking content. Is this true?
20    
21     diff --git a/Makefile b/Makefile
22     index 7a246f1ce44e..480ae7ef755c 100644
23     --- a/Makefile
24     +++ b/Makefile
25     @@ -1,7 +1,7 @@
26     # SPDX-License-Identifier: GPL-2.0
27     VERSION = 4
28     PATCHLEVEL = 14
29     -SUBLEVEL = 48
30     +SUBLEVEL = 49
31     EXTRAVERSION =
32     NAME = Petit Gorille
33    
34     diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
35     index b3c6e997ccdb..03244b3c985d 100644
36     --- a/drivers/gpu/drm/drm_file.c
37     +++ b/drivers/gpu/drm/drm_file.c
38     @@ -212,6 +212,7 @@ static int drm_open_helper(struct file *filp, struct drm_minor *minor)
39     return -ENOMEM;
40    
41     filp->private_data = priv;
42     + filp->f_mode |= FMODE_UNSIGNED_OFFSET;
43     priv->filp = filp;
44     priv->pid = get_pid(task_pid(current));
45     priv->minor = minor;
46     diff --git a/drivers/isdn/hardware/eicon/diva.c b/drivers/isdn/hardware/eicon/diva.c
47     index 944a7f338099..1b25d8bc153a 100644
48     --- a/drivers/isdn/hardware/eicon/diva.c
49     +++ b/drivers/isdn/hardware/eicon/diva.c
50     @@ -388,10 +388,10 @@ void divasa_xdi_driver_unload(void)
51     ** Receive and process command from user mode utility
52     */
53     void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
54     - int length,
55     + int length, void *mptr,
56     divas_xdi_copy_from_user_fn_t cp_fn)
57     {
58     - diva_xdi_um_cfg_cmd_t msg;
59     + diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
60     diva_os_xdi_adapter_t *a = NULL;
61     diva_os_spin_lock_magic_t old_irql;
62     struct list_head *tmp;
63     @@ -401,21 +401,21 @@ void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
64     length, sizeof(diva_xdi_um_cfg_cmd_t)))
65     return NULL;
66     }
67     - if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
68     + if ((*cp_fn) (os_handle, msg, src, sizeof(*msg)) <= 0) {
69     DBG_ERR(("A: A(?) open, write error"))
70     return NULL;
71     }
72     diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
73     list_for_each(tmp, &adapter_queue) {
74     a = list_entry(tmp, diva_os_xdi_adapter_t, link);
75     - if (a->controller == (int)msg.adapter)
76     + if (a->controller == (int)msg->adapter)
77     break;
78     a = NULL;
79     }
80     diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
81    
82     if (!a) {
83     - DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
84     + DBG_ERR(("A: A(%d) open, adapter not found", msg->adapter))
85     }
86    
87     return (a);
88     @@ -437,8 +437,10 @@ void diva_xdi_close_adapter(void *adapter, void *os_handle)
89    
90     int
91     diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
92     - int length, divas_xdi_copy_from_user_fn_t cp_fn)
93     + int length, void *mptr,
94     + divas_xdi_copy_from_user_fn_t cp_fn)
95     {
96     + diva_xdi_um_cfg_cmd_t *msg = (diva_xdi_um_cfg_cmd_t *)mptr;
97     diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
98     void *data;
99    
100     @@ -459,7 +461,13 @@ diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
101     return (-2);
102     }
103    
104     - length = (*cp_fn) (os_handle, data, src, length);
105     + if (msg) {
106     + *(diva_xdi_um_cfg_cmd_t *)data = *msg;
107     + length = (*cp_fn) (os_handle, (char *)data + sizeof(*msg),
108     + src + sizeof(*msg), length - sizeof(*msg));
109     + } else {
110     + length = (*cp_fn) (os_handle, data, src, length);
111     + }
112     if (length > 0) {
113     if ((*(a->interface.cmd_proc))
114     (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
115     diff --git a/drivers/isdn/hardware/eicon/diva.h b/drivers/isdn/hardware/eicon/diva.h
116     index b067032093a8..1ad76650fbf9 100644
117     --- a/drivers/isdn/hardware/eicon/diva.h
118     +++ b/drivers/isdn/hardware/eicon/diva.h
119     @@ -20,10 +20,11 @@ int diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
120     int max_length, divas_xdi_copy_to_user_fn_t cp_fn);
121    
122     int diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
123     - int length, divas_xdi_copy_from_user_fn_t cp_fn);
124     + int length, void *msg,
125     + divas_xdi_copy_from_user_fn_t cp_fn);
126    
127     void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
128     - int length,
129     + int length, void *msg,
130     divas_xdi_copy_from_user_fn_t cp_fn);
131    
132     void diva_xdi_close_adapter(void *adapter, void *os_handle);
133     diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c
134     index b2023e08dcd2..932e98d0d901 100644
135     --- a/drivers/isdn/hardware/eicon/divasmain.c
136     +++ b/drivers/isdn/hardware/eicon/divasmain.c
137     @@ -591,19 +591,22 @@ static int divas_release(struct inode *inode, struct file *file)
138     static ssize_t divas_write(struct file *file, const char __user *buf,
139     size_t count, loff_t *ppos)
140     {
141     + diva_xdi_um_cfg_cmd_t msg;
142     int ret = -EINVAL;
143    
144     if (!file->private_data) {
145     file->private_data = diva_xdi_open_adapter(file, buf,
146     - count,
147     + count, &msg,
148     xdi_copy_from_user);
149     - }
150     - if (!file->private_data) {
151     - return (-ENODEV);
152     + if (!file->private_data)
153     + return (-ENODEV);
154     + ret = diva_xdi_write(file->private_data, file,
155     + buf, count, &msg, xdi_copy_from_user);
156     + } else {
157     + ret = diva_xdi_write(file->private_data, file,
158     + buf, count, NULL, xdi_copy_from_user);
159     }
160    
161     - ret = diva_xdi_write(file->private_data, file,
162     - buf, count, xdi_copy_from_user);
163     switch (ret) {
164     case -1: /* Message should be removed from rx mailbox first */
165     ret = -EBUSY;
166     @@ -622,11 +625,12 @@ static ssize_t divas_write(struct file *file, const char __user *buf,
167     static ssize_t divas_read(struct file *file, char __user *buf,
168     size_t count, loff_t *ppos)
169     {
170     + diva_xdi_um_cfg_cmd_t msg;
171     int ret = -EINVAL;
172    
173     if (!file->private_data) {
174     file->private_data = diva_xdi_open_adapter(file, buf,
175     - count,
176     + count, &msg,
177     xdi_copy_from_user);
178     }
179     if (!file->private_data) {
180     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
181     index 7dd83d0ef0a0..22243c480a05 100644
182     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
183     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
184     @@ -588,7 +588,7 @@ static void bnx2x_ets_e3b0_nig_disabled(const struct link_params *params,
185     * slots for the highest priority.
186     */
187     REG_WR(bp, (port) ? NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS :
188     - NIG_REG_P1_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
189     + NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
190     /* Mapping between the CREDIT_WEIGHT registers and actual client
191     * numbers
192     */
193     diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
194     index aef40f02c77f..a03a32a4ffca 100644
195     --- a/drivers/net/ethernet/cisco/enic/enic_main.c
196     +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
197     @@ -2703,11 +2703,11 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
198     pci_set_master(pdev);
199    
200     /* Query PCI controller on system for DMA addressing
201     - * limitation for the device. Try 64-bit first, and
202     + * limitation for the device. Try 47-bit first, and
203     * fail to 32-bit.
204     */
205    
206     - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
207     + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(47));
208     if (err) {
209     err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
210     if (err) {
211     @@ -2721,10 +2721,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
212     goto err_out_release_regions;
213     }
214     } else {
215     - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
216     + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(47));
217     if (err) {
218     dev_err(dev, "Unable to obtain %u-bit DMA "
219     - "for consistent allocations, aborting\n", 64);
220     + "for consistent allocations, aborting\n", 47);
221     goto err_out_release_regions;
222     }
223     using_dac = 1;
224     diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
225     index 1b03c32afc1f..7e2b70c2bba3 100644
226     --- a/drivers/net/ethernet/emulex/benet/be_main.c
227     +++ b/drivers/net/ethernet/emulex/benet/be_main.c
228     @@ -3294,7 +3294,9 @@ void be_detect_error(struct be_adapter *adapter)
229     if ((val & POST_STAGE_FAT_LOG_START)
230     != POST_STAGE_FAT_LOG_START &&
231     (val & POST_STAGE_ARMFW_UE)
232     - != POST_STAGE_ARMFW_UE)
233     + != POST_STAGE_ARMFW_UE &&
234     + (val & POST_STAGE_RECOVERABLE_ERR)
235     + != POST_STAGE_RECOVERABLE_ERR)
236     return;
237     }
238    
239     diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
240     index 22a3bfe1ed8f..73419224367a 100644
241     --- a/drivers/net/ethernet/mellanox/mlx4/qp.c
242     +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
243     @@ -393,11 +393,11 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
244     struct mlx4_qp_table *qp_table = &mlx4_priv(dev)->qp_table;
245     struct mlx4_qp *qp;
246    
247     - spin_lock(&qp_table->lock);
248     + spin_lock_irq(&qp_table->lock);
249    
250     qp = __mlx4_qp_lookup(dev, qpn);
251    
252     - spin_unlock(&qp_table->lock);
253     + spin_unlock_irq(&qp_table->lock);
254     return qp;
255     }
256    
257     diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
258     index 3476f594c195..8285e6d24f30 100644
259     --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
260     +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
261     @@ -635,6 +635,45 @@ static inline bool is_first_ethertype_ip(struct sk_buff *skb)
262     return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
263     }
264    
265     +static __be32 mlx5e_get_fcs(struct sk_buff *skb)
266     +{
267     + int last_frag_sz, bytes_in_prev, nr_frags;
268     + u8 *fcs_p1, *fcs_p2;
269     + skb_frag_t *last_frag;
270     + __be32 fcs_bytes;
271     +
272     + if (!skb_is_nonlinear(skb))
273     + return *(__be32 *)(skb->data + skb->len - ETH_FCS_LEN);
274     +
275     + nr_frags = skb_shinfo(skb)->nr_frags;
276     + last_frag = &skb_shinfo(skb)->frags[nr_frags - 1];
277     + last_frag_sz = skb_frag_size(last_frag);
278     +
279     + /* If all FCS data is in last frag */
280     + if (last_frag_sz >= ETH_FCS_LEN)
281     + return *(__be32 *)(skb_frag_address(last_frag) +
282     + last_frag_sz - ETH_FCS_LEN);
283     +
284     + fcs_p2 = (u8 *)skb_frag_address(last_frag);
285     + bytes_in_prev = ETH_FCS_LEN - last_frag_sz;
286     +
287     + /* Find where the other part of the FCS is - Linear or another frag */
288     + if (nr_frags == 1) {
289     + fcs_p1 = skb_tail_pointer(skb);
290     + } else {
291     + skb_frag_t *prev_frag = &skb_shinfo(skb)->frags[nr_frags - 2];
292     +
293     + fcs_p1 = skb_frag_address(prev_frag) +
294     + skb_frag_size(prev_frag);
295     + }
296     + fcs_p1 -= bytes_in_prev;
297     +
298     + memcpy(&fcs_bytes, fcs_p1, bytes_in_prev);
299     + memcpy(((u8 *)&fcs_bytes) + bytes_in_prev, fcs_p2, last_frag_sz);
300     +
301     + return fcs_bytes;
302     +}
303     +
304     static inline void mlx5e_handle_csum(struct net_device *netdev,
305     struct mlx5_cqe64 *cqe,
306     struct mlx5e_rq *rq,
307     @@ -653,6 +692,9 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
308     if (is_first_ethertype_ip(skb)) {
309     skb->ip_summed = CHECKSUM_COMPLETE;
310     skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
311     + if (unlikely(netdev->features & NETIF_F_RXFCS))
312     + skb->csum = csum_add(skb->csum,
313     + (__force __wsum)mlx5e_get_fcs(skb));
314     rq->stats.csum_complete++;
315     return;
316     }
317     diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
318     index 629bfa0cd3f0..27ba476f761d 100644
319     --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
320     +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
321     @@ -77,7 +77,7 @@
322     #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_ ## cli ## _ ## reg ## _RT_OFFSET
323    
324     /* ILT entry structure */
325     -#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
326     +#define ILT_ENTRY_PHY_ADDR_MASK (~0ULL >> 12)
327     #define ILT_ENTRY_PHY_ADDR_SHIFT 0
328     #define ILT_ENTRY_VALID_MASK 0x1ULL
329     #define ILT_ENTRY_VALID_SHIFT 52
330     diff --git a/drivers/net/phy/bcm-cygnus.c b/drivers/net/phy/bcm-cygnus.c
331     index 3fe8cc5c177e..9b27ca264c66 100644
332     --- a/drivers/net/phy/bcm-cygnus.c
333     +++ b/drivers/net/phy/bcm-cygnus.c
334     @@ -61,17 +61,17 @@ static int bcm_cygnus_afe_config(struct phy_device *phydev)
335     return rc;
336    
337     /* make rcal=100, since rdb default is 000 */
338     - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB1, 0x10);
339     + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB1, 0x10);
340     if (rc < 0)
341     return rc;
342    
343     /* CORE_EXPB0, Reset R_CAL/RC_CAL Engine */
344     - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x10);
345     + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x10);
346     if (rc < 0)
347     return rc;
348    
349     /* CORE_EXPB0, Disable Reset R_CAL/RC_CAL Engine */
350     - rc = bcm_phy_write_exp(phydev, MII_BRCM_CORE_EXPB0, 0x00);
351     + rc = bcm_phy_write_exp_sel(phydev, MII_BRCM_CORE_EXPB0, 0x00);
352    
353     return 0;
354     }
355     diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c
356     index 171010eb4d9c..8d96c6f048d0 100644
357     --- a/drivers/net/phy/bcm-phy-lib.c
358     +++ b/drivers/net/phy/bcm-phy-lib.c
359     @@ -56,7 +56,7 @@ int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
360     /* The register must be written to both the Shadow Register Select and
361     * the Shadow Read Register Selector
362     */
363     - phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum |
364     + phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
365     regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
366     return phy_read(phydev, MII_BCM54XX_AUX_CTL);
367     }
368     diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h
369     index 7c73808cbbde..81cceaa412fe 100644
370     --- a/drivers/net/phy/bcm-phy-lib.h
371     +++ b/drivers/net/phy/bcm-phy-lib.h
372     @@ -14,11 +14,18 @@
373     #ifndef _LINUX_BCM_PHY_LIB_H
374     #define _LINUX_BCM_PHY_LIB_H
375    
376     +#include <linux/brcmphy.h>
377     #include <linux/phy.h>
378    
379     int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val);
380     int bcm_phy_read_exp(struct phy_device *phydev, u16 reg);
381    
382     +static inline int bcm_phy_write_exp_sel(struct phy_device *phydev,
383     + u16 reg, u16 val)
384     +{
385     + return bcm_phy_write_exp(phydev, reg | MII_BCM54XX_EXP_SEL_ER, val);
386     +}
387     +
388     int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val);
389     int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum);
390    
391     diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
392     index 8b33f688ac8a..3c5b2a2e2fcc 100644
393     --- a/drivers/net/phy/bcm7xxx.c
394     +++ b/drivers/net/phy/bcm7xxx.c
395     @@ -65,10 +65,10 @@ struct bcm7xxx_phy_priv {
396     static void r_rc_cal_reset(struct phy_device *phydev)
397     {
398     /* Reset R_CAL/RC_CAL Engine */
399     - bcm_phy_write_exp(phydev, 0x00b0, 0x0010);
400     + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);
401    
402     /* Disable Reset R_AL/RC_CAL Engine */
403     - bcm_phy_write_exp(phydev, 0x00b0, 0x0000);
404     + bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
405     }
406    
407     static int bcm7xxx_28nm_b0_afe_config_init(struct phy_device *phydev)
408     diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
409     index 8a222ae5950e..83c591713837 100644
410     --- a/drivers/net/team/team.c
411     +++ b/drivers/net/team/team.c
412     @@ -1004,7 +1004,8 @@ static void team_port_disable(struct team *team,
413     static void __team_compute_features(struct team *team)
414     {
415     struct team_port *port;
416     - u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
417     + netdev_features_t vlan_features = TEAM_VLAN_FEATURES &
418     + NETIF_F_ALL_FOR_ALL;
419     netdev_features_t enc_features = TEAM_ENC_FEATURES;
420     unsigned short max_hard_header_len = ETH_HLEN;
421     unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
422     diff --git a/drivers/net/tun.c b/drivers/net/tun.c
423     index bc38d54e37b9..3d9ad11e4f28 100644
424     --- a/drivers/net/tun.c
425     +++ b/drivers/net/tun.c
426     @@ -1315,7 +1315,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
427     else
428     *skb_xdp = 0;
429    
430     - preempt_disable();
431     + local_bh_disable();
432     rcu_read_lock();
433     xdp_prog = rcu_dereference(tun->xdp_prog);
434     if (xdp_prog && !*skb_xdp) {
435     @@ -1338,7 +1338,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
436     if (err)
437     goto err_redirect;
438     rcu_read_unlock();
439     - preempt_enable();
440     + local_bh_enable();
441     return NULL;
442     case XDP_TX:
443     xdp_xmit = true;
444     @@ -1360,7 +1360,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
445     skb = build_skb(buf, buflen);
446     if (!skb) {
447     rcu_read_unlock();
448     - preempt_enable();
449     + local_bh_enable();
450     return ERR_PTR(-ENOMEM);
451     }
452    
453     @@ -1373,12 +1373,12 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
454     skb->dev = tun->dev;
455     generic_xdp_tx(skb, xdp_prog);
456     rcu_read_unlock();
457     - preempt_enable();
458     + local_bh_enable();
459     return NULL;
460     }
461    
462     rcu_read_unlock();
463     - preempt_enable();
464     + local_bh_enable();
465    
466     return skb;
467    
468     @@ -1386,7 +1386,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
469     put_page(alloc_frag->page);
470     err_xdp:
471     rcu_read_unlock();
472     - preempt_enable();
473     + local_bh_enable();
474     this_cpu_inc(tun->pcpu_stats->rx_dropped);
475     return NULL;
476     }
477     @@ -1556,16 +1556,19 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
478     struct bpf_prog *xdp_prog;
479     int ret;
480    
481     + local_bh_disable();
482     rcu_read_lock();
483     xdp_prog = rcu_dereference(tun->xdp_prog);
484     if (xdp_prog) {
485     ret = do_xdp_generic(xdp_prog, skb);
486     if (ret != XDP_PASS) {
487     rcu_read_unlock();
488     + local_bh_enable();
489     return total_len;
490     }
491     }
492     rcu_read_unlock();
493     + local_bh_enable();
494     }
495    
496     rxhash = __skb_get_hash_symmetric(skb);
497     diff --git a/drivers/net/usb/cdc_mbim.c b/drivers/net/usb/cdc_mbim.c
498     index 7220cd620717..0362acd5cdca 100644
499     --- a/drivers/net/usb/cdc_mbim.c
500     +++ b/drivers/net/usb/cdc_mbim.c
501     @@ -609,7 +609,7 @@ static const struct driver_info cdc_mbim_info_ndp_to_end = {
502     */
503     static const struct driver_info cdc_mbim_info_avoid_altsetting_toggle = {
504     .description = "CDC MBIM",
505     - .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN,
506     + .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET | FLAG_WWAN | FLAG_SEND_ZLP,
507     .bind = cdc_mbim_bind,
508     .unbind = cdc_mbim_unbind,
509     .manage_power = cdc_mbim_manage_power,
510     diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
511     index 948611317c97..9e93e7a5df7e 100644
512     --- a/drivers/net/virtio_net.c
513     +++ b/drivers/net/virtio_net.c
514     @@ -632,6 +632,13 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
515     void *data;
516     u32 act;
517    
518     + /* Transient failure which in theory could occur if
519     + * in-flight packets from before XDP was enabled reach
520     + * the receive path after XDP is loaded.
521     + */
522     + if (unlikely(hdr->hdr.gso_type))
523     + goto err_xdp;
524     +
525     /* This happens when rx buffer size is underestimated */
526     if (unlikely(num_buf > 1 ||
527     headroom < virtnet_get_headroom(vi))) {
528     @@ -647,14 +654,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
529     xdp_page = page;
530     }
531    
532     - /* Transient failure which in theory could occur if
533     - * in-flight packets from before XDP was enabled reach
534     - * the receive path after XDP is loaded. In practice I
535     - * was not able to create this condition.
536     - */
537     - if (unlikely(hdr->hdr.gso_type))
538     - goto err_xdp;
539     -
540     /* Allow consuming headroom but reserve enough space to push
541     * the descriptor on if we get an XDP_TX return code.
542     */
543     @@ -688,7 +687,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
544     trace_xdp_exception(vi->dev, xdp_prog, act);
545     ewma_pkt_len_add(&rq->mrg_avg_pkt_len, len);
546     if (unlikely(xdp_page != page))
547     - goto err_xdp;
548     + put_page(page);
549     rcu_read_unlock();
550     goto xdp_xmit;
551     default:
552     @@ -777,7 +776,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
553     rcu_read_unlock();
554     err_skb:
555     put_page(page);
556     - while (--num_buf) {
557     + while (num_buf-- > 1) {
558     buf = virtqueue_get_buf(rq->vq, &len);
559     if (unlikely(!buf)) {
560     pr_debug("%s: rx error: %d buffers missing\n",
561     diff --git a/drivers/pci/host/pci-hyperv.c b/drivers/pci/host/pci-hyperv.c
562     index c91662927de0..0b750228ad70 100644
563     --- a/drivers/pci/host/pci-hyperv.c
564     +++ b/drivers/pci/host/pci-hyperv.c
565     @@ -566,6 +566,26 @@ static void put_pcichild(struct hv_pci_dev *hv_pcidev,
566     static void get_hvpcibus(struct hv_pcibus_device *hv_pcibus);
567     static void put_hvpcibus(struct hv_pcibus_device *hv_pcibus);
568    
569     +/*
570     + * There is no good way to get notified from vmbus_onoffer_rescind(),
571     + * so let's use polling here, since this is not a hot path.
572     + */
573     +static int wait_for_response(struct hv_device *hdev,
574     + struct completion *comp)
575     +{
576     + while (true) {
577     + if (hdev->channel->rescind) {
578     + dev_warn_once(&hdev->device, "The device is gone.\n");
579     + return -ENODEV;
580     + }
581     +
582     + if (wait_for_completion_timeout(comp, HZ / 10))
583     + break;
584     + }
585     +
586     + return 0;
587     +}
588     +
589     /**
590     * devfn_to_wslot() - Convert from Linux PCI slot to Windows
591     * @devfn: The Linux representation of PCI slot
592     @@ -1582,7 +1602,8 @@ static struct hv_pci_dev *new_pcichild_device(struct hv_pcibus_device *hbus,
593     if (ret)
594     goto error;
595    
596     - wait_for_completion(&comp_pkt.host_event);
597     + if (wait_for_response(hbus->hdev, &comp_pkt.host_event))
598     + goto error;
599    
600     hpdev->desc = *desc;
601     refcount_set(&hpdev->refs, 1);
602     @@ -2075,15 +2096,16 @@ static int hv_pci_protocol_negotiation(struct hv_device *hdev)
603     sizeof(struct pci_version_request),
604     (unsigned long)pkt, VM_PKT_DATA_INBAND,
605     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
606     + if (!ret)
607     + ret = wait_for_response(hdev, &comp_pkt.host_event);
608     +
609     if (ret) {
610     dev_err(&hdev->device,
611     - "PCI Pass-through VSP failed sending version reqquest: %#x",
612     + "PCI Pass-through VSP failed to request version: %d",
613     ret);
614     goto exit;
615     }
616    
617     - wait_for_completion(&comp_pkt.host_event);
618     -
619     if (comp_pkt.completion_status >= 0) {
620     pci_protocol_version = pci_protocol_versions[i];
621     dev_info(&hdev->device,
622     @@ -2292,11 +2314,12 @@ static int hv_pci_enter_d0(struct hv_device *hdev)
623     ret = vmbus_sendpacket(hdev->channel, d0_entry, sizeof(*d0_entry),
624     (unsigned long)pkt, VM_PKT_DATA_INBAND,
625     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
626     + if (!ret)
627     + ret = wait_for_response(hdev, &comp_pkt.host_event);
628     +
629     if (ret)
630     goto exit;
631    
632     - wait_for_completion(&comp_pkt.host_event);
633     -
634     if (comp_pkt.completion_status < 0) {
635     dev_err(&hdev->device,
636     "PCI Pass-through VSP failed D0 Entry with status %x\n",
637     @@ -2336,11 +2359,10 @@ static int hv_pci_query_relations(struct hv_device *hdev)
638    
639     ret = vmbus_sendpacket(hdev->channel, &message, sizeof(message),
640     0, VM_PKT_DATA_INBAND, 0);
641     - if (ret)
642     - return ret;
643     + if (!ret)
644     + ret = wait_for_response(hdev, &comp);
645    
646     - wait_for_completion(&comp);
647     - return 0;
648     + return ret;
649     }
650    
651     /**
652     @@ -2410,11 +2432,11 @@ static int hv_send_resources_allocated(struct hv_device *hdev)
653     size_res, (unsigned long)pkt,
654     VM_PKT_DATA_INBAND,
655     VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
656     + if (!ret)
657     + ret = wait_for_response(hdev, &comp_pkt.host_event);
658     if (ret)
659     break;
660    
661     - wait_for_completion(&comp_pkt.host_event);
662     -
663     if (comp_pkt.completion_status < 0) {
664     ret = -EPROTO;
665     dev_err(&hdev->device,
666     diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
667     index 2eb61d54bbb4..ea9e1e0ed5b8 100644
668     --- a/drivers/scsi/sd_zbc.c
669     +++ b/drivers/scsi/sd_zbc.c
670     @@ -423,9 +423,18 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp,
671    
672     #define SD_ZBC_BUF_SIZE 131072
673    
674     -static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
675     +/**
676     + * sd_zbc_check_zone_size - Check the device zone sizes
677     + * @sdkp: Target disk
678     + *
679     + * Check that all zones of the device are equal. The last zone can however
680     + * be smaller. The zone size must also be a power of two number of LBAs.
681     + *
682     + * Returns the zone size in bytes upon success or an error code upon failure.
683     + */
684     +static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
685     {
686     - u64 zone_blocks;
687     + u64 zone_blocks = 0;
688     sector_t block = 0;
689     unsigned char *buf;
690     unsigned char *rec;
691     @@ -434,8 +443,6 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
692     int ret;
693     u8 same;
694    
695     - sdkp->zone_blocks = 0;
696     -
697     /* Get a buffer */
698     buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
699     if (!buf)
700     @@ -443,10 +450,8 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
701    
702     /* Do a report zone to get the same field */
703     ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
704     - if (ret) {
705     - zone_blocks = 0;
706     - goto out;
707     - }
708     + if (ret)
709     + goto out_free;
710    
711     same = buf[4] & 0x0f;
712     if (same > 0) {
713     @@ -472,16 +477,17 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
714    
715     /* Parse zone descriptors */
716     while (rec < buf + buf_len) {
717     - zone_blocks = get_unaligned_be64(&rec[8]);
718     - if (sdkp->zone_blocks == 0) {
719     - sdkp->zone_blocks = zone_blocks;
720     - } else if (zone_blocks != sdkp->zone_blocks &&
721     - (block + zone_blocks < sdkp->capacity
722     - || zone_blocks > sdkp->zone_blocks)) {
723     + u64 this_zone_blocks = get_unaligned_be64(&rec[8]);
724     +
725     + if (zone_blocks == 0) {
726     + zone_blocks = this_zone_blocks;
727     + } else if (this_zone_blocks != zone_blocks &&
728     + (block + this_zone_blocks < sdkp->capacity
729     + || this_zone_blocks > zone_blocks)) {
730     zone_blocks = 0;
731     goto out;
732     }
733     - block += zone_blocks;
734     + block += this_zone_blocks;
735     rec += 64;
736     }
737    
738     @@ -489,61 +495,77 @@ static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
739     ret = sd_zbc_report_zones(sdkp, buf,
740     SD_ZBC_BUF_SIZE, block);
741     if (ret)
742     - return ret;
743     + goto out_free;
744     }
745    
746     } while (block < sdkp->capacity);
747    
748     - zone_blocks = sdkp->zone_blocks;
749     -
750     out:
751     - kfree(buf);
752     -
753     if (!zone_blocks) {
754     if (sdkp->first_scan)
755     sd_printk(KERN_NOTICE, sdkp,
756     "Devices with non constant zone "
757     "size are not supported\n");
758     - return -ENODEV;
759     - }
760     -
761     - if (!is_power_of_2(zone_blocks)) {
762     + ret = -ENODEV;
763     + } else if (!is_power_of_2(zone_blocks)) {
764     if (sdkp->first_scan)
765     sd_printk(KERN_NOTICE, sdkp,
766     "Devices with non power of 2 zone "
767     "size are not supported\n");
768     - return -ENODEV;
769     - }
770     -
771     - if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
772     + ret = -ENODEV;
773     + } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
774     if (sdkp->first_scan)
775     sd_printk(KERN_NOTICE, sdkp,
776     "Zone size too large\n");
777     - return -ENODEV;
778     + ret = -ENODEV;
779     + } else {
780     + ret = zone_blocks;
781     }
782    
783     - sdkp->zone_blocks = zone_blocks;
784     +out_free:
785     + kfree(buf);
786    
787     - return 0;
788     + return ret;
789     }
790    
791     -static int sd_zbc_setup(struct scsi_disk *sdkp)
792     +static int sd_zbc_setup(struct scsi_disk *sdkp, u32 zone_blocks)
793     {
794     + struct request_queue *q = sdkp->disk->queue;
795     + u32 zone_shift = ilog2(zone_blocks);
796     + u32 nr_zones;
797    
798     /* chunk_sectors indicates the zone size */
799     - blk_queue_chunk_sectors(sdkp->disk->queue,
800     - logical_to_sectors(sdkp->device, sdkp->zone_blocks));
801     - sdkp->zone_shift = ilog2(sdkp->zone_blocks);
802     - sdkp->nr_zones = sdkp->capacity >> sdkp->zone_shift;
803     - if (sdkp->capacity & (sdkp->zone_blocks - 1))
804     - sdkp->nr_zones++;
805     -
806     - if (!sdkp->zones_wlock) {
807     - sdkp->zones_wlock = kcalloc(BITS_TO_LONGS(sdkp->nr_zones),
808     - sizeof(unsigned long),
809     - GFP_KERNEL);
810     - if (!sdkp->zones_wlock)
811     - return -ENOMEM;
812     + blk_queue_chunk_sectors(q,
813     + logical_to_sectors(sdkp->device, zone_blocks));
814     + nr_zones = round_up(sdkp->capacity, zone_blocks) >> zone_shift;
815     +
816     + /*
817     + * Initialize the disk zone write lock bitmap if the number
818     + * of zones changed.
819     + */
820     + if (nr_zones != sdkp->nr_zones) {
821     + unsigned long *zones_wlock = NULL;
822     +
823     + if (nr_zones) {
824     + zones_wlock = kcalloc(BITS_TO_LONGS(nr_zones),
825     + sizeof(unsigned long),
826     + GFP_KERNEL);
827     + if (!zones_wlock)
828     + return -ENOMEM;
829     + }
830     +
831     + blk_mq_freeze_queue(q);
832     + sdkp->zone_blocks = zone_blocks;
833     + sdkp->zone_shift = zone_shift;
834     + sdkp->nr_zones = nr_zones;
835     + swap(sdkp->zones_wlock, zones_wlock);
836     + blk_mq_unfreeze_queue(q);
837     +
838     + kfree(zones_wlock);
839     +
840     + /* READ16/WRITE16 is mandatory for ZBC disks */
841     + sdkp->device->use_16_for_rw = 1;
842     + sdkp->device->use_10_for_rw = 0;
843     }
844    
845     return 0;
846     @@ -552,6 +574,7 @@ static int sd_zbc_setup(struct scsi_disk *sdkp)
847     int sd_zbc_read_zones(struct scsi_disk *sdkp,
848     unsigned char *buf)
849     {
850     + int64_t zone_blocks;
851     int ret;
852    
853     if (!sd_is_zoned(sdkp))
854     @@ -589,19 +612,19 @@ int sd_zbc_read_zones(struct scsi_disk *sdkp,
855     * Check zone size: only devices with a constant zone size (except
856     * an eventual last runt zone) that is a power of 2 are supported.
857     */
858     - ret = sd_zbc_check_zone_size(sdkp);
859     - if (ret)
860     + zone_blocks = sd_zbc_check_zone_size(sdkp);
861     + ret = -EFBIG;
862     + if (zone_blocks != (u32)zone_blocks)
863     + goto err;
864     + ret = zone_blocks;
865     + if (ret < 0)
866     goto err;
867    
868     /* The drive satisfies the kernel restrictions: set it up */
869     - ret = sd_zbc_setup(sdkp);
870     + ret = sd_zbc_setup(sdkp, zone_blocks);
871     if (ret)
872     goto err;
873    
874     - /* READ16/WRITE16 is mandatory for ZBC disks */
875     - sdkp->device->use_16_for_rw = 1;
876     - sdkp->device->use_10_for_rw = 0;
877     -
878     return 0;
879    
880     err:
881     @@ -614,6 +637,7 @@ void sd_zbc_remove(struct scsi_disk *sdkp)
882     {
883     kfree(sdkp->zones_wlock);
884     sdkp->zones_wlock = NULL;
885     + sdkp->nr_zones = 0;
886     }
887    
888     void sd_zbc_print_zones(struct scsi_disk *sdkp)
889     diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
890     index 8e3ca4400766..50e48afd88ff 100644
891     --- a/drivers/vhost/vhost.c
892     +++ b/drivers/vhost/vhost.c
893     @@ -993,6 +993,7 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
894     {
895     int ret = 0;
896    
897     + mutex_lock(&dev->mutex);
898     vhost_dev_lock_vqs(dev);
899     switch (msg->type) {
900     case VHOST_IOTLB_UPDATE:
901     @@ -1024,6 +1025,8 @@ static int vhost_process_iotlb_msg(struct vhost_dev *dev,
902     }
903    
904     vhost_dev_unlock_vqs(dev);
905     + mutex_unlock(&dev->mutex);
906     +
907     return ret;
908     }
909     ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
910     diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
911     index 27d59cf36341..b475d1ebbbbf 100644
912     --- a/fs/btrfs/disk-io.c
913     +++ b/fs/btrfs/disk-io.c
914     @@ -59,7 +59,8 @@
915     BTRFS_HEADER_FLAG_RELOC |\
916     BTRFS_SUPER_FLAG_ERROR |\
917     BTRFS_SUPER_FLAG_SEEDING |\
918     - BTRFS_SUPER_FLAG_METADUMP)
919     + BTRFS_SUPER_FLAG_METADUMP |\
920     + BTRFS_SUPER_FLAG_METADUMP_V2)
921    
922     static const struct extent_io_ops btree_extent_io_ops;
923     static void end_workqueue_fn(struct btrfs_work *work);
924     diff --git a/include/net/ipv6.h b/include/net/ipv6.h
925     index 9596aa93d6ef..a54b8c58ccb7 100644
926     --- a/include/net/ipv6.h
927     +++ b/include/net/ipv6.h
928     @@ -861,6 +861,11 @@ static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
929     return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
930     }
931    
932     +static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
933     +{
934     + return fl6->flowlabel & IPV6_FLOWLABEL_MASK;
935     +}
936     +
937     /*
938     * Prototypes exported by ipv6
939     */
940     diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
941     index 8f659bb7badc..7115838fbf2a 100644
942     --- a/include/uapi/linux/btrfs_tree.h
943     +++ b/include/uapi/linux/btrfs_tree.h
944     @@ -456,6 +456,7 @@ struct btrfs_free_space_header {
945    
946     #define BTRFS_SUPER_FLAG_SEEDING (1ULL << 32)
947     #define BTRFS_SUPER_FLAG_METADUMP (1ULL << 33)
948     +#define BTRFS_SUPER_FLAG_METADUMP_V2 (1ULL << 34)
949    
950    
951     /*
952     diff --git a/mm/mmap.c b/mm/mmap.c
953     index 11f96fad5271..f858b1f336af 100644
954     --- a/mm/mmap.c
955     +++ b/mm/mmap.c
956     @@ -1315,6 +1315,35 @@ static inline int mlock_future_check(struct mm_struct *mm,
957     return 0;
958     }
959    
960     +static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
961     +{
962     + if (S_ISREG(inode->i_mode))
963     + return MAX_LFS_FILESIZE;
964     +
965     + if (S_ISBLK(inode->i_mode))
966     + return MAX_LFS_FILESIZE;
967     +
968     + /* Special "we do even unsigned file positions" case */
969     + if (file->f_mode & FMODE_UNSIGNED_OFFSET)
970     + return 0;
971     +
972     + /* Yes, random drivers might want more. But I'm tired of buggy drivers */
973     + return ULONG_MAX;
974     +}
975     +
976     +static inline bool file_mmap_ok(struct file *file, struct inode *inode,
977     + unsigned long pgoff, unsigned long len)
978     +{
979     + u64 maxsize = file_mmap_size_max(file, inode);
980     +
981     + if (maxsize && len > maxsize)
982     + return false;
983     + maxsize -= len;
984     + if (pgoff > maxsize >> PAGE_SHIFT)
985     + return false;
986     + return true;
987     +}
988     +
989     /*
990     * The caller must hold down_write(&current->mm->mmap_sem).
991     */
992     @@ -1388,6 +1417,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
993     if (file) {
994     struct inode *inode = file_inode(file);
995    
996     + if (!file_mmap_ok(file, inode, pgoff, len))
997     + return -EOVERFLOW;
998     +
999     switch (flags & MAP_TYPE) {
1000     case MAP_SHARED:
1001     if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1002     diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
1003     index f950b80c0dd1..d8796a7874b6 100644
1004     --- a/net/core/flow_dissector.c
1005     +++ b/net/core/flow_dissector.c
1006     @@ -1179,7 +1179,7 @@ __u32 __get_hash_from_flowi6(const struct flowi6 *fl6, struct flow_keys *keys)
1007     keys->ports.src = fl6->fl6_sport;
1008     keys->ports.dst = fl6->fl6_dport;
1009     keys->keyid.keyid = fl6->fl6_gre_key;
1010     - keys->tags.flow_label = (__force u32)fl6->flowlabel;
1011     + keys->tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
1012     keys->basic.ip_proto = fl6->flowi6_proto;
1013    
1014     return flow_hash_from_keys(keys);
1015     diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
1016     index 927a6dcbad96..8f17724a173c 100644
1017     --- a/net/core/net-sysfs.c
1018     +++ b/net/core/net-sysfs.c
1019     @@ -1207,9 +1207,6 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
1020     cpumask_var_t mask;
1021     unsigned long index;
1022    
1023     - if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1024     - return -ENOMEM;
1025     -
1026     index = get_netdev_queue_index(queue);
1027    
1028     if (dev->num_tc) {
1029     @@ -1219,6 +1216,9 @@ static ssize_t xps_cpus_show(struct netdev_queue *queue,
1030     return -EINVAL;
1031     }
1032    
1033     + if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
1034     + return -ENOMEM;
1035     +
1036     rcu_read_lock();
1037     dev_maps = rcu_dereference(dev->xps_maps);
1038     if (dev_maps) {
1039     diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
1040     index 5ace48926b19..4cfdad08aca0 100644
1041     --- a/net/core/rtnetlink.c
1042     +++ b/net/core/rtnetlink.c
1043     @@ -1958,6 +1958,10 @@ static int do_setlink(const struct sk_buff *skb,
1044     const struct net_device_ops *ops = dev->netdev_ops;
1045     int err;
1046    
1047     + err = validate_linkmsg(dev, tb);
1048     + if (err < 0)
1049     + return err;
1050     +
1051     if (tb[IFLA_NET_NS_PID] || tb[IFLA_NET_NS_FD]) {
1052     struct net *net = rtnl_link_get_net(dev_net(dev), tb);
1053     if (IS_ERR(net)) {
1054     @@ -2296,10 +2300,6 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh,
1055     goto errout;
1056     }
1057    
1058     - err = validate_linkmsg(dev, tb);
1059     - if (err < 0)
1060     - goto errout;
1061     -
1062     err = do_setlink(skb, dev, ifm, extack, tb, ifname, 0);
1063     errout:
1064     return err;
1065     diff --git a/net/dccp/proto.c b/net/dccp/proto.c
1066     index ff3b058cf58c..936dab12f99f 100644
1067     --- a/net/dccp/proto.c
1068     +++ b/net/dccp/proto.c
1069     @@ -280,9 +280,7 @@ int dccp_disconnect(struct sock *sk, int flags)
1070    
1071     dccp_clear_xmit_timers(sk);
1072     ccid_hc_rx_delete(dp->dccps_hc_rx_ccid, sk);
1073     - ccid_hc_tx_delete(dp->dccps_hc_tx_ccid, sk);
1074     dp->dccps_hc_rx_ccid = NULL;
1075     - dp->dccps_hc_tx_ccid = NULL;
1076    
1077     __skb_queue_purge(&sk->sk_receive_queue);
1078     __skb_queue_purge(&sk->sk_write_queue);
1079     diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
1080     index d72874150905..df8fd3ce713d 100644
1081     --- a/net/ipv4/fib_frontend.c
1082     +++ b/net/ipv4/fib_frontend.c
1083     @@ -625,6 +625,7 @@ const struct nla_policy rtm_ipv4_policy[RTA_MAX + 1] = {
1084     [RTA_ENCAP] = { .type = NLA_NESTED },
1085     [RTA_UID] = { .type = NLA_U32 },
1086     [RTA_MARK] = { .type = NLA_U32 },
1087     + [RTA_TABLE] = { .type = NLA_U32 },
1088     };
1089    
1090     static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
1091     diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
1092     index f39955913d3f..b557af72cde9 100644
1093     --- a/net/ipv4/fib_semantics.c
1094     +++ b/net/ipv4/fib_semantics.c
1095     @@ -725,6 +725,8 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi)
1096     nla_strlcpy(tmp, nla, sizeof(tmp));
1097     val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
1098     } else {
1099     + if (nla_len(nla) != sizeof(u32))
1100     + return false;
1101     val = nla_get_u32(nla);
1102     }
1103    
1104     @@ -1051,6 +1053,8 @@ fib_convert_metrics(struct fib_info *fi, const struct fib_config *cfg)
1105     if (val == TCP_CA_UNSPEC)
1106     return -EINVAL;
1107     } else {
1108     + if (nla_len(nla) != sizeof(u32))
1109     + return -EINVAL;
1110     val = nla_get_u32(nla);
1111     }
1112     if (type == RTAX_ADVMSS && val > 65535 - 40)
1113     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
1114     index 1e70ed5244ea..d07ba4d5917b 100644
1115     --- a/net/ipv4/ip_sockglue.c
1116     +++ b/net/ipv4/ip_sockglue.c
1117     @@ -511,8 +511,6 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1118     int err;
1119     int copied;
1120    
1121     - WARN_ON_ONCE(sk->sk_family == AF_INET6);
1122     -
1123     err = -EAGAIN;
1124     skb = sock_dequeue_err_skb(sk);
1125     if (!skb)
1126     diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
1127     index c9b3e6e069ae..cbd9c0d8a788 100644
1128     --- a/net/ipv4/ipmr.c
1129     +++ b/net/ipv4/ipmr.c
1130     @@ -323,6 +323,7 @@ static const struct rhashtable_params ipmr_rht_params = {
1131     static struct mr_table *ipmr_new_table(struct net *net, u32 id)
1132     {
1133     struct mr_table *mrt;
1134     + int err;
1135    
1136     /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
1137     if (id != RT_TABLE_DEFAULT && id >= 1000000000)
1138     @@ -338,7 +339,11 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
1139     write_pnet(&mrt->net, net);
1140     mrt->id = id;
1141    
1142     - rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
1143     + err = rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
1144     + if (err) {
1145     + kfree(mrt);
1146     + return ERR_PTR(err);
1147     + }
1148     INIT_LIST_HEAD(&mrt->mfc_cache_list);
1149     INIT_LIST_HEAD(&mrt->mfc_unres_queue);
1150    
1151     diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
1152     index 0f2d74885bcb..32fcce711855 100644
1153     --- a/net/ipv6/ip6_output.c
1154     +++ b/net/ipv6/ip6_output.c
1155     @@ -506,7 +506,8 @@ int ip6_forward(struct sk_buff *skb)
1156     send redirects to source routed frames.
1157     We don't send redirects to frames decapsulated from IPsec.
1158     */
1159     - if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
1160     + if (IP6CB(skb)->iif == dst->dev->ifindex &&
1161     + opt->srcrt == 0 && !skb_sec_path(skb)) {
1162     struct in6_addr *target = NULL;
1163     struct inet_peer *peer;
1164     struct rt6_info *rt;
1165     diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
1166     index 565a0388587a..84ee2eb88121 100644
1167     --- a/net/ipv6/ip6_tunnel.c
1168     +++ b/net/ipv6/ip6_tunnel.c
1169     @@ -1693,8 +1693,13 @@ int ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1170     if (new_mtu < ETH_MIN_MTU)
1171     return -EINVAL;
1172     }
1173     - if (new_mtu > 0xFFF8 - dev->hard_header_len)
1174     - return -EINVAL;
1175     + if (tnl->parms.proto == IPPROTO_IPV6 || tnl->parms.proto == 0) {
1176     + if (new_mtu > IP6_MAX_MTU - dev->hard_header_len)
1177     + return -EINVAL;
1178     + } else {
1179     + if (new_mtu > IP_MAX_MTU - dev->hard_header_len)
1180     + return -EINVAL;
1181     + }
1182     dev->mtu = new_mtu;
1183     return 0;
1184     }
1185     @@ -1842,7 +1847,7 @@ ip6_tnl_dev_init_gen(struct net_device *dev)
1186     if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
1187     dev->mtu -= 8;
1188     dev->min_mtu = ETH_MIN_MTU;
1189     - dev->max_mtu = 0xFFF8 - dev->hard_header_len;
1190     + dev->max_mtu = IP6_MAX_MTU - dev->hard_header_len;
1191    
1192     return 0;
1193    
1194     diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
1195     index e1060f28410d..8015e74fd7d9 100644
1196     --- a/net/ipv6/ip6mr.c
1197     +++ b/net/ipv6/ip6mr.c
1198     @@ -1795,7 +1795,8 @@ int ip6_mroute_setsockopt(struct sock *sk, int optname, char __user *optval, uns
1199     ret = 0;
1200     if (!ip6mr_new_table(net, v))
1201     ret = -ENOMEM;
1202     - raw6_sk(sk)->ip6mr_table = v;
1203     + else
1204     + raw6_sk(sk)->ip6mr_table = v;
1205     rtnl_unlock();
1206     return ret;
1207     }
1208     diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
1209     index dd28005efb97..d081db125905 100644
1210     --- a/net/ipv6/ndisc.c
1211     +++ b/net/ipv6/ndisc.c
1212     @@ -1568,6 +1568,12 @@ void ndisc_send_redirect(struct sk_buff *skb, const struct in6_addr *target)
1213     ops_data_buf[NDISC_OPS_REDIRECT_DATA_SPACE], *ops_data = NULL;
1214     bool ret;
1215    
1216     + if (netif_is_l3_master(skb->dev)) {
1217     + dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
1218     + if (!dev)
1219     + return;
1220     + }
1221     +
1222     if (ipv6_get_lladdr(dev, &saddr_buf, IFA_F_TENTATIVE)) {
1223     ND_PRINTK(2, warn, "Redirect: no link-local address on %s\n",
1224     dev->name);
1225     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1226     index 7d50d889ab6e..375b20d5bbd7 100644
1227     --- a/net/ipv6/route.c
1228     +++ b/net/ipv6/route.c
1229     @@ -1250,7 +1250,7 @@ static void ip6_multipath_l3_keys(const struct sk_buff *skb,
1230     keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
1231     keys->addrs.v6addrs.src = key_iph->saddr;
1232     keys->addrs.v6addrs.dst = key_iph->daddr;
1233     - keys->tags.flow_label = ip6_flowinfo(key_iph);
1234     + keys->tags.flow_label = ip6_flowlabel(key_iph);
1235     keys->basic.ip_proto = key_iph->nexthdr;
1236     }
1237    
1238     diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
1239     index 5fe139484919..bf4763fd68c2 100644
1240     --- a/net/ipv6/seg6_iptunnel.c
1241     +++ b/net/ipv6/seg6_iptunnel.c
1242     @@ -103,7 +103,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto)
1243     hdrlen = (osrh->hdrlen + 1) << 3;
1244     tot_len = hdrlen + sizeof(*hdr);
1245    
1246     - err = skb_cow_head(skb, tot_len);
1247     + err = skb_cow_head(skb, tot_len + skb->mac_len);
1248     if (unlikely(err))
1249     return err;
1250    
1251     @@ -161,7 +161,7 @@ int seg6_do_srh_inline(struct sk_buff *skb, struct ipv6_sr_hdr *osrh)
1252    
1253     hdrlen = (osrh->hdrlen + 1) << 3;
1254    
1255     - err = skb_cow_head(skb, hdrlen);
1256     + err = skb_cow_head(skb, hdrlen + skb->mac_len);
1257     if (unlikely(err))
1258     return err;
1259    
1260     diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1261     index ad1e7e6ce009..5d00a38cd1cb 100644
1262     --- a/net/ipv6/sit.c
1263     +++ b/net/ipv6/sit.c
1264     @@ -1360,7 +1360,7 @@ static void ipip6_tunnel_setup(struct net_device *dev)
1265     dev->hard_header_len = LL_MAX_HEADER + t_hlen;
1266     dev->mtu = ETH_DATA_LEN - t_hlen;
1267     dev->min_mtu = IPV6_MIN_MTU;
1268     - dev->max_mtu = 0xFFF8 - t_hlen;
1269     + dev->max_mtu = IP6_MAX_MTU - t_hlen;
1270     dev->flags = IFF_NOARP;
1271     netif_keep_dst(dev);
1272     dev->addr_len = 4;
1273     @@ -1572,7 +1572,8 @@ static int ipip6_newlink(struct net *src_net, struct net_device *dev,
1274     if (tb[IFLA_MTU]) {
1275     u32 mtu = nla_get_u32(tb[IFLA_MTU]);
1276    
1277     - if (mtu >= IPV6_MIN_MTU && mtu <= 0xFFF8 - dev->hard_header_len)
1278     + if (mtu >= IPV6_MIN_MTU &&
1279     + mtu <= IP6_MAX_MTU - dev->hard_header_len)
1280     dev->mtu = mtu;
1281     }
1282    
1283     diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
1284     index 01a4ff3df60b..9bf997404918 100644
1285     --- a/net/kcm/kcmsock.c
1286     +++ b/net/kcm/kcmsock.c
1287     @@ -1672,7 +1672,7 @@ static struct file *kcm_clone(struct socket *osock)
1288     __module_get(newsock->ops->owner);
1289    
1290     newsk = sk_alloc(sock_net(osock->sk), PF_KCM, GFP_KERNEL,
1291     - &kcm_proto, true);
1292     + &kcm_proto, false);
1293     if (!newsk) {
1294     sock_release(newsock);
1295     return ERR_PTR(-ENOMEM);
1296     diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
1297     index 8351faabba62..7806e166669a 100644
1298     --- a/net/packet/af_packet.c
1299     +++ b/net/packet/af_packet.c
1300     @@ -2920,7 +2920,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
1301     if (unlikely(offset < 0))
1302     goto out_free;
1303     } else if (reserve) {
1304     - skb_push(skb, reserve);
1305     + skb_reserve(skb, -reserve);
1306     }
1307    
1308     /* Returns -EFAULT on error */
1309     @@ -4293,7 +4293,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
1310     goto out;
1311     if (po->tp_version >= TPACKET_V3 &&
1312     req->tp_block_size <=
1313     - BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv))
1314     + BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + sizeof(struct tpacket3_hdr))
1315     goto out;
1316     if (unlikely(req->tp_frame_size < po->tp_hdrlen +
1317     po->tp_reserve))
1318     diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
1319     index 7a838d1c1c00..1879665e5a2b 100644
1320     --- a/net/sched/cls_flower.c
1321     +++ b/net/sched/cls_flower.c
1322     @@ -1007,7 +1007,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
1323     return 0;
1324    
1325     errout_idr:
1326     - if (fnew->handle)
1327     + if (!fold)
1328     idr_remove_ext(&head->handle_idr, fnew->handle);
1329     errout:
1330     tcf_exts_destroy(&fnew->exts);
1331     diff --git a/net/sctp/transport.c b/net/sctp/transport.c
1332     index 7ef77fd7b52a..e0c2a4e23039 100644
1333     --- a/net/sctp/transport.c
1334     +++ b/net/sctp/transport.c
1335     @@ -637,7 +637,7 @@ unsigned long sctp_transport_timeout(struct sctp_transport *trans)
1336     trans->state != SCTP_PF)
1337     timeout += trans->hbinterval;
1338    
1339     - return timeout;
1340     + return max_t(unsigned long, timeout, HZ / 5);
1341     }
1342    
1343     /* Reset transport variables to their initial values */
1344     diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c
1345     index 297b079ae4d9..27aac273205b 100644
1346     --- a/scripts/kconfig/confdata.c
1347     +++ b/scripts/kconfig/confdata.c
1348     @@ -745,7 +745,7 @@ int conf_write(const char *name)
1349     struct menu *menu;
1350     const char *basename;
1351     const char *str;
1352     - char dirname[PATH_MAX+1], tmpname[PATH_MAX+1], newname[PATH_MAX+1];
1353     + char dirname[PATH_MAX+1], tmpname[PATH_MAX+22], newname[PATH_MAX+8];
1354     char *env;
1355    
1356     dirname[0] = 0;