Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0161-4.19.62-all-fixes.patch

Revision 3440
Fri Aug 2 11:48:07 2019 UTC by niro
File size: 74141 bytes
-linux-4.19.62
diff --git a/Makefile b/Makefile
index b16485c580d7..a4463d880ae2 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 61
+SUBLEVEL = 62
EXTRAVERSION =
NAME = "People's Front"

diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 705593d40d12..05c60fa4fa06 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -471,27 +471,27 @@ static unsigned long pin_cfg_bias_disable[] = {
static struct pinctrl_map pin_map[] __initdata = {
/* NAND pin configuration */
PIN_MAP_MUX_GROUP_DEFAULT("jz4740-nand",
- "10010000.jz4740-pinctrl", "nand", "nand-cs1"),
+ "10010000.pin-controller", "nand-cs1", "nand"),

/* fbdev pin configuration */
PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_DEFAULT,
- "10010000.jz4740-pinctrl", "lcd", "lcd-8bit"),
+ "10010000.pin-controller", "lcd-8bit", "lcd"),
PIN_MAP_MUX_GROUP("jz4740-fb", PINCTRL_STATE_SLEEP,
- "10010000.jz4740-pinctrl", "lcd", "lcd-no-pins"),
+ "10010000.pin-controller", "lcd-no-pins", "lcd"),

/* MMC pin configuration */
PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
- "10010000.jz4740-pinctrl", "mmc", "mmc-1bit"),
+ "10010000.pin-controller", "mmc-1bit", "mmc"),
PIN_MAP_MUX_GROUP_DEFAULT("jz4740-mmc.0",
- "10010000.jz4740-pinctrl", "mmc", "mmc-4bit"),
+ "10010000.pin-controller", "mmc-4bit", "mmc"),
PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
- "10010000.jz4740-pinctrl", "PD0", pin_cfg_bias_disable),
+ "10010000.pin-controller", "PD0", pin_cfg_bias_disable),
PIN_MAP_CONFIGS_PIN_DEFAULT("jz4740-mmc.0",
- "10010000.jz4740-pinctrl", "PD2", pin_cfg_bias_disable),
+ "10010000.pin-controller", "PD2", pin_cfg_bias_disable),

/* PWM pin configuration */
PIN_MAP_MUX_GROUP_DEFAULT("jz4740-pwm",
- "10010000.jz4740-pinctrl", "pwm4", "pwm4"),
+ "10010000.pin-controller", "pwm4", "pwm4"),
};


diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 73d6d585dd66..4cf16378dffe 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8457,6 +8457,7 @@ static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
{
vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER, -1ull);
+ vmx->nested.sync_shadow_vmcs = false;
}

static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
@@ -8468,7 +8469,6 @@ static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
/* copy to memory all shadowed fields in case
they were modified */
copy_shadow_to_vmcs12(vmx);
- vmx->nested.sync_shadow_vmcs = false;
vmx_disable_shadow_vmcs(vmx);
}
vmx->nested.posted_intr_nv = -1;
@@ -8490,6 +8490,8 @@ static void free_nested(struct vcpu_vmx *vmx)
if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;

+ kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, &vmx->vcpu);
+
hrtimer_cancel(&vmx->nested.preemption_timer);
vmx->nested.vmxon = false;
vmx->nested.smm.vmxon = false;
@@ -8668,6 +8670,9 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
u64 field_value;
struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

+ if (WARN_ON(!shadow_vmcs))
+ return;
+
preempt_disable();

vmcs_load(shadow_vmcs);
@@ -8706,6 +8711,9 @@ static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
u64 field_value = 0;
struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;

+ if (WARN_ON(!shadow_vmcs))
+ return;
+
vmcs_load(shadow_vmcs);

for (q = 0; q < ARRAY_SIZE(fields); q++) {
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 13884474d158..69842145c223 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -1069,6 +1069,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
fence->ops->get_driver_name(fence),
fence->ops->get_timeline_name(fence),
dma_fence_is_signaled(fence) ? "" : "un");
+ dma_fence_put(fence);
}
rcu_read_unlock();

diff --git a/drivers/dma-buf/reservation.c b/drivers/dma-buf/reservation.c
index 6c95f61a32e7..49ab09468ba1 100644
--- a/drivers/dma-buf/reservation.c
+++ b/drivers/dma-buf/reservation.c
@@ -416,6 +416,10 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
GFP_NOWAIT | __GFP_NOWARN);
if (!nshared) {
rcu_read_unlock();
+
+ dma_fence_put(fence_excl);
+ fence_excl = NULL;
+
nshared = krealloc(shared, sz, GFP_KERNEL);
if (nshared) {
shared = nshared;
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index a5ece8ea79bc..abb332d15a13 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -222,8 +222,9 @@ static int davinci_gpio_probe(struct platform_device *pdev)
for (i = 0; i < nirq; i++) {
chips->irqs[i] = platform_get_irq(pdev, i);
if (chips->irqs[i] < 0) {
- dev_info(dev, "IRQ not populated, err = %d\n",
- chips->irqs[i]);
+ if (chips->irqs[i] != -EPROBE_DEFER)
+ dev_info(dev, "IRQ not populated, err = %d\n",
+ chips->irqs[i]);
return chips->irqs[i];
}
}
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index 433a14b9f731..253a1bbe37e8 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -1455,7 +1455,7 @@ static void __exit cfhsi_exit_module(void)
rtnl_lock();
list_for_each_safe(list_node, n, &cfhsi_list) {
cfhsi = list_entry(list_node, struct cfhsi, list);
- unregister_netdev(cfhsi->ndev);
+ unregister_netdevice(cfhsi->ndev);
}
rtnl_unlock();
}
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 411cfb806459..703e6bdaf0e1 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -4816,6 +4816,8 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
err = PTR_ERR(chip->reset);
goto out;
}
+ if (chip->reset)
+ usleep_range(1000, 2000);

err = mv88e6xxx_detect(chip);
if (err)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index e3ce29951c5e..3edb81a4f075 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -286,6 +286,9 @@ int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
sw_cons = txdata->tx_pkt_cons;

+ /* Ensure subsequent loads occur after hw_cons */
+ smp_rmb();
+
while (sw_cons != hw_cons) {
u16 pkt_cons;

diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 2d6f090bf644..fd587bed32eb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -3086,39 +3086,42 @@ static void bcmgenet_timeout(struct net_device *dev)
netif_tx_wake_all_queues(dev);
}

-#define MAX_MC_COUNT 16
+#define MAX_MDF_FILTER 17

static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
unsigned char *addr,
- int *i,
- int *mc)
+ int *i)
{
- u32 reg;
-
bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
UMAC_MDF_ADDR + (*i * 4));
bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
addr[4] << 8 | addr[5],
UMAC_MDF_ADDR + ((*i + 1) * 4));
- reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
- reg |= (1 << (MAX_MC_COUNT - *mc));
- bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
*i += 2;
- (*mc)++;
}

static void bcmgenet_set_rx_mode(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
struct netdev_hw_addr *ha;
- int i, mc;
+ int i, nfilter;
u32 reg;

netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

- /* Promiscuous mode */
+ /* Number of filters needed */
+ nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;
+
+ /*
+ * Turn on promicuous mode for three scenarios
+ * 1. IFF_PROMISC flag is set
+ * 2. IFF_ALLMULTI flag is set
+ * 3. The number of filters needed exceeds the number filters
+ * supported by the hardware.
+ */
reg = bcmgenet_umac_readl(priv, UMAC_CMD);
- if (dev->flags & IFF_PROMISC) {
+ if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
+ (nfilter > MAX_MDF_FILTER)) {
reg |= CMD_PROMISC;
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
@@ -3128,32 +3131,24 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
bcmgenet_umac_writel(priv, reg, UMAC_CMD);
}

- /* UniMac doesn't support ALLMULTI */
- if (dev->flags & IFF_ALLMULTI) {
- netdev_warn(dev, "ALLMULTI is not supported\n");
- return;
- }
-
/* update MDF filter */
i = 0;
- mc = 0;
/* Broadcast */
- bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
/* my own address.*/
- bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
- /* Unicast list*/
- if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
- return;
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

- if (!netdev_uc_empty(dev))
- netdev_for_each_uc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
- /* Multicast */
- if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
- return;
+ /* Unicast */
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);

+ /* Multicast */
netdev_for_each_mc_addr(ha, dev)
- bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i);
+
+ /* Enable filters */
+ reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}

/* Set the hardware MAC address. */
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 1485f66cf7b0..4ade864c8d53 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4947,6 +4947,13 @@ static const struct dmi_system_id msi_blacklist[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
},
},
+ {
+ .ident = "ASUS P6T",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "P6T"),
+ },
+ },
{}
};

diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 5b7fe8264144..db6aafcced0d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -662,7 +662,9 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,

profile->init(mdev, netdev, profile, ipriv);

- mlx5e_attach_netdev(epriv);
+ err = mlx5e_attach_netdev(epriv);
+ if (err)
+ goto detach;
netif_carrier_off(netdev);

/* set rdma_netdev func pointers */
@@ -678,6 +680,11 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,

return netdev;

+detach:
+ profile->cleanup(epriv);
+ if (ipriv->sub_interface)
+ return NULL;
+ mlx5e_destroy_mdev_resources(mdev);
destroy_ht:
mlx5i_pkey_qpn_ht_cleanup(netdev);
destroy_wq:
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 7a50b911b180..a6992c4c7313 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5202,6 +5202,143 @@ static void rtl_hw_start_8411_2(struct rtl8169_private *tp)
/* disable aspm and clock request before access ephy */
rtl_hw_aspm_clkreq_enable(tp, false);
rtl_ephy_init(tp, e_info_8411_2, ARRAY_SIZE(e_info_8411_2));
+
+ /* The following Realtek-provided magic fixes an issue with the RX unit
+ * getting confused after the PHY having been powered-down.
+ */
+ r8168_mac_ocp_write(tp, 0xFC28, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC2A, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC2C, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC2E, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC30, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC32, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC34, 0x0000);
+ r8168_mac_ocp_write(tp, 0xFC36, 0x0000);
+ mdelay(3);
+ r8168_mac_ocp_write(tp, 0xFC26, 0x0000);
+
+ r8168_mac_ocp_write(tp, 0xF800, 0xE008);
+ r8168_mac_ocp_write(tp, 0xF802, 0xE00A);
+ r8168_mac_ocp_write(tp, 0xF804, 0xE00C);
+ r8168_mac_ocp_write(tp, 0xF806, 0xE00E);
+ r8168_mac_ocp_write(tp, 0xF808, 0xE027);
+ r8168_mac_ocp_write(tp, 0xF80A, 0xE04F);
+ r8168_mac_ocp_write(tp, 0xF80C, 0xE05E);
+ r8168_mac_ocp_write(tp, 0xF80E, 0xE065);
+ r8168_mac_ocp_write(tp, 0xF810, 0xC602);
+ r8168_mac_ocp_write(tp, 0xF812, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF814, 0x0000);
+ r8168_mac_ocp_write(tp, 0xF816, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF818, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF81A, 0x074C);
+ r8168_mac_ocp_write(tp, 0xF81C, 0xC302);
+ r8168_mac_ocp_write(tp, 0xF81E, 0xBB00);
+ r8168_mac_ocp_write(tp, 0xF820, 0x080A);
+ r8168_mac_ocp_write(tp, 0xF822, 0x6420);
+ r8168_mac_ocp_write(tp, 0xF824, 0x48C2);
+ r8168_mac_ocp_write(tp, 0xF826, 0x8C20);
+ r8168_mac_ocp_write(tp, 0xF828, 0xC516);
+ r8168_mac_ocp_write(tp, 0xF82A, 0x64A4);
+ r8168_mac_ocp_write(tp, 0xF82C, 0x49C0);
+ r8168_mac_ocp_write(tp, 0xF82E, 0xF009);
+ r8168_mac_ocp_write(tp, 0xF830, 0x74A2);
+ r8168_mac_ocp_write(tp, 0xF832, 0x8CA5);
+ r8168_mac_ocp_write(tp, 0xF834, 0x74A0);
+ r8168_mac_ocp_write(tp, 0xF836, 0xC50E);
+ r8168_mac_ocp_write(tp, 0xF838, 0x9CA2);
+ r8168_mac_ocp_write(tp, 0xF83A, 0x1C11);
+ r8168_mac_ocp_write(tp, 0xF83C, 0x9CA0);
+ r8168_mac_ocp_write(tp, 0xF83E, 0xE006);
+ r8168_mac_ocp_write(tp, 0xF840, 0x74F8);
+ r8168_mac_ocp_write(tp, 0xF842, 0x48C4);
+ r8168_mac_ocp_write(tp, 0xF844, 0x8CF8);
+ r8168_mac_ocp_write(tp, 0xF846, 0xC404);
+ r8168_mac_ocp_write(tp, 0xF848, 0xBC00);
+ r8168_mac_ocp_write(tp, 0xF84A, 0xC403);
+ r8168_mac_ocp_write(tp, 0xF84C, 0xBC00);
+ r8168_mac_ocp_write(tp, 0xF84E, 0x0BF2);
+ r8168_mac_ocp_write(tp, 0xF850, 0x0C0A);
+ r8168_mac_ocp_write(tp, 0xF852, 0xE434);
+ r8168_mac_ocp_write(tp, 0xF854, 0xD3C0);
+ r8168_mac_ocp_write(tp, 0xF856, 0x49D9);
+ r8168_mac_ocp_write(tp, 0xF858, 0xF01F);
+ r8168_mac_ocp_write(tp, 0xF85A, 0xC526);
+ r8168_mac_ocp_write(tp, 0xF85C, 0x64A5);
+ r8168_mac_ocp_write(tp, 0xF85E, 0x1400);
+ r8168_mac_ocp_write(tp, 0xF860, 0xF007);
+ r8168_mac_ocp_write(tp, 0xF862, 0x0C01);
+ r8168_mac_ocp_write(tp, 0xF864, 0x8CA5);
+ r8168_mac_ocp_write(tp, 0xF866, 0x1C15);
+ r8168_mac_ocp_write(tp, 0xF868, 0xC51B);
+ r8168_mac_ocp_write(tp, 0xF86A, 0x9CA0);
+ r8168_mac_ocp_write(tp, 0xF86C, 0xE013);
+ r8168_mac_ocp_write(tp, 0xF86E, 0xC519);
+ r8168_mac_ocp_write(tp, 0xF870, 0x74A0);
+ r8168_mac_ocp_write(tp, 0xF872, 0x48C4);
+ r8168_mac_ocp_write(tp, 0xF874, 0x8CA0);
+ r8168_mac_ocp_write(tp, 0xF876, 0xC516);
+ r8168_mac_ocp_write(tp, 0xF878, 0x74A4);
+ r8168_mac_ocp_write(tp, 0xF87A, 0x48C8);
+ r8168_mac_ocp_write(tp, 0xF87C, 0x48CA);
+ r8168_mac_ocp_write(tp, 0xF87E, 0x9CA4);
+ r8168_mac_ocp_write(tp, 0xF880, 0xC512);
+ r8168_mac_ocp_write(tp, 0xF882, 0x1B00);
+ r8168_mac_ocp_write(tp, 0xF884, 0x9BA0);
+ r8168_mac_ocp_write(tp, 0xF886, 0x1B1C);
+ r8168_mac_ocp_write(tp, 0xF888, 0x483F);
+ r8168_mac_ocp_write(tp, 0xF88A, 0x9BA2);
+ r8168_mac_ocp_write(tp, 0xF88C, 0x1B04);
+ r8168_mac_ocp_write(tp, 0xF88E, 0xC508);
+ r8168_mac_ocp_write(tp, 0xF890, 0x9BA0);
+ r8168_mac_ocp_write(tp, 0xF892, 0xC505);
+ r8168_mac_ocp_write(tp, 0xF894, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF896, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF898, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF89A, 0x0300);
+ r8168_mac_ocp_write(tp, 0xF89C, 0x051E);
+ r8168_mac_ocp_write(tp, 0xF89E, 0xE434);
+ r8168_mac_ocp_write(tp, 0xF8A0, 0xE018);
+ r8168_mac_ocp_write(tp, 0xF8A2, 0xE092);
+ r8168_mac_ocp_write(tp, 0xF8A4, 0xDE20);
+ r8168_mac_ocp_write(tp, 0xF8A6, 0xD3C0);
+ r8168_mac_ocp_write(tp, 0xF8A8, 0xC50F);
+ r8168_mac_ocp_write(tp, 0xF8AA, 0x76A4);
+ r8168_mac_ocp_write(tp, 0xF8AC, 0x49E3);
+ r8168_mac_ocp_write(tp, 0xF8AE, 0xF007);
+ r8168_mac_ocp_write(tp, 0xF8B0, 0x49C0);
+ r8168_mac_ocp_write(tp, 0xF8B2, 0xF103);
+ r8168_mac_ocp_write(tp, 0xF8B4, 0xC607);
+ r8168_mac_ocp_write(tp, 0xF8B6, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF8B8, 0xC606);
+ r8168_mac_ocp_write(tp, 0xF8BA, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF8BC, 0xC602);
+ r8168_mac_ocp_write(tp, 0xF8BE, 0xBE00);
+ r8168_mac_ocp_write(tp, 0xF8C0, 0x0C4C);
+ r8168_mac_ocp_write(tp, 0xF8C2, 0x0C28);
+ r8168_mac_ocp_write(tp, 0xF8C4, 0x0C2C);
+ r8168_mac_ocp_write(tp, 0xF8C6, 0xDC00);
+ r8168_mac_ocp_write(tp, 0xF8C8, 0xC707);
+ r8168_mac_ocp_write(tp, 0xF8CA, 0x1D00);
+ r8168_mac_ocp_write(tp, 0xF8CC, 0x8DE2);
+ r8168_mac_ocp_write(tp, 0xF8CE, 0x48C1);
+ r8168_mac_ocp_write(tp, 0xF8D0, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF8D2, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF8D4, 0x00AA);
+ r8168_mac_ocp_write(tp, 0xF8D6, 0xE0C0);
+ r8168_mac_ocp_write(tp, 0xF8D8, 0xC502);
+ r8168_mac_ocp_write(tp, 0xF8DA, 0xBD00);
+ r8168_mac_ocp_write(tp, 0xF8DC, 0x0132);
+
+ r8168_mac_ocp_write(tp, 0xFC26, 0x8000);
+
+ r8168_mac_ocp_write(tp, 0xFC2A, 0x0743);
+ r8168_mac_ocp_write(tp, 0xFC2C, 0x0801);
+ r8168_mac_ocp_write(tp, 0xFC2E, 0x0BE9);
+ r8168_mac_ocp_write(tp, 0xFC30, 0x02FD);
+ r8168_mac_ocp_write(tp, 0xFC32, 0x0C25);
+ r8168_mac_ocp_write(tp, 0xFC34, 0x00A9);
+ r8168_mac_ocp_write(tp, 0xFC36, 0x012D);
+
rtl_hw_aspm_clkreq_enable(tp, true);
}

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5c18874614ba..0101ebaecf02 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3036,17 +3036,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)

/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
- /*
- * There is no way to determine the number of TSO
- * capable Queues. Let's use always the Queue 0
- * because if TSO is supported then at least this
- * one will be capable.
- */
- skb_set_queue_mapping(skb, 0);
-
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
return stmmac_tso_xmit(skb, dev);
- }
}

if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3855,6 +3846,23 @@ static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
}
}

+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
+{
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ /*
+ * There is no way to determine the number of TSO
+ * capable Queues. Let's use always the Queue 0
+ * because if TSO is supported then at least this
+ * one will be capable.
+ */
+ return 0;
+ }
+
+ return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
struct stmmac_priv *priv = netdev_priv(ndev);
@@ -4097,6 +4105,7 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
.ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index cf6b9b1771f1..cc60ef9634db 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -847,7 +847,6 @@ int netvsc_recv_callback(struct net_device *net,
csum_info, vlan, data, len);
if (unlikely(!skb)) {
++net_device_ctx->eth_stats.rx_no_memory;
- rcu_read_unlock();
return NVSP_STAT_FAIL;
}

diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 7de88b33d5b9..2c971357e66c 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -869,6 +869,7 @@ static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)

static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
+ skb->ip_summed = CHECKSUM_NONE;
memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
skb_pull(skb, hdr_len);
pskb_trim_unique(skb, skb->len - icv_len);
@@ -1103,10 +1104,9 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
}

skb = skb_unshare(skb, GFP_ATOMIC);
- if (!skb) {
- *pskb = NULL;
+ *pskb = skb;
+ if (!skb)
return RX_HANDLER_CONSUMED;
- }

pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
if (!pulled_sci) {
diff --git a/drivers/net/phy/sfp.c b/drivers/net/phy/sfp.c
index 418522aa2f71..998d08ae7431 100644
--- a/drivers/net/phy/sfp.c
+++ b/drivers/net/phy/sfp.c
@@ -514,7 +514,7 @@ static int sfp_hwmon_read_sensor(struct sfp *sfp, int reg, long *value)

static void sfp_hwmon_to_rx_power(long *value)
{
- *value = DIV_ROUND_CLOSEST(*value, 100);
+ *value = DIV_ROUND_CLOSEST(*value, 10);
}

static void sfp_hwmon_calibrate(struct sfp *sfp, unsigned int slope, int offset,
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 449fc52f9a89..9f895083bc0a 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -169,23 +169,29 @@ static int vrf_ip6_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
struct net_device *dev)
{
- const struct ipv6hdr *iph = ipv6_hdr(skb);
+ const struct ipv6hdr *iph;
struct net *net = dev_net(skb->dev);
- struct flowi6 fl6 = {
- /* needed to match OIF rule */
- .flowi6_oif = dev->ifindex,
- .flowi6_iif = LOOPBACK_IFINDEX,
- .daddr = iph->daddr,
- .saddr = iph->saddr,
- .flowlabel = ip6_flowinfo(iph),
- .flowi6_mark = skb->mark,
- .flowi6_proto = iph->nexthdr,
- .flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
- };
+ struct flowi6 fl6;
int ret = NET_XMIT_DROP;
struct dst_entry *dst;
struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct ipv6hdr)))
+ goto err;
+
+ iph = ipv6_hdr(skb);
+
+ memset(&fl6, 0, sizeof(fl6));
+ /* needed to match OIF rule */
+ fl6.flowi6_oif = dev->ifindex;
+ fl6.flowi6_iif = LOOPBACK_IFINDEX;
+ fl6.daddr = iph->daddr;
+ fl6.saddr = iph->saddr;
+ fl6.flowlabel = ip6_flowinfo(iph);
+ fl6.flowi6_mark = skb->mark;
+ fl6.flowi6_proto = iph->nexthdr;
+ fl6.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF;
+
dst = ip6_route_output(net, NULL, &fl6);
if (dst == dst_null)
goto err;
@@ -241,21 +247,27 @@ static int vrf_ip_local_out(struct net *net, struct sock *sk,
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
struct net_device *vrf_dev)
{
- struct iphdr *ip4h = ip_hdr(skb);
+ struct iphdr *ip4h;
int ret = NET_XMIT_DROP;
- struct flowi4 fl4 = {
- /* needed to match OIF rule */
- .flowi4_oif = vrf_dev->ifindex,
- .flowi4_iif = LOOPBACK_IFINDEX,
- .flowi4_tos = RT_TOS(ip4h->tos),
- .flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
- .flowi4_proto = ip4h->protocol,
- .daddr = ip4h->daddr,
- .saddr = ip4h->saddr,
- };
+ struct flowi4 fl4;
struct net *net = dev_net(vrf_dev);
struct rtable *rt;

+ if (!pskb_may_pull(skb, ETH_HLEN + sizeof(struct iphdr)))
+ goto err;
+
+ ip4h = ip_hdr(skb);
+
+ memset(&fl4, 0, sizeof(fl4));
+ /* needed to match OIF rule */
+ fl4.flowi4_oif = vrf_dev->ifindex;
+ fl4.flowi4_iif = LOOPBACK_IFINDEX;
+ fl4.flowi4_tos = RT_TOS(ip4h->tos);
+ fl4.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF;
+ fl4.flowi4_proto = ip4h->protocol;
+ fl4.daddr = ip4h->daddr;
+ fl4.saddr = ip4h->saddr;
+
rt = ip_route_output_flow(net, &fl4, NULL);
if (IS_ERR(rt))
goto err;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index f93f9881ec18..46d5c40f2835 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -108,7 +108,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
struct inode *inode = file_inode(file);
struct super_block *sb = inode->i_sb;
struct buffer_head *bh = NULL;
- int dir_has_error = 0;
struct fscrypt_str fstr = FSTR_INIT(NULL, 0);

if (ext4_encrypted_inode(inode)) {
@@ -144,8 +143,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
return err;
}

- offset = ctx->pos & (sb->s_blocksize - 1);
-
while (ctx->pos < inode->i_size) {
struct ext4_map_blocks map;

@@ -154,9 +151,18 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
goto errout;
}
cond_resched();
+ offset = ctx->pos & (sb->s_blocksize - 1);
map.m_lblk = ctx->pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
err = ext4_map_blocks(NULL, inode, &map, 0);
+ if (err == 0) {
+ /* m_len should never be zero but let's avoid
+ * an infinite loop if it somehow is */
+ if (map.m_len == 0)
+ map.m_len = 1;
+ ctx->pos += map.m_len * sb->s_blocksize;
+ continue;
+ }
if (err > 0) {
pgoff_t index = map.m_pblk >>
(PAGE_SHIFT - inode->i_blkbits);
@@ -175,13 +181,6 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
}

if (!bh) {
- if (!dir_has_error) {
- EXT4_ERROR_FILE(file, 0,
- "directory contains a "
- "hole at offset %llu",
- (unsigned long long) ctx->pos);
- dir_has_error = 1;
- }
/* corrupt size? Maybe no more blocks to read */
if (ctx->pos > inode->i_blocks << 9)
break;
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index df908ef79cce..402dc366117e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -361,20 +361,20 @@ static inline int ext4_journal_force_commit(journal_t *journal)
}

static inline int ext4_jbd2_inode_add_write(handle_t *handle,
- struct inode *inode)
+ struct inode *inode, loff_t start_byte, loff_t length)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_inode_add_write(handle,
- EXT4_I(inode)->jinode);
+ return jbd2_journal_inode_ranged_write(handle,
+ EXT4_I(inode)->jinode, start_byte, length);
return 0;
}

static inline int ext4_jbd2_inode_add_wait(handle_t *handle,
- struct inode *inode)
+ struct inode *inode, loff_t start_byte, loff_t length)
{
if (ext4_handle_valid(handle))
- return jbd2_journal_inode_add_wait(handle,
- EXT4_I(inode)->jinode);
+ return jbd2_journal_inode_ranged_wait(handle,
+ EXT4_I(inode)->jinode, start_byte, length);
return 0;
}

diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 2c5baa5e8291..f4a24a46245e 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -165,6 +165,10 @@ static ssize_t ext4_write_checks(struct kiocb *iocb, struct iov_iter *from)
ret = generic_write_checks(iocb, from);
if (ret <= 0)
return ret;
+
+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
/*
* If we have encountered a bitmap-format file, the size limit
* is smaller than s_maxbytes, which is for extent-mapped files.
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 05dc5a4ba481..e65559bf7728 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -729,10 +729,16 @@ out_sem:
!(flags & EXT4_GET_BLOCKS_ZERO) &&
!ext4_is_quota_file(inode) &&
ext4_should_order_data(inode)) {
+ loff_t start_byte =
+ (loff_t)map->m_lblk << inode->i_blkbits;
+ loff_t length = (loff_t)map->m_len << inode->i_blkbits;
+
if (flags & EXT4_GET_BLOCKS_IO_SUBMIT)
- ret = ext4_jbd2_inode_add_wait(handle, inode);
+ ret = ext4_jbd2_inode_add_wait(handle, inode,
+ start_byte, length);
else
- ret = ext4_jbd2_inode_add_write(handle, inode);
+ ret = ext4_jbd2_inode_add_write(handle, inode,
+ start_byte, length);
if (ret)
return ret;
}
@@ -4058,7 +4064,8 @@ static int __ext4_block_zero_page_range(handle_t *handle,
err = 0;
mark_buffer_dirty(bh);
if (ext4_should_order_data(inode))
- err = ext4_jbd2_inode_add_write(handle, inode);
+ err = ext4_jbd2_inode_add_write(handle, inode, from,
+ length);
}

unlock:
@@ -5491,6 +5498,14 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;

+ if (unlikely(IS_IMMUTABLE(inode)))
+ return -EPERM;
+
+ if (unlikely(IS_APPEND(inode) &&
+ (ia_valid & (ATTR_MODE | ATTR_UID |
+ ATTR_GID | ATTR_TIMES_SET))))
+ return -EPERM;
+
error = setattr_prepare(dentry, attr);
if (error)
return error;
@@ -6190,6 +6205,9 @@ int ext4_page_mkwrite(struct vm_fault *vmf)
get_block_t *get_block;
int retries = 0;

+ if (unlikely(IS_IMMUTABLE(inode)))
+ return VM_FAULT_SIGBUS;
+
sb_start_pagefault(inode->i_sb);
file_update_time(vma->vm_file);

diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 53d57cdf3c4d..abb6fcff0a1d 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -268,6 +268,29 @@ static int uuid_is_zero(__u8 u[16])
}
#endif

+/*
+ * If immutable is set and we are not clearing it, we're not allowed to change
+ * anything else in the inode. Don't error out if we're only trying to set
+ * immutable on an immutable file.
+ */
+static int ext4_ioctl_check_immutable(struct inode *inode, __u32 new_projid,
+ unsigned int flags)
+{
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int oldflags = ei->i_flags;
+
+ if (!(oldflags & EXT4_IMMUTABLE_FL) || !(flags & EXT4_IMMUTABLE_FL))
+ return 0;
+
+ if ((oldflags & ~EXT4_IMMUTABLE_FL) != (flags & ~EXT4_IMMUTABLE_FL))
+ return -EPERM;
+ if (ext4_has_feature_project(inode->i_sb) &&
+ __kprojid_val(ei->i_projid) != new_projid)
+ return -EPERM;
+
+ return 0;
+}
+
static int ext4_ioctl_setflags(struct inode *inode,
unsigned int flags)
{
@@ -321,6 +344,20 @@ static int ext4_ioctl_setflags(struct inode *inode,
goto flags_out;
}

+ /*
+ * Wait for all pending directio and then flush all the dirty pages
+ * for this file. The flush marks all the pages readonly, so any
+ * subsequent attempt to write to the file (particularly mmap pages)
+ * will come through the filesystem and fail.
+ */
+ if (S_ISREG(inode->i_mode) && !IS_IMMUTABLE(inode) &&
+ (flags & EXT4_IMMUTABLE_FL)) {
+ inode_dio_wait(inode);
+ err = filemap_write_and_wait(inode->i_mapping);
+ if (err)
+ goto flags_out;
+ }
+
handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
@@ -750,7 +787,11 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return err;

inode_lock(inode);
- err = ext4_ioctl_setflags(inode, flags);
+ err = ext4_ioctl_check_immutable(inode,
+ from_kprojid(&init_user_ns, ei->i_projid),
+ flags);
+ if (!err)
+ err = ext4_ioctl_setflags(inode, flags);
inode_unlock(inode);
mnt_drop_write_file(filp);
return err;
@@ -1120,6 +1161,9 @@ resizefs_out:
goto out;
flags = (ei->i_flags & ~EXT4_FL_XFLAG_VISIBLE) |
(flags & EXT4_FL_XFLAG_VISIBLE);
+ err = ext4_ioctl_check_immutable(inode, fa.fsx_projid, flags);
+ if (err)
+ goto out;
err = ext4_ioctl_setflags(inode, flags);
if (err)
goto out;
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 2f5be02fc6f6..287631bb09e7 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -390,7 +390,8 @@ data_copy:

/* Even in case of data=writeback it is reasonable to pin
* inode to transaction, to prevent unexpected data loss */
- *err = ext4_jbd2_inode_add_write(handle, orig_inode);
+ *err = ext4_jbd2_inode_add_write(handle, orig_inode,
+ (loff_t)orig_page_offset << PAGE_SHIFT, replaced_size);

unlock_pages:
unlock_page(pagep[0]);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 4c5aa5df6573..61dc1b0e4465 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -81,8 +81,18 @@ static struct buffer_head *ext4_append(handle_t *handle,
static int ext4_dx_csum_verify(struct inode *inode,
struct ext4_dir_entry *dirent);

+/*
+ * Hints to ext4_read_dirblock regarding whether we expect a directory
+ * block being read to be an index block, or a block containing
+ * directory entries (and if the latter, whether it was found via a
+ * logical block in an htree index block). This is used to control
+ * what sort of sanity checkinig ext4_read_dirblock() will do on the
+ * directory block read from the storage device. EITHER will means
+ * the caller doesn't know what kind of directory block will be read,
+ * so no specific verification will be done.
+ */
typedef enum {
- EITHER, INDEX, DIRENT
+ EITHER, INDEX, DIRENT, DIRENT_HTREE
} dirblock_type_t;

#define ext4_read_dirblock(inode, block, type) \
@@ -108,11 +118,14 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,

return bh;
}
- if (!bh) {
+ if (!bh && (type == INDEX || type == DIRENT_HTREE)) {
ext4_error_inode(inode, func, line, block,
- "Directory hole found");
+ "Directory hole found for htree %s block",
+ (type == INDEX) ? "index" : "leaf");
return ERR_PTR(-EFSCORRUPTED);
}
+ if (!bh)
+ return NULL;
dirent = (struct ext4_dir_entry *) bh->b_data;
/* Determine whether or not we have an index block */
if (is_dx(inode)) {
@@ -979,7 +992,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,

dxtrace(printk(KERN_INFO "In htree dirblock_to_tree: block %lu\n",
(unsigned long)block));
- bh = ext4_read_dirblock(dir, block, DIRENT);
+ bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
if (IS_ERR(bh))
return PTR_ERR(bh);

@@ -1509,7 +1522,7 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
return (struct buffer_head *) frame;
do {
block = dx_get_block(frame->at);
- bh = ext4_read_dirblock(dir, block, DIRENT);
+ bh = ext4_read_dirblock(dir, block, DIRENT_HTREE);
if (IS_ERR(bh))
goto errout;

@@ -2079,6 +2092,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
blocks = dir->i_size >> sb->s_blocksize_bits;
for (block = 0; block < blocks; block++) {
bh = ext4_read_dirblock(dir, block, DIRENT);
+ if (bh == NULL) {
+ bh = ext4_bread(handle, dir, block,
+ EXT4_GET_BLOCKS_CREATE);
+ goto add_to_new_block;
+ }
if (IS_ERR(bh)) {
retval = PTR_ERR(bh);
bh = NULL;
@@ -2099,6 +2117,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
brelse(bh);
}
bh = ext4_append(handle, dir, &block);
+add_to_new_block:
if (IS_ERR(bh)) {
retval = PTR_ERR(bh);
bh = NULL;
@@ -2143,7 +2162,7 @@ again:
return PTR_ERR(frame);
entries = frame->entries;
at = frame->at;
- bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
+ bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT_HTREE);
if (IS_ERR(bh)) {
err = PTR_ERR(bh);
bh = NULL;
@@ -2691,7 +2710,10 @@ bool ext4_empty_dir(struct inode *inode)
EXT4_ERROR_INODE(inode, "invalid size");
return true;
}
- bh = ext4_read_dirblock(inode, 0, EITHER);
+ /* The first directory block must not be a hole,
+ * so treat it as DIRENT_HTREE
+ */
+ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
if (IS_ERR(bh))
return true;

@@ -2713,6 +2735,10 @@ bool ext4_empty_dir(struct inode *inode)
brelse(bh);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
bh = ext4_read_dirblock(inode, lblock, EITHER);
+ if (bh == NULL) {
+ offset += sb->s_blocksize;
+ continue;
+ }
if (IS_ERR(bh))
return true;
de = (struct ext4_dir_entry_2 *) bh->b_data;
@@ -3256,7 +3282,10 @@ static struct buffer_head *ext4_get_first_dir_block(handle_t *handle,
struct buffer_head *bh;

if (!ext4_has_inline_data(inode)) {
- bh = ext4_read_dirblock(inode, 0, EITHER);
+ /* The first directory block must not be a hole, so
+ * treat it as DIRENT_HTREE
+ */
+ bh = ext4_read_dirblock(inode, 0, DIRENT_HTREE);
if (IS_ERR(bh)) {
*retval = PTR_ERR(bh);
return NULL;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 65ea0355a4f6..24f86ffe11d7 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -187,14 +187,15 @@ static int journal_wait_on_commit_record(journal_t *journal,
* use writepages() because with dealyed allocation we may be doing
* block allocation in writepages().
*/
-static int journal_submit_inode_data_buffers(struct address_space *mapping)
+static int journal_submit_inode_data_buffers(struct address_space *mapping,
+ loff_t dirty_start, loff_t dirty_end)
{
int ret;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = mapping->nrpages * 2,
- .range_start = 0,
- .range_end = i_size_read(mapping->host),
+ .range_start = dirty_start,
+ .range_end = dirty_end,
};

ret = generic_writepages(mapping, &wbc);
@@ -218,6 +219,9 @@ static int journal_submit_data_buffers(journal_t *journal,

spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
+ loff_t dirty_start = jinode->i_dirty_start;
+ loff_t dirty_end = jinode->i_dirty_end;
+
if (!(jinode->i_flags & JI_WRITE_DATA))
continue;
mapping = jinode->i_vfs_inode->i_mapping;
@@ -230,7 +234,8 @@ static int journal_submit_data_buffers(journal_t *journal,
* only allocated blocks here.
*/
trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
- err = journal_submit_inode_data_buffers(mapping);
+ err = journal_submit_inode_data_buffers(mapping, dirty_start,
+ dirty_end);
if (!ret)
ret = err;
spin_lock(&journal->j_list_lock);
@@ -257,12 +262,16 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
/* For locking, see the comment in journal_submit_data_buffers() */
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
+ loff_t dirty_start = jinode->i_dirty_start;
+ loff_t dirty_end = jinode->i_dirty_end;
+
if (!(jinode->i_flags & JI_WAIT_DATA))
continue;
jinode->i_flags |= JI_COMMIT_RUNNING;
spin_unlock(&journal->j_list_lock);
- err = filemap_fdatawait_keep_errors(
- jinode->i_vfs_inode->i_mapping);
+ err = filemap_fdatawait_range_keep_errors(
+ jinode->i_vfs_inode->i_mapping, dirty_start,
+ dirty_end);
if (!ret)
ret = err;
spin_lock(&journal->j_list_lock);
@@ -282,6 +291,8 @@ static int journal_finish_inode_data_buffers(journal_t *journal,
&jinode->i_transaction->t_inode_list);
} else {
jinode->i_transaction = NULL;
+ jinode->i_dirty_start = 0;
+ jinode->i_dirty_end = 0;
}
}
spin_unlock(&journal->j_list_lock);
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index e9cf88f0bc29..df390a69c49a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -94,6 +94,8 @@ EXPORT_SYMBOL(jbd2_journal_try_to_free_buffers);
EXPORT_SYMBOL(jbd2_journal_force_commit);
EXPORT_SYMBOL(jbd2_journal_inode_add_write);
EXPORT_SYMBOL(jbd2_journal_inode_add_wait);
+EXPORT_SYMBOL(jbd2_journal_inode_ranged_write);
+EXPORT_SYMBOL(jbd2_journal_inode_ranged_wait);
EXPORT_SYMBOL(jbd2_journal_init_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_release_jbd_inode);
EXPORT_SYMBOL(jbd2_journal_begin_ordered_truncate);
@@ -2588,6 +2590,8 @@ void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode)
jinode->i_next_transaction = NULL;
jinode->i_vfs_inode = inode;
jinode->i_flags = 0;
+ jinode->i_dirty_start = 0;
+ jinode->i_dirty_end = 0;
INIT_LIST_HEAD(&jinode->i_list);
}

diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e20a6703531f..911ff18249b7 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -2500,7 +2500,7 @@ void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
* File inode in the inode list of the handle's transaction
*/
static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
- unsigned long flags)
+ unsigned long flags, loff_t start_byte, loff_t end_byte)
{
transaction_t *transaction = handle->h_transaction;
journal_t *journal;
@@ -2512,26 +2512,17 @@ static int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode,
jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
transaction->t_tid);

- /*
- * First check whether inode isn't already on the transaction's
- * lists without taking the lock. Note that this check is safe
- * without the lock as we cannot race with somebody removing inode
- * from the transaction. The reason is that we remove inode from the
- * transaction only in journal_release_jbd_inode() and when we commit
- * the transaction. We are guarded from the first case by holding
- * a reference to the inode. We are safe against the second case
- * because if jinode->i_transaction == transaction, commit code
- * cannot touch the transaction because we hold reference to it,
- * and if jinode->i_next_transaction == transaction, commit code
- * will only file the inode where we want it.
- */
- if ((jinode->i_transaction == transaction ||
- jinode->i_next_transaction == transaction) &&
- (jinode->i_flags & flags) == flags)
- return 0;
-
spin_lock(&journal->j_list_lock);
jinode->i_flags |= flags;
+
+ if (jinode->i_dirty_end) {
+ jinode->i_dirty_start = min(jinode->i_dirty_start, start_byte);
+ jinode->i_dirty_end = max(jinode->i_dirty_end, end_byte);
+ } else {
+ jinode->i_dirty_start = start_byte;
+ jinode->i_dirty_end = end_byte;
+ }
+
/* Is inode already attached where we need it? */
if (jinode->i_transaction == transaction ||
jinode->i_next_transaction == transaction)
@@ -2566,12 +2557,28 @@ done:
int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *jinode)
{
return jbd2_journal_file_inode(handle, jinode,
- JI_WRITE_DATA | JI_WAIT_DATA);
+ JI_WRITE_DATA | JI_WAIT_DATA, 0, LLONG_MAX);
}

int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *jinode)
{
- return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA);
+ return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA, 0,
+ LLONG_MAX);
+}
+
+int jbd2_journal_inode_ranged_write(handle_t *handle,
+ struct jbd2_inode *jinode, loff_t start_byte, loff_t length)
+{
+ return jbd2_journal_file_inode(handle, jinode,
+ JI_WRITE_DATA | JI_WAIT_DATA, start_byte,
+ start_byte + length - 1);
+}
+
+int jbd2_journal_inode_ranged_wait(handle_t *handle, struct jbd2_inode *jinode,
+ loff_t start_byte, loff_t length)
+{
+ return jbd2_journal_file_inode(handle, jinode, JI_WAIT_DATA,
+ start_byte, start_byte + length - 1);
}

/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d4e1b43a53c3..92420009b9bc 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2651,6 +2651,8 @@ extern int filemap_flush(struct address_space *);
extern int filemap_fdatawait_keep_errors(struct address_space *mapping);
extern int filemap_fdatawait_range(struct address_space *, loff_t lstart,
loff_t lend);
+extern int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+ loff_t start_byte, loff_t end_byte);

static inline int filemap_fdatawait(struct address_space *mapping)
{
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 583b82b5a1e9..1cf1b9b8e975 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -454,6 +454,22 @@ struct jbd2_inode {
* @i_flags: Flags of inode [j_list_lock]
*/
unsigned long i_flags;
+
+ /**
+ * @i_dirty_start:
+ *
+ * Offset in bytes where the dirty range for this inode starts.
+ * [j_list_lock]
+ */
+ loff_t i_dirty_start;
+
+ /**
+ * @i_dirty_end:
+ *
+ * Inclusive offset in bytes where the dirty range for this inode
+ * ends. [j_list_lock]
+ */
+ loff_t i_dirty_end;
};

struct jbd2_revoke_table_s;
@@ -1399,6 +1415,12 @@ extern int jbd2_journal_force_commit(journal_t *);
extern int jbd2_journal_force_commit_nested(journal_t *);
extern int jbd2_journal_inode_add_write(handle_t *handle, struct jbd2_inode *inode);
extern int jbd2_journal_inode_add_wait(handle_t *handle, struct jbd2_inode *inode);
+extern int jbd2_journal_inode_ranged_write(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
+extern int jbd2_journal_inode_ranged_wait(handle_t *handle,
+ struct jbd2_inode *inode, loff_t start_byte,
+ loff_t length);
extern int jbd2_journal_begin_ordered_truncate(journal_t *journal,
struct jbd2_inode *inode, loff_t new_size);
extern void jbd2_journal_init_jbd_inode(struct jbd2_inode *jinode, struct inode *inode);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 42fc852bf512..b22bc81f3669 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1030,6 +1030,11 @@ static inline int in_software_context(struct perf_event *event)
return event->ctx->pmu->task_ctx_nr == perf_sw_context;
}

+static inline int is_exclusive_pmu(struct pmu *pmu)
+{
+ return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE;
+}
+
extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void ___perf_sw_event(u32, u64, struct pt_regs *, u64);
diff --git a/include/net/dst.h b/include/net/dst.h
index 6cf0870414c7..ffc8ee0ea5e5 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -313,8 +313,9 @@ static inline bool dst_hold_safe(struct dst_entry *dst)
* @skb: buffer
*
* If dst is not yet refcounted and not destroyed, grab a ref on it.
+ * Returns true if dst is refcounted.
*/
-static inline void skb_dst_force(struct sk_buff *skb)
+static inline bool skb_dst_force(struct sk_buff *skb)
{
if (skb_dst_is_noref(skb)) {
struct dst_entry *dst = skb_dst(skb);
@@ -325,6 +326,8 @@ static inline void skb_dst_force(struct sk_buff *skb)

skb->_skb_refdst = (unsigned long)dst;
}
+
+ return skb->_skb_refdst != 0UL;
}


diff --git a/include/net/tcp.h b/include/net/tcp.h
index e75661f92daa..abcf53a6db04 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1054,7 +1054,8 @@ void tcp_get_default_congestion_control(struct net *net, char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
-int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit);
+int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
+ bool reinit, bool cap_net_admin);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

@@ -1646,6 +1647,11 @@ static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
return skb_rb_first(&sk->tcp_rtx_queue);
}

+static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
+{
+ return skb_rb_last(&sk->tcp_rtx_queue);
+}
+
static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
return skb_peek(&sk->sk_write_queue);
diff --git a/include/net/tls.h b/include/net/tls.h
index 954110575891..98f5ad0319a2 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -234,6 +234,7 @@ struct tls_offload_context_rx {
(ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)

+void tls_ctx_free(struct tls_context *ctx);
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3b61ff40bfe2..e8979c72514b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2541,6 +2541,9 @@ unlock:
return ret;
}

+static bool exclusive_event_installable(struct perf_event *event,
+ struct perf_event_context *ctx);
+
/*
* Attach a performance event to a context.
*
@@ -2555,6 +2558,8 @@ perf_install_in_context(struct perf_event_context *ctx,

lockdep_assert_held(&ctx->mutex);

+ WARN_ON_ONCE(!exclusive_event_installable(event, ctx));
+
if (event->cpu != -1)
event->cpu = cpu;

@@ -4341,7 +4346,7 @@ static int exclusive_event_init(struct perf_event *event)
{
struct pmu *pmu = event->pmu;

- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+ if (!is_exclusive_pmu(pmu))
return 0;

/*
@@ -4372,7 +4377,7 @@ static void exclusive_event_destroy(struct perf_event *event)
{
struct pmu *pmu = event->pmu;

- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+ if (!is_exclusive_pmu(pmu))
return;

/* see comment in exclusive_event_init() */
@@ -4392,14 +4397,15 @@ static bool exclusive_event_match(struct perf_event *e1, struct perf_event *e2)
return false;
}

-/* Called under the same ctx::mutex as perf_install_in_context() */
static bool exclusive_event_installable(struct perf_event *event,
struct perf_event_context *ctx)
{
struct perf_event *iter_event;
struct pmu *pmu = event->pmu;

- if (!(pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE))
+ lockdep_assert_held(&ctx->mutex);
+
+ if (!is_exclusive_pmu(pmu))
return true;

list_for_each_entry(iter_event, &ctx->event_list, event_entry) {
@@ -4446,12 +4452,20 @@ static void _free_event(struct perf_event *event)
if (event->destroy)
event->destroy(event);

- if (event->ctx)
- put_ctx(event->ctx);
-
+ /*
+ * Must be after ->destroy(), due to uprobe_perf_close() using
+ * hw.target.
+ */
if (event->hw.target)
put_task_struct(event->hw.target);

+ /*
+ * perf_event_free_task() relies on put_ctx() being 'last', in particular
+ * all task references must be cleaned up.
+ */
+ if (event->ctx)
+ put_ctx(event->ctx);
+
exclusive_event_destroy(event);
module_put(event->pmu->module);

@@ -4631,8 +4645,17 @@ again:
mutex_unlock(&event->child_mutex);

list_for_each_entry_safe(child, tmp, &free_list, child_list) {
+ void *var = &child->ctx->refcount;
+
list_del(&child->child_list);
free_event(child);
+
+ /*
+ * Wake any perf_event_free_task() waiting for this event to be
+ * freed.
+ */
+ smp_mb(); /* pairs with wait_var_event() */
+ wake_up_var(var);
}

no_ctx:
@@ -10613,11 +10636,6 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_alloc;
}

- if ((pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE) && group_leader) {
- err = -EBUSY;
- goto err_context;
- }
-
/*
* Look up the group leader (we will attach this event to it):
*/
@@ -10705,6 +10723,18 @@ SYSCALL_DEFINE5(perf_event_open,
move_group = 0;
}
}
+
+ /*
1468     + * Failure to create exclusive events returns -EBUSY.
1469     + */
1470     + err = -EBUSY;
1471     + if (!exclusive_event_installable(group_leader, ctx))
1472     + goto err_locked;
1473     +
1474     + for_each_sibling_event(sibling, group_leader) {
1475     + if (!exclusive_event_installable(sibling, ctx))
1476     + goto err_locked;
1477     + }
1478     } else {
1479     mutex_lock(&ctx->mutex);
1480     }
1481     @@ -10741,9 +10771,6 @@ SYSCALL_DEFINE5(perf_event_open,
1482     * because we need to serialize with concurrent event creation.
1483     */
1484     if (!exclusive_event_installable(event, ctx)) {
1485     - /* exclusive and group stuff are assumed mutually exclusive */
1486     - WARN_ON_ONCE(move_group);
1487     -
1488     err = -EBUSY;
1489     goto err_locked;
1490     }
1491     @@ -11210,11 +11237,11 @@ static void perf_free_event(struct perf_event *event,
1492     }
1493    
1494     /*
1495     - * Free an unexposed, unused context as created by inheritance by
1496     - * perf_event_init_task below, used by fork() in case of fail.
1497     + * Free a context as created by inheritance by perf_event_init_task() below,
1498     + * used by fork() in case of fail.
1499     *
1500     - * Not all locks are strictly required, but take them anyway to be nice and
1501     - * help out with the lockdep assertions.
1502     + * Even though the task has never lived, the context and events have been
1503     + * exposed through the child_list, so we must take care tearing it all down.
1504     */
1505     void perf_event_free_task(struct task_struct *task)
1506     {
1507     @@ -11244,7 +11271,23 @@ void perf_event_free_task(struct task_struct *task)
1508     perf_free_event(event, ctx);
1509    
1510     mutex_unlock(&ctx->mutex);
1511     - put_ctx(ctx);
1512     +
1513     + /*
1514     + * perf_event_release_kernel() could've stolen some of our
1515     + * child events and still have them on its free_list. In that
1516     + * case we must wait for these events to have been freed (in
1517     + * particular all their references to this task must've been
1518     + * dropped).
1519     + *
1520     + * Without this copy_process() will unconditionally free this
1521     + * task (irrespective of its reference count) and
1522     + * _free_event()'s put_task_struct(event->hw.target) will be a
1523     + * use-after-free.
1524     + *
1525     + * Wait for all events to drop their context reference.
1526     + */
1527     + wait_var_event(&ctx->refcount, atomic_read(&ctx->refcount) == 1);
1528     + put_ctx(ctx); /* must be last */
1529     }
1530     }
1531    
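[Note: the wake_up_var() added to the free_list walk above pairs with the wait_var_event() that perf_event_free_task() gains in the same hunk series: the releaser frees a stolen child, publishes the refcount drop, then wakes the waiter. The generic protocol, sketched with a hypothetical object:

	/* Waiter side: sleep until the condition on this address holds
	 * (re-checked after every wakeup).
	 */
	wait_var_event(&obj->refcount, atomic_read(&obj->refcount) == 1);

	/* Waker side: change state first, then order it before the wakeup. */
	atomic_dec(&obj->refcount);
	smp_mb();			/* pairs with wait_var_event() */
	wake_up_var(&obj->refcount);
]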
1532     diff --git a/mm/filemap.c b/mm/filemap.c
1533     index 52517f28e6f4..287f3fa02e5e 100644
1534     --- a/mm/filemap.c
1535     +++ b/mm/filemap.c
1536     @@ -561,6 +561,28 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
1537     }
1538     EXPORT_SYMBOL(filemap_fdatawait_range);
1539    
1540     +/**
1541     + * filemap_fdatawait_range_keep_errors - wait for writeback to complete
1542     + * @mapping: address space structure to wait for
1543     + * @start_byte: offset in bytes where the range starts
1544     + * @end_byte: offset in bytes where the range ends (inclusive)
1545     + *
1546     + * Walk the list of under-writeback pages of the given address space in the
1547     + * given range and wait for all of them. Unlike filemap_fdatawait_range(),
1548     + * this function does not clear error status of the address space.
1549     + *
1550     + * Use this function if callers don't handle errors themselves. Expected
1551     + * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
1552     + * fsfreeze(8)
1553     + */
1554     +int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
1555     + loff_t start_byte, loff_t end_byte)
1556     +{
1557     + __filemap_fdatawait_range(mapping, start_byte, end_byte);
1558     + return filemap_check_and_keep_errors(mapping);
1559     +}
1560     +EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
1561     +
1562     /**
1563     * file_fdatawait_range - wait for writeback to complete
1564     * @file: file pointing to address space structure to wait for
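[Note: filemap_fdatawait_range_keep_errors() waits like filemap_fdatawait_range() but leaves the mapping's error state (AS_EIO/AS_ENOSPC) armed, so a later fsync() on the same mapping still observes the failure. A sketch of the intended sync(2)-style call site, hypothetical logging:

	/* Kernel context assumed; error state stays set for fsync(). */
	int err = filemap_fdatawait_range_keep_errors(inode->i_mapping,
						      0, LLONG_MAX);
	if (err)
		pr_debug("writeback finished with error %d\n", err);
]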
1565     diff --git a/mm/vmscan.c b/mm/vmscan.c
1566     index e42f44cf7b43..576379e87421 100644
1567     --- a/mm/vmscan.c
1568     +++ b/mm/vmscan.c
1569     @@ -2190,7 +2190,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
1570     * 10TB 320 32GB
1571     */
1572     static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
1573     - struct scan_control *sc, bool actual_reclaim)
1574     + struct scan_control *sc, bool trace)
1575     {
1576     enum lru_list active_lru = file * LRU_FILE + LRU_ACTIVE;
1577     struct pglist_data *pgdat = lruvec_pgdat(lruvec);
1578     @@ -2216,7 +2216,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
1579     * rid of the stale workingset quickly.
1580     */
1581     refaults = lruvec_page_state(lruvec, WORKINGSET_ACTIVATE);
1582     - if (file && actual_reclaim && lruvec->refaults != refaults) {
1583     + if (file && lruvec->refaults != refaults) {
1584     inactive_ratio = 0;
1585     } else {
1586     gb = (inactive + active) >> (30 - PAGE_SHIFT);
1587     @@ -2226,7 +2226,7 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
1588     inactive_ratio = 1;
1589     }
1590    
1591     - if (actual_reclaim)
1592     + if (trace)
1593     trace_mm_vmscan_inactive_list_is_low(pgdat->node_id, sc->reclaim_idx,
1594     lruvec_lru_size(lruvec, inactive_lru, MAX_NR_ZONES), inactive,
1595     lruvec_lru_size(lruvec, active_lru, MAX_NR_ZONES), active,
1596     diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
1597     index fed0ff446abb..2532c1a19645 100644
1598     --- a/net/bridge/br_input.c
1599     +++ b/net/bridge/br_input.c
1600     @@ -79,7 +79,6 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
1601     struct net_bridge_fdb_entry *dst = NULL;
1602     struct net_bridge_mdb_entry *mdst;
1603     bool local_rcv, mcast_hit = false;
1604     - const unsigned char *dest;
1605     struct net_bridge *br;
1606     u16 vid = 0;
1607    
1608     @@ -97,10 +96,9 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
1609     br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
1610    
1611     local_rcv = !!(br->dev->flags & IFF_PROMISC);
1612     - dest = eth_hdr(skb)->h_dest;
1613     - if (is_multicast_ether_addr(dest)) {
1614     + if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
1615     /* by definition the broadcast is also a multicast address */
1616     - if (is_broadcast_ether_addr(dest)) {
1617     + if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
1618     pkt_type = BR_PKT_BROADCAST;
1619     local_rcv = true;
1620     } else {
1621     @@ -150,7 +148,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
1622     }
1623     break;
1624     case BR_PKT_UNICAST:
1625     - dst = br_fdb_find_rcu(br, dest, vid);
1626     + dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
1627     default:
1628     break;
1629     }
1630     diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
1631     index 75901c4641b1..fb54d32321ec 100644
1632     --- a/net/bridge/br_multicast.c
1633     +++ b/net/bridge/br_multicast.c
1634     @@ -1147,6 +1147,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1635     int type;
1636     int err = 0;
1637     __be32 group;
1638     + u16 nsrcs;
1639    
1640     ih = igmpv3_report_hdr(skb);
1641     num = ntohs(ih->ngrec);
1642     @@ -1160,8 +1161,9 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1643     grec = (void *)(skb->data + len - sizeof(*grec));
1644     group = grec->grec_mca;
1645     type = grec->grec_type;
1646     + nsrcs = ntohs(grec->grec_nsrcs);
1647    
1648     - len += ntohs(grec->grec_nsrcs) * 4;
1649     + len += nsrcs * 4;
1650     if (!pskb_may_pull(skb, len))
1651     return -EINVAL;
1652    
1653     @@ -1182,7 +1184,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1654     src = eth_hdr(skb)->h_source;
1655     if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1656     type == IGMPV3_MODE_IS_INCLUDE) &&
1657     - ntohs(grec->grec_nsrcs) == 0) {
1658     + nsrcs == 0) {
1659     br_ip4_multicast_leave_group(br, port, group, vid, src);
1660     } else {
1661     err = br_ip4_multicast_add_group(br, port, group, vid,
1662     @@ -1217,23 +1219,26 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1663     len = skb_transport_offset(skb) + sizeof(*icmp6h);
1664    
1665     for (i = 0; i < num; i++) {
1666     - __be16 *nsrcs, _nsrcs;
1667     -
1668     - nsrcs = skb_header_pointer(skb,
1669     - len + offsetof(struct mld2_grec,
1670     - grec_nsrcs),
1671     - sizeof(_nsrcs), &_nsrcs);
1672     - if (!nsrcs)
1673     + __be16 *_nsrcs, __nsrcs;
1674     + u16 nsrcs;
1675     +
1676     + _nsrcs = skb_header_pointer(skb,
1677     + len + offsetof(struct mld2_grec,
1678     + grec_nsrcs),
1679     + sizeof(__nsrcs), &__nsrcs);
1680     + if (!_nsrcs)
1681     return -EINVAL;
1682    
1683     + nsrcs = ntohs(*_nsrcs);
1684     +
1685     if (!pskb_may_pull(skb,
1686     len + sizeof(*grec) +
1687     - sizeof(struct in6_addr) * ntohs(*nsrcs)))
1688     + sizeof(struct in6_addr) * nsrcs))
1689     return -EINVAL;
1690    
1691     grec = (struct mld2_grec *)(skb->data + len);
1692     len += sizeof(*grec) +
1693     - sizeof(struct in6_addr) * ntohs(*nsrcs);
1694     + sizeof(struct in6_addr) * nsrcs;
1695    
1696     /* We treat these as MLDv1 reports for now. */
1697     switch (grec->grec_type) {
1698     @@ -1252,7 +1257,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1699     src = eth_hdr(skb)->h_source;
1700     if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1701     grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1702     - ntohs(*nsrcs) == 0) {
1703     + nsrcs == 0) {
1704     br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1705     vid, src);
1706     } else {
1707     @@ -1505,7 +1510,6 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1708     struct sk_buff *skb,
1709     u16 vid)
1710     {
1711     - const struct ipv6hdr *ip6h = ipv6_hdr(skb);
1712     struct mld_msg *mld;
1713     struct net_bridge_mdb_entry *mp;
1714     struct mld2_query *mld2q;
1715     @@ -1549,7 +1553,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
1716    
1717     if (is_general_query) {
1718     saddr.proto = htons(ETH_P_IPV6);
1719     - saddr.u.ip6 = ip6h->saddr;
1720     + saddr.u.ip6 = ipv6_hdr(skb)->saddr;
1721    
1722     br_multicast_query_received(br, port, &br->ip6_other_query,
1723     &saddr, max_delay);
1724     diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
1725     index 1b75d6bf12bd..37ddcea3fc96 100644
1726     --- a/net/bridge/br_stp_bpdu.c
1727     +++ b/net/bridge/br_stp_bpdu.c
1728     @@ -147,7 +147,6 @@ void br_send_tcn_bpdu(struct net_bridge_port *p)
1729     void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
1730     struct net_device *dev)
1731     {
1732     - const unsigned char *dest = eth_hdr(skb)->h_dest;
1733     struct net_bridge_port *p;
1734     struct net_bridge *br;
1735     const unsigned char *buf;
1736     @@ -176,7 +175,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
1737     if (p->state == BR_STATE_DISABLED)
1738     goto out;
1739    
1740     - if (!ether_addr_equal(dest, br->group_addr))
1741     + if (!ether_addr_equal(eth_hdr(skb)->h_dest, br->group_addr))
1742     goto out;
1743    
1744     if (p->flags & BR_BPDU_GUARD) {
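[Note: the three bridge hunks above share one fix: pskb_may_pull() may reallocate skb->head, so a pointer cached from eth_hdr() or ipv6_hdr() before the pull can dangle afterwards; re-evaluating the accessor after every pull avoids the stale pointer. The hazard in miniature, hypothetical:

	const unsigned char *dest = eth_hdr(skb)->h_dest; /* into skb->head */
	if (!pskb_may_pull(skb, len))	/* may allocate a new head */
		return -EINVAL;
	/* 'dest' may now be dangling; re-read the header instead: */
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		pkt_type = BR_PKT_MULTICAST;	/* illustration only */
]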
1745     diff --git a/net/core/filter.c b/net/core/filter.c
1746     index 34ec9324737b..c996380f2959 100644
1747     --- a/net/core/filter.c
1748     +++ b/net/core/filter.c
1749     @@ -3991,7 +3991,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
1750     TCP_CA_NAME_MAX-1));
1751     name[TCP_CA_NAME_MAX-1] = 0;
1752     ret = tcp_set_congestion_control(sk, name, false,
1753     - reinit);
1754     + reinit, true);
1755     } else {
1756     struct tcp_sock *tp = tcp_sk(sk);
1757    
1758     diff --git a/net/core/neighbour.c b/net/core/neighbour.c
1759     index cd9e991f21d7..c52d6e6b341c 100644
1760     --- a/net/core/neighbour.c
1761     +++ b/net/core/neighbour.c
1762     @@ -1021,6 +1021,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1763    
1764     atomic_set(&neigh->probes,
1765     NEIGH_VAR(neigh->parms, UCAST_PROBES));
1766     + neigh_del_timer(neigh);
1767     neigh->nud_state = NUD_INCOMPLETE;
1768     neigh->updated = now;
1769     next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1770     @@ -1037,6 +1038,7 @@ int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
1771     }
1772     } else if (neigh->nud_state & NUD_STALE) {
1773     neigh_dbg(2, "neigh %p is delayed\n", neigh);
1774     + neigh_del_timer(neigh);
1775     neigh->nud_state = NUD_DELAY;
1776     neigh->updated = jiffies;
1777     neigh_add_timer(neigh, jiffies +
1778     diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
1779     index ea4bd8a52422..d23746143cd2 100644
1780     --- a/net/ipv4/devinet.c
1781     +++ b/net/ipv4/devinet.c
1782     @@ -66,6 +66,11 @@
1783     #include <net/net_namespace.h>
1784     #include <net/addrconf.h>
1785    
1786     +#define IPV6ONLY_FLAGS \
1787     + (IFA_F_NODAD | IFA_F_OPTIMISTIC | IFA_F_DADFAILED | \
1788     + IFA_F_HOMEADDRESS | IFA_F_TENTATIVE | \
1789     + IFA_F_MANAGETEMPADDR | IFA_F_STABLE_PRIVACY)
1790     +
1791     static struct ipv4_devconf ipv4_devconf = {
1792     .data = {
1793     [IPV4_DEVCONF_ACCEPT_REDIRECTS - 1] = 1,
1794     @@ -462,6 +467,9 @@ static int __inet_insert_ifa(struct in_ifaddr *ifa, struct nlmsghdr *nlh,
1795     ifa->ifa_flags &= ~IFA_F_SECONDARY;
1796     last_primary = &in_dev->ifa_list;
1797    
1798     + /* Don't set IPv6 only flags to IPv4 addresses */
1799     + ifa->ifa_flags &= ~IPV6ONLY_FLAGS;
1800     +
1801     for (ifap = &in_dev->ifa_list; (ifa1 = *ifap) != NULL;
1802     ifap = &ifa1->ifa_next) {
1803     if (!(ifa1->ifa_flags & IFA_F_SECONDARY) &&
1804     diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
1805     index d187ee8156a1..b2240b7f225d 100644
1806     --- a/net/ipv4/igmp.c
1807     +++ b/net/ipv4/igmp.c
1808     @@ -1218,12 +1218,8 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
1809     if (pmc) {
1810     im->interface = pmc->interface;
1811     if (im->sfmode == MCAST_INCLUDE) {
1812     - im->tomb = pmc->tomb;
1813     - pmc->tomb = NULL;
1814     -
1815     - im->sources = pmc->sources;
1816     - pmc->sources = NULL;
1817     -
1818     + swap(im->tomb, pmc->tomb);
1819     + swap(im->sources, pmc->sources);
1820     for (psf = im->sources; psf; psf = psf->sf_next)
1821     psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
1822     } else {
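[Note: swap() rather than move-and-NULL matters here because im may already carry tomb/sources lists; handing the old lists back to pmc lets them be freed together with pmc instead of leaking. In miniature:

	/* Move-and-NULL discards whatever im->sources held:
	 *	im->sources = pmc->sources; pmc->sources = NULL;
	 * swap() keeps each list owned by exactly one side:
	 */
	swap(im->sources, pmc->sources);  /* old im list now freed via pmc */
]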
1823     diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
1824     index 364e6fdaa38f..b7ef367fe6a1 100644
1825     --- a/net/ipv4/tcp.c
1826     +++ b/net/ipv4/tcp.c
1827     @@ -2594,6 +2594,8 @@ int tcp_disconnect(struct sock *sk, int flags)
1828     tcp_saved_syn_free(tp);
1829     tp->compressed_ack = 0;
1830     tp->bytes_sent = 0;
1831     + tp->bytes_acked = 0;
1832     + tp->bytes_received = 0;
1833     tp->bytes_retrans = 0;
1834     tp->dsack_dups = 0;
1835     tp->reord_seen = 0;
1836     @@ -2729,7 +2731,9 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
1837     name[val] = 0;
1838    
1839     lock_sock(sk);
1840     - err = tcp_set_congestion_control(sk, name, true, true);
1841     + err = tcp_set_congestion_control(sk, name, true, true,
1842     + ns_capable(sock_net(sk)->user_ns,
1843     + CAP_NET_ADMIN));
1844     release_sock(sk);
1845     return err;
1846     }
1847     diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
1848     index bc6c02f16243..48f79db446a0 100644
1849     --- a/net/ipv4/tcp_cong.c
1850     +++ b/net/ipv4/tcp_cong.c
1851     @@ -332,7 +332,8 @@ out:
1852     * tcp_reinit_congestion_control (if the current congestion control was
1853     * already initialized.
1854     */
1855     -int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, bool reinit)
1856     +int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
1857     + bool reinit, bool cap_net_admin)
1858     {
1859     struct inet_connection_sock *icsk = inet_csk(sk);
1860     const struct tcp_congestion_ops *ca;
1861     @@ -368,8 +369,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, boo
1862     } else {
1863     err = -EBUSY;
1864     }
1865     - } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
1866     - ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))) {
1867     + } else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
1868     err = -EPERM;
1869     } else if (!try_module_get(ca->owner)) {
1870     err = -EBUSY;
1871     diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
1872     index 221d9b72423b..88c7e821fd11 100644
1873     --- a/net/ipv4/tcp_output.c
1874     +++ b/net/ipv4/tcp_output.c
1875     @@ -1289,6 +1289,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1876     struct tcp_sock *tp = tcp_sk(sk);
1877     struct sk_buff *buff;
1878     int nsize, old_factor;
1879     + long limit;
1880     int nlen;
1881     u8 flags;
1882    
1883     @@ -1299,8 +1300,16 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1884     if (nsize < 0)
1885     nsize = 0;
1886    
1887     - if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf &&
1888     - tcp_queue != TCP_FRAG_IN_WRITE_QUEUE)) {
1889     + /* tcp_sendmsg() can overshoot sk_wmem_queued by one full size skb.
1890     + * We need some allowance to not penalize applications setting small
1891     + * SO_SNDBUF values.
1892     + * Also allow first and last skb in retransmit queue to be split.
1893     + */
1894     + limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
1895     + if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
1896     + tcp_queue != TCP_FRAG_IN_WRITE_QUEUE &&
1897     + skb != tcp_rtx_queue_head(sk) &&
1898     + skb != tcp_rtx_queue_tail(sk))) {
1899     NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPWQUEUETOOBIG);
1900     return -ENOMEM;
1901     }
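[Note: rough numbers behind the new limit (approximate, since SKB_TRUESIZE() adds a few hundred bytes of per-skb overhead): with GSO_MAX_SIZE = 64 KB and SO_SNDBUF = 16 KB,

	limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
	/* ~16K + 2 * ~66K ~= 148K; since the test is
	 * (sk_wmem_queued >> 1) > limit, -ENOMEM fires only past roughly
	 * 296 KB queued, so small-SO_SNDBUF applications stay unaffected.
	 */
]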
1902     diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
1903     index a6c0479c1d55..bbb5ffb3397d 100644
1904     --- a/net/ipv6/ip6_fib.c
1905     +++ b/net/ipv6/ip6_fib.c
1906     @@ -1081,8 +1081,24 @@ add:
1907     err = call_fib6_entry_notifiers(info->nl_net,
1908     FIB_EVENT_ENTRY_ADD,
1909     rt, extack);
1910     - if (err)
1911     + if (err) {
1912     + struct fib6_info *sibling, *next_sibling;
1913     +
1914     + /* If the route has siblings, then it first
1915     + * needs to be unlinked from them.
1916     + */
1917     + if (!rt->fib6_nsiblings)
1918     + return err;
1919     +
1920     + list_for_each_entry_safe(sibling, next_sibling,
1921     + &rt->fib6_siblings,
1922     + fib6_siblings)
1923     + sibling->fib6_nsiblings--;
1924     + rt->fib6_nsiblings = 0;
1925     + list_del_init(&rt->fib6_siblings);
1926     + rt6_multipath_rebalance(next_sibling);
1927     return err;
1928     + }
1929    
1930     rcu_assign_pointer(rt->fib6_next, iter);
1931     atomic_inc(&rt->fib6_ref);
1932     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
1933     index 24f7b2cf504b..81220077d62f 100644
1934     --- a/net/ipv6/route.c
1935     +++ b/net/ipv6/route.c
1936     @@ -2214,7 +2214,7 @@ static struct dst_entry *rt6_check(struct rt6_info *rt,
1937     {
1938     u32 rt_cookie = 0;
1939    
1940     - if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
1941     + if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
1942     rt_cookie != cookie)
1943     return NULL;
1944    
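[Note: the rewritten condition also invalidates the dst when from is NULL; previously a NULL from made the first clause false, leaving only the rt_cookie != cookie test, which a zero cookie could pass. Distilled into a hypothetical predicate:

	/* Sketch: keep the dst only if 'from' exists, yields a cookie, and
	 * the cookie still matches.
	 */
	static bool rt6_from_valid(const struct fib6_info *from, u32 cookie)
	{
		u32 rt_cookie = 0;

		return from && fib6_get_cookie_safe(from, &rt_cookie) &&
		       rt_cookie == cookie;
	}
]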
1945     diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c
1946     index 7569ba00e732..a96a8c16baf9 100644
1947     --- a/net/netfilter/nf_queue.c
1948     +++ b/net/netfilter/nf_queue.c
1949     @@ -174,6 +174,11 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
1950     goto err;
1951     }
1952    
1953     + if (!skb_dst_force(skb) && state->hook != NF_INET_PRE_ROUTING) {
1954     + status = -ENETDOWN;
1955     + goto err;
1956     + }
1957     +
1958     *entry = (struct nf_queue_entry) {
1959     .skb = skb,
1960     .state = *state,
1961     @@ -182,7 +187,6 @@ static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
1962     };
1963    
1964     nf_queue_entry_get_refs(entry);
1965     - skb_dst_force(skb);
1966    
1967     switch (entry->state.pf) {
1968     case AF_INET:
1969     diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
1970     index 71ffd1a6dc7c..43910e50752c 100644
1971     --- a/net/netrom/af_netrom.c
1972     +++ b/net/netrom/af_netrom.c
1973     @@ -872,7 +872,7 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
1974     unsigned short frametype, flags, window, timeout;
1975     int ret;
1976    
1977     - skb->sk = NULL; /* Initially we don't know who it's for */
1978     + skb_orphan(skb);
1979    
1980     /*
1981     * skb->data points to the netrom frame start
1982     @@ -970,7 +970,9 @@ int nr_rx_frame(struct sk_buff *skb, struct net_device *dev)
1983    
1984     window = skb->data[20];
1985    
1986     + sock_hold(make);
1987     skb->sk = make;
1988     + skb->destructor = sock_efree;
1989     make->sk_state = TCP_ESTABLISHED;
1990    
1991     /* Fill in his circuit details */
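[Note: attaching a socket to an skb without taking a reference lets the socket be freed while the skb is still in flight; the hunk pins it with sock_hold() and lets sock_efree() (which does sock_put(skb->sk)) drop the pin when the skb is consumed. The general pattern, hypothetical context:

	sock_hold(sk);			/* pin 'sk' for the skb's lifetime */
	skb->sk = sk;
	skb->destructor = sock_efree;	/* sock_put() when skb is freed */
]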
1992     diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
1993     index 908f25e3773e..5405d073804c 100644
1994     --- a/net/nfc/nci/data.c
1995     +++ b/net/nfc/nci/data.c
1996     @@ -119,7 +119,7 @@ static int nci_queue_tx_data_frags(struct nci_dev *ndev,
1997     conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
1998     if (!conn_info) {
1999     rc = -EPROTO;
2000     - goto free_exit;
2001     + goto exit;
2002     }
2003    
2004     __skb_queue_head_init(&frags_q);
2005     diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
2006     index 85ae53d8fd09..8211e8e97c96 100644
2007     --- a/net/openvswitch/actions.c
2008     +++ b/net/openvswitch/actions.c
2009     @@ -175,8 +175,7 @@ static void update_ethertype(struct sk_buff *skb, struct ethhdr *hdr,
2010     if (skb->ip_summed == CHECKSUM_COMPLETE) {
2011     __be16 diff[] = { ~(hdr->h_proto), ethertype };
2012    
2013     - skb->csum = ~csum_partial((char *)diff, sizeof(diff),
2014     - ~skb->csum);
2015     + skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
2016     }
2017    
2018     hdr->h_proto = ethertype;
2019     @@ -268,8 +267,7 @@ static int set_mpls(struct sk_buff *skb, struct sw_flow_key *flow_key,
2020     if (skb->ip_summed == CHECKSUM_COMPLETE) {
2021     __be32 diff[] = { ~(stack->label_stack_entry), lse };
2022    
2023     - skb->csum = ~csum_partial((char *)diff, sizeof(diff),
2024     - ~skb->csum);
2025     + skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
2026     }
2027    
2028     stack->label_stack_entry = lse;
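[Note: with CHECKSUM_COMPLETE, skb->csum holds the plain one's-complement sum of the packet, not its bitwise complement; folding in { ~old, new } already substitutes new for old, because x + ~x == 0xffff (negative zero) in one's-complement arithmetic, so the extra pre/post inversions the old code applied corrupted the sum. Illustrative values, hypothetical:

	__be16 old_proto = htons(ETH_P_IP), new_proto = htons(ETH_P_IPV6);
	__be16 diff[] = { ~old_proto, new_proto };
	/* adds ~old (cancelling old) and new into the running sum */
	skb->csum = csum_partial((char *)diff, sizeof(diff), skb->csum);
]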
2029     diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
2030     index 3c39b8805d01..d76e5e58905d 100644
2031     --- a/net/rxrpc/af_rxrpc.c
2032     +++ b/net/rxrpc/af_rxrpc.c
2033     @@ -552,6 +552,7 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2034    
2035     switch (rx->sk.sk_state) {
2036     case RXRPC_UNBOUND:
2037     + case RXRPC_CLIENT_UNBOUND:
2038     rx->srx.srx_family = AF_RXRPC;
2039     rx->srx.srx_service = 0;
2040     rx->srx.transport_type = SOCK_DGRAM;
2041     @@ -576,10 +577,9 @@ static int rxrpc_sendmsg(struct socket *sock, struct msghdr *m, size_t len)
2042     }
2043    
2044     rx->local = local;
2045     - rx->sk.sk_state = RXRPC_CLIENT_UNBOUND;
2046     + rx->sk.sk_state = RXRPC_CLIENT_BOUND;
2047     /* Fall through */
2048    
2049     - case RXRPC_CLIENT_UNBOUND:
2050     case RXRPC_CLIENT_BOUND:
2051     if (!m->msg_name &&
2052     test_bit(RXRPC_SOCK_CONNECTED, &rx->flags)) {
2053     diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
2054     index 2167c6ca55e3..4159bcb479c6 100644
2055     --- a/net/sched/cls_api.c
2056     +++ b/net/sched/cls_api.c
2057     @@ -1325,6 +1325,9 @@ replay:
2058     tcf_chain_tp_insert(chain, &chain_info, tp);
2059     tfilter_notify(net, skb, n, tp, block, q, parent, fh,
2060     RTM_NEWTFILTER, false);
2061     + /* q pointer is NULL for shared blocks */
2062     + if (q)
2063     + q->flags &= ~TCQ_F_CAN_BYPASS;
2064     } else {
2065     if (tp_created)
2066     tcf_proto_destroy(tp, NULL);
2067     diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
2068     index 6c0a9d5dbf94..137692cb8b4f 100644
2069     --- a/net/sched/sch_fq_codel.c
2070     +++ b/net/sched/sch_fq_codel.c
2071     @@ -600,8 +600,6 @@ static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
2072     static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
2073     u32 classid)
2074     {
2075     - /* we cannot bypass queue discipline anymore */
2076     - sch->flags &= ~TCQ_F_CAN_BYPASS;
2077     return 0;
2078     }
2079    
2080     diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
2081     index 2f2678197760..650f21463853 100644
2082     --- a/net/sched/sch_sfq.c
2083     +++ b/net/sched/sch_sfq.c
2084     @@ -828,8 +828,6 @@ static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
2085     static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
2086     u32 classid)
2087     {
2088     - /* we cannot bypass queue discipline anymore */
2089     - sch->flags &= ~TCQ_F_CAN_BYPASS;
2090     return 0;
2091     }
2092    
2093     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
2094     index 8c00a7ef1bcd..9f5b4e547b63 100644
2095     --- a/net/sctp/socket.c
2096     +++ b/net/sctp/socket.c
2097     @@ -4507,34 +4507,18 @@ out_nounlock:
2098     static int sctp_connect(struct sock *sk, struct sockaddr *addr,
2099     int addr_len, int flags)
2100     {
2101     - struct inet_sock *inet = inet_sk(sk);
2102     struct sctp_af *af;
2103     - int err = 0;
2104     + int err = -EINVAL;
2105    
2106     lock_sock(sk);
2107    
2108     pr_debug("%s: sk:%p, sockaddr:%p, addr_len:%d\n", __func__, sk,
2109     addr, addr_len);
2110    
2111     - /* We may need to bind the socket. */
2112     - if (!inet->inet_num) {
2113     - if (sk->sk_prot->get_port(sk, 0)) {
2114     - release_sock(sk);
2115     - return -EAGAIN;
2116     - }
2117     - inet->inet_sport = htons(inet->inet_num);
2118     - }
2119     -
2120     /* Validate addr_len before calling common connect/connectx routine. */
2121     af = sctp_get_af_specific(addr->sa_family);
2122     - if (!af || addr_len < af->sockaddr_len) {
2123     - err = -EINVAL;
2124     - } else {
2125     - /* Pass correct addr len to common routine (so it knows there
2126     - * is only one address being passed.
2127     - */
2128     + if (af && addr_len >= af->sockaddr_len)
2129     err = __sctp_connect(sk, addr, af->sockaddr_len, flags, NULL);
2130     - }
2131    
2132     release_sock(sk);
2133     return err;
2134     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
2135     index 3b47457862cc..0da57938a6c5 100644
2136     --- a/net/sctp/stream.c
2137     +++ b/net/sctp/stream.c
2138     @@ -253,13 +253,20 @@ out:
2139     int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
2140     {
2141     struct sctp_stream_out_ext *soute;
2142     + int ret;
2143    
2144     soute = kzalloc(sizeof(*soute), GFP_KERNEL);
2145     if (!soute)
2146     return -ENOMEM;
2147     SCTP_SO(stream, sid)->ext = soute;
2148    
2149     - return sctp_sched_init_sid(stream, sid, GFP_KERNEL);
2150     + ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
2151     + if (ret) {
2152     + kfree(SCTP_SO(stream, sid)->ext);
2153     + SCTP_SO(stream, sid)->ext = NULL;
2154     + }
2155     +
2156     + return ret;
2157     }
2158    
2159     void sctp_stream_free(struct sctp_stream *stream)
2160     diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
2161     index ead29c2aefa7..0a613e0ef3bf 100644
2162     --- a/net/tls/tls_device.c
2163     +++ b/net/tls/tls_device.c
2164     @@ -61,7 +61,7 @@ static void tls_device_free_ctx(struct tls_context *ctx)
2165     if (ctx->rx_conf == TLS_HW)
2166     kfree(tls_offload_ctx_rx(ctx));
2167    
2168     - kfree(ctx);
2169     + tls_ctx_free(ctx);
2170     }
2171    
2172     static void tls_device_gc_task(struct work_struct *work)
2173     diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
2174     index 25b3fb585777..4c0ac79f82d4 100644
2175     --- a/net/tls/tls_main.c
2176     +++ b/net/tls/tls_main.c
2177     @@ -241,7 +241,7 @@ static void tls_write_space(struct sock *sk)
2178     ctx->sk_write_space(sk);
2179     }
2180    
2181     -static void tls_ctx_free(struct tls_context *ctx)
2182     +void tls_ctx_free(struct tls_context *ctx)
2183     {
2184     if (!ctx)
2185     return;