Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0242-5.4.143-all-fixes.patch



Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 78020 bytes
-sync kernel patches
1 diff --git a/Makefile b/Makefile
2 index ef3adc6ccb871..e99fabc4dfc8c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 142
10 +SUBLEVEL = 143
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
15 index a9f191d78b544..d0ea95830d454 100644
16 --- a/arch/arm/boot/dts/am43x-epos-evm.dts
17 +++ b/arch/arm/boot/dts/am43x-epos-evm.dts
18 @@ -589,7 +589,7 @@
19 status = "okay";
20 pinctrl-names = "default";
21 pinctrl-0 = <&i2c0_pins>;
22 - clock-frequency = <400000>;
23 + clock-frequency = <100000>;
24
25 tps65218: tps65218@24 {
26 reg = <0x24>;
27 diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
28 index f78b4eabd68c2..e7178a6db6bef 100644
29 --- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
30 +++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
31 @@ -755,14 +755,14 @@
32 status = "disabled";
33 };
34
35 - vica: intc@10140000 {
36 + vica: interrupt-controller@10140000 {
37 compatible = "arm,versatile-vic";
38 interrupt-controller;
39 #interrupt-cells = <1>;
40 reg = <0x10140000 0x20>;
41 };
42
43 - vicb: intc@10140020 {
44 + vicb: interrupt-controller@10140020 {
45 compatible = "arm,versatile-vic";
46 interrupt-controller;
47 #interrupt-cells = <1>;
48 diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
49 index a9d1dd82d8208..03b3de491b5e6 100644
50 --- a/arch/x86/include/asm/fpu/internal.h
51 +++ b/arch/x86/include/asm/fpu/internal.h
52 @@ -204,6 +204,14 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
53 asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
54 }
55
56 +static inline void fxsave(struct fxregs_state *fx)
57 +{
58 + if (IS_ENABLED(CONFIG_X86_32))
59 + asm volatile( "fxsave %[fx]" : [fx] "=m" (*fx));
60 + else
61 + asm volatile("fxsaveq %[fx]" : [fx] "=m" (*fx));
62 +}
63 +
64 /* These macros all use (%edi)/(%rdi) as the single memory argument. */
65 #define XSAVE ".byte " REX_PREFIX "0x0f,0xae,0x27"
66 #define XSAVEOPT ".byte " REX_PREFIX "0x0f,0xae,0x37"
67 @@ -272,28 +280,6 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
68 : "D" (st), "m" (*st), "a" (lmask), "d" (hmask) \
69 : "memory")
70
71 -/*
72 - * This function is called only during boot time when x86 caps are not set
73 - * up and alternative can not be used yet.
74 - */
75 -static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
76 -{
77 - u64 mask = -1;
78 - u32 lmask = mask;
79 - u32 hmask = mask >> 32;
80 - int err;
81 -
82 - WARN_ON(system_state != SYSTEM_BOOTING);
83 -
84 - if (boot_cpu_has(X86_FEATURE_XSAVES))
85 - XSTATE_OP(XSAVES, xstate, lmask, hmask, err);
86 - else
87 - XSTATE_OP(XSAVE, xstate, lmask, hmask, err);
88 -
89 - /* We should never fault when copying to a kernel buffer: */
90 - WARN_ON_FPU(err);
91 -}
92 -
93 /*
94 * This function is called only during boot time when x86 caps are not set
95 * up and alternative can not be used yet.
96 diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
97 index 735d1f1bbabc7..046782df37a6d 100644
98 --- a/arch/x86/kernel/fpu/xstate.c
99 +++ b/arch/x86/kernel/fpu/xstate.c
100 @@ -398,6 +398,24 @@ static void __init print_xstate_offset_size(void)
101 }
102 }
103
104 +/*
105 + * All supported features have either init state all zeros or are
106 + * handled in setup_init_fpu() individually. This is an explicit
107 + * feature list and does not use XFEATURE_MASK*SUPPORTED to catch
108 + * newly added supported features at build time and make people
109 + * actually look at the init state for the new feature.
110 + */
111 +#define XFEATURES_INIT_FPSTATE_HANDLED \
112 + (XFEATURE_MASK_FP | \
113 + XFEATURE_MASK_SSE | \
114 + XFEATURE_MASK_YMM | \
115 + XFEATURE_MASK_OPMASK | \
116 + XFEATURE_MASK_ZMM_Hi256 | \
117 + XFEATURE_MASK_Hi16_ZMM | \
118 + XFEATURE_MASK_PKRU | \
119 + XFEATURE_MASK_BNDREGS | \
120 + XFEATURE_MASK_BNDCSR)
121 +
122 /*
123 * setup the xstate image representing the init state
124 */
125 @@ -405,6 +423,8 @@ static void __init setup_init_fpu_buf(void)
126 {
127 static int on_boot_cpu __initdata = 1;
128
129 + BUILD_BUG_ON(XCNTXT_MASK != XFEATURES_INIT_FPSTATE_HANDLED);
130 +
131 WARN_ON_FPU(!on_boot_cpu);
132 on_boot_cpu = 0;
133
134 @@ -423,10 +443,22 @@ static void __init setup_init_fpu_buf(void)
135 copy_kernel_to_xregs_booting(&init_fpstate.xsave);
136
137 /*
138 - * Dump the init state again. This is to identify the init state
139 - * of any feature which is not represented by all zero's.
140 + * All components are now in init state. Read the state back so
141 + * that init_fpstate contains all non-zero init state. This only
142 + * works with XSAVE, but not with XSAVEOPT and XSAVES because
143 + * those use the init optimization which skips writing data for
144 + * components in init state.
145 + *
146 + * XSAVE could be used, but that would require to reshuffle the
147 + * data when XSAVES is available because XSAVES uses xstate
148 + * compaction. But doing so is a pointless exercise because most
149 + * components have an all zeros init state except for the legacy
150 + * ones (FP and SSE). Those can be saved with FXSAVE into the
151 + * legacy area. Adding new features requires to ensure that init
152 + * state is all zeroes or if not to add the necessary handling
153 + * here.
154 */
155 - copy_xregs_to_kernel_booting(&init_fpstate.xsave);
156 + fxsave(&init_fpstate.fxsave);
157 }
158
159 static int xfeature_uncompacted_offset(int xfeature_nr)
160 diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c
161 index e4782f562e7a9..2de7fd18f66a1 100644
162 --- a/drivers/cpufreq/armada-37xx-cpufreq.c
163 +++ b/drivers/cpufreq/armada-37xx-cpufreq.c
164 @@ -102,7 +102,11 @@ struct armada_37xx_dvfs {
165 };
166
167 static struct armada_37xx_dvfs armada_37xx_dvfs[] = {
168 - {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} },
169 + /*
170 + * The cpufreq scaling for 1.2 GHz variant of the SOC is currently
171 + * unstable because we do not know how to configure it properly.
172 + */
173 + /* {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, */
174 {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} },
175 {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} },
176 {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} },
177 diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
178 index 4bbf4172b9bf9..e3f1d4ab8e4f3 100644
179 --- a/drivers/dma/of-dma.c
180 +++ b/drivers/dma/of-dma.c
181 @@ -65,8 +65,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
182 return NULL;
183
184 ofdma_target = of_dma_find_controller(&dma_spec_target);
185 - if (!ofdma_target)
186 - return NULL;
187 + if (!ofdma_target) {
188 + ofdma->dma_router->route_free(ofdma->dma_router->dev,
189 + route_data);
190 + chan = ERR_PTR(-EPROBE_DEFER);
191 + goto err;
192 + }
193
194 chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
195 if (IS_ERR_OR_NULL(chan)) {
196 @@ -77,6 +81,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
197 chan->route_data = route_data;
198 }
199
200 +err:
201 /*
202 * Need to put the node back since the ofdma->of_dma_route_allocate
203 * has taken it for generating the new, translated dma_spec
204 diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
205 index 8f7ceb698226c..1cc06900153e4 100644
206 --- a/drivers/dma/sh/usb-dmac.c
207 +++ b/drivers/dma/sh/usb-dmac.c
208 @@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
209
210 error:
211 of_dma_controller_free(pdev->dev.of_node);
212 - pm_runtime_put(&pdev->dev);
213 error_pm:
214 + pm_runtime_put(&pdev->dev);
215 pm_runtime_disable(&pdev->dev);
216 return ret;
217 }
218 diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
219 index 1b5f3e9f43d70..ce18bca45ff27 100644
220 --- a/drivers/dma/xilinx/xilinx_dma.c
221 +++ b/drivers/dma/xilinx/xilinx_dma.c
222 @@ -333,6 +333,7 @@ struct xilinx_dma_tx_descriptor {
223 * @genlock: Support genlock mode
224 * @err: Channel has errors
225 * @idle: Check for channel idle
226 + * @terminating: Check for channel being synchronized by user
227 * @tasklet: Cleanup work after irq
228 * @config: Device configuration info
229 * @flush_on_fsync: Flush on Frame sync
230 @@ -370,6 +371,7 @@ struct xilinx_dma_chan {
231 bool genlock;
232 bool err;
233 bool idle;
234 + bool terminating;
235 struct tasklet_struct tasklet;
236 struct xilinx_vdma_config config;
237 bool flush_on_fsync;
238 @@ -844,6 +846,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
239 /* Run any dependencies, then free the descriptor */
240 dma_run_dependencies(&desc->async_tx);
241 xilinx_dma_free_tx_descriptor(chan, desc);
242 +
243 + /*
244 + * While we ran a callback the user called a terminate function,
245 + * which takes care of cleaning up any remaining descriptors
246 + */
247 + if (chan->terminating)
248 + break;
249 }
250
251 spin_unlock_irqrestore(&chan->lock, flags);
252 @@ -1618,6 +1627,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
253 if (desc->cyclic)
254 chan->cyclic = true;
255
256 + chan->terminating = false;
257 +
258 spin_unlock_irqrestore(&chan->lock, flags);
259
260 return cookie;
261 @@ -2074,6 +2085,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
262 }
263
264 /* Remove and free all of the descriptors in the lists */
265 + chan->terminating = true;
266 xilinx_dma_free_descriptors(chan);
267 chan->idle = true;
268
269 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
270 index 8d5cfd5357c75..03e2073339539 100644
271 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
272 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c
273 @@ -362,7 +362,7 @@ void optc2_lock_doublebuffer_enable(struct timing_generator *optc)
274
275 REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
276 MASTER_UPDATE_LOCK_DB_X,
277 - h_blank_start - 200 - 1,
278 + (h_blank_start - 200 - 1) / optc1->opp_count,
279 MASTER_UPDATE_LOCK_DB_Y,
280 v_blank_start - 1);
281 }
282 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
283 index 9d7232e26ecf0..c5758fb696cc8 100644
284 --- a/drivers/iommu/iommu.c
285 +++ b/drivers/iommu/iommu.c
286 @@ -775,6 +775,9 @@ void iommu_group_remove_device(struct device *dev)
287 struct iommu_group *group = dev->iommu_group;
288 struct group_device *tmp_device, *device = NULL;
289
290 + if (!group)
291 + return;
292 +
293 dev_info(dev, "Removing from iommu group %d\n", group->id);
294
295 /* Pre-notify listeners that a device is being removed. */
296 diff --git a/drivers/ipack/carriers/tpci200.c b/drivers/ipack/carriers/tpci200.c
297 index b05d6125c787a..46665b1cef85a 100644
298 --- a/drivers/ipack/carriers/tpci200.c
299 +++ b/drivers/ipack/carriers/tpci200.c
300 @@ -91,16 +91,13 @@ static void tpci200_unregister(struct tpci200_board *tpci200)
301 free_irq(tpci200->info->pdev->irq, (void *) tpci200);
302
303 pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
304 - pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
305
306 pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
307 pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
308 pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
309 pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
310 - pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
311
312 pci_disable_device(tpci200->info->pdev);
313 - pci_dev_put(tpci200->info->pdev);
314 }
315
316 static void tpci200_enable_irq(struct tpci200_board *tpci200,
317 @@ -259,7 +256,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
318 "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 2 !",
319 tpci200->info->pdev->bus->number,
320 tpci200->info->pdev->devfn);
321 - goto out_disable_pci;
322 + goto err_disable_device;
323 }
324
325 /* Request IO ID INT space (Bar 3) */
326 @@ -271,7 +268,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
327 "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 3 !",
328 tpci200->info->pdev->bus->number,
329 tpci200->info->pdev->devfn);
330 - goto out_release_ip_space;
331 + goto err_ip_interface_bar;
332 }
333
334 /* Request MEM8 space (Bar 5) */
335 @@ -282,7 +279,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
336 "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 5!",
337 tpci200->info->pdev->bus->number,
338 tpci200->info->pdev->devfn);
339 - goto out_release_ioid_int_space;
340 + goto err_io_id_int_spaces_bar;
341 }
342
343 /* Request MEM16 space (Bar 4) */
344 @@ -293,7 +290,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
345 "(bn 0x%X, sn 0x%X) failed to allocate PCI resource for BAR 4!",
346 tpci200->info->pdev->bus->number,
347 tpci200->info->pdev->devfn);
348 - goto out_release_mem8_space;
349 + goto err_mem8_space_bar;
350 }
351
352 /* Map internal tpci200 driver user space */
353 @@ -307,7 +304,7 @@ static int tpci200_register(struct tpci200_board *tpci200)
354 tpci200->info->pdev->bus->number,
355 tpci200->info->pdev->devfn);
356 res = -ENOMEM;
357 - goto out_release_mem8_space;
358 + goto err_mem16_space_bar;
359 }
360
361 /* Initialize lock that protects interface_regs */
362 @@ -346,18 +343,22 @@ static int tpci200_register(struct tpci200_board *tpci200)
363 "(bn 0x%X, sn 0x%X) unable to register IRQ !",
364 tpci200->info->pdev->bus->number,
365 tpci200->info->pdev->devfn);
366 - goto out_release_ioid_int_space;
367 + goto err_interface_regs;
368 }
369
370 return 0;
371
372 -out_release_mem8_space:
373 +err_interface_regs:
374 + pci_iounmap(tpci200->info->pdev, tpci200->info->interface_regs);
375 +err_mem16_space_bar:
376 + pci_release_region(tpci200->info->pdev, TPCI200_MEM16_SPACE_BAR);
377 +err_mem8_space_bar:
378 pci_release_region(tpci200->info->pdev, TPCI200_MEM8_SPACE_BAR);
379 -out_release_ioid_int_space:
380 +err_io_id_int_spaces_bar:
381 pci_release_region(tpci200->info->pdev, TPCI200_IO_ID_INT_SPACES_BAR);
382 -out_release_ip_space:
383 +err_ip_interface_bar:
384 pci_release_region(tpci200->info->pdev, TPCI200_IP_INTERFACE_BAR);
385 -out_disable_pci:
386 +err_disable_device:
387 pci_disable_device(tpci200->info->pdev);
388 return res;
389 }
390 @@ -529,7 +530,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
391 tpci200->info = kzalloc(sizeof(struct tpci200_infos), GFP_KERNEL);
392 if (!tpci200->info) {
393 ret = -ENOMEM;
394 - goto out_err_info;
395 + goto err_tpci200;
396 }
397
398 pci_dev_get(pdev);
399 @@ -540,7 +541,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
400 if (ret) {
401 dev_err(&pdev->dev, "Failed to allocate PCI Configuration Memory");
402 ret = -EBUSY;
403 - goto out_err_pci_request;
404 + goto err_tpci200_info;
405 }
406 tpci200->info->cfg_regs = ioremap_nocache(
407 pci_resource_start(pdev, TPCI200_CFG_MEM_BAR),
408 @@ -548,7 +549,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
409 if (!tpci200->info->cfg_regs) {
410 dev_err(&pdev->dev, "Failed to map PCI Configuration Memory");
411 ret = -EFAULT;
412 - goto out_err_ioremap;
413 + goto err_request_region;
414 }
415
416 /* Disable byte swapping for 16 bit IP module access. This will ensure
417 @@ -571,7 +572,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
418 if (ret) {
419 dev_err(&pdev->dev, "error during tpci200 install\n");
420 ret = -ENODEV;
421 - goto out_err_install;
422 + goto err_cfg_regs;
423 }
424
425 /* Register the carrier in the industry pack bus driver */
426 @@ -583,7 +584,7 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
427 dev_err(&pdev->dev,
428 "error registering the carrier on ipack driver\n");
429 ret = -EFAULT;
430 - goto out_err_bus_register;
431 + goto err_tpci200_install;
432 }
433
434 /* save the bus number given by ipack to logging purpose */
435 @@ -594,19 +595,16 @@ static int tpci200_pci_probe(struct pci_dev *pdev,
436 tpci200_create_device(tpci200, i);
437 return 0;
438
439 -out_err_bus_register:
440 +err_tpci200_install:
441 tpci200_uninstall(tpci200);
442 - /* tpci200->info->cfg_regs is unmapped in tpci200_uninstall */
443 - tpci200->info->cfg_regs = NULL;
444 -out_err_install:
445 - if (tpci200->info->cfg_regs)
446 - iounmap(tpci200->info->cfg_regs);
447 -out_err_ioremap:
448 +err_cfg_regs:
449 + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
450 +err_request_region:
451 pci_release_region(pdev, TPCI200_CFG_MEM_BAR);
452 -out_err_pci_request:
453 - pci_dev_put(pdev);
454 +err_tpci200_info:
455 kfree(tpci200->info);
456 -out_err_info:
457 + pci_dev_put(pdev);
458 +err_tpci200:
459 kfree(tpci200);
460 return ret;
461 }
462 @@ -616,6 +614,12 @@ static void __tpci200_pci_remove(struct tpci200_board *tpci200)
463 ipack_bus_unregister(tpci200->info->ipack_bus);
464 tpci200_uninstall(tpci200);
465
466 + pci_iounmap(tpci200->info->pdev, tpci200->info->cfg_regs);
467 +
468 + pci_release_region(tpci200->info->pdev, TPCI200_CFG_MEM_BAR);
469 +
470 + pci_dev_put(tpci200->info->pdev);
471 +
472 kfree(tpci200->info);
473 kfree(tpci200);
474 }
475 diff --git a/drivers/media/usb/zr364xx/zr364xx.c b/drivers/media/usb/zr364xx/zr364xx.c
476 index 02458c9cb5dc0..25f16ff6dcc75 100644
477 --- a/drivers/media/usb/zr364xx/zr364xx.c
478 +++ b/drivers/media/usb/zr364xx/zr364xx.c
479 @@ -1187,15 +1187,11 @@ out:
480 return err;
481 }
482
483 -static void zr364xx_release(struct v4l2_device *v4l2_dev)
484 +static void zr364xx_board_uninit(struct zr364xx_camera *cam)
485 {
486 - struct zr364xx_camera *cam =
487 - container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
488 unsigned long i;
489
490 - v4l2_device_unregister(&cam->v4l2_dev);
491 -
492 - videobuf_mmap_free(&cam->vb_vidq);
493 + zr364xx_stop_readpipe(cam);
494
495 /* release sys buffers */
496 for (i = 0; i < FRAMES; i++) {
497 @@ -1206,9 +1202,19 @@ static void zr364xx_release(struct v4l2_device *v4l2_dev)
498 cam->buffer.frame[i].lpvbits = NULL;
499 }
500
501 - v4l2_ctrl_handler_free(&cam->ctrl_handler);
502 /* release transfer buffer */
503 kfree(cam->pipe->transfer_buffer);
504 +}
505 +
506 +static void zr364xx_release(struct v4l2_device *v4l2_dev)
507 +{
508 + struct zr364xx_camera *cam =
509 + container_of(v4l2_dev, struct zr364xx_camera, v4l2_dev);
510 +
511 + videobuf_mmap_free(&cam->vb_vidq);
512 + v4l2_ctrl_handler_free(&cam->ctrl_handler);
513 + zr364xx_board_uninit(cam);
514 + v4l2_device_unregister(&cam->v4l2_dev);
515 kfree(cam);
516 }
517
518 @@ -1331,6 +1337,7 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
519 {
520 struct zr364xx_pipeinfo *pipe = cam->pipe;
521 unsigned long i;
522 + int err;
523
524 DBG("board init: %p\n", cam);
525 memset(pipe, 0, sizeof(*pipe));
526 @@ -1363,9 +1370,8 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
527
528 if (i == 0) {
529 printk(KERN_INFO KBUILD_MODNAME ": out of memory. Aborting\n");
530 - kfree(cam->pipe->transfer_buffer);
531 - cam->pipe->transfer_buffer = NULL;
532 - return -ENOMEM;
533 + err = -ENOMEM;
534 + goto err_free;
535 } else
536 cam->buffer.dwFrames = i;
537
538 @@ -1380,9 +1386,20 @@ static int zr364xx_board_init(struct zr364xx_camera *cam)
539 /*** end create system buffers ***/
540
541 /* start read pipe */
542 - zr364xx_start_readpipe(cam);
543 + err = zr364xx_start_readpipe(cam);
544 + if (err)
545 + goto err_free_frames;
546 +
547 DBG(": board initialized\n");
548 return 0;
549 +
550 +err_free_frames:
551 + for (i = 0; i < FRAMES; i++)
552 + vfree(cam->buffer.frame[i].lpvbits);
553 +err_free:
554 + kfree(cam->pipe->transfer_buffer);
555 + cam->pipe->transfer_buffer = NULL;
556 + return err;
557 }
558
559 static int zr364xx_probe(struct usb_interface *intf,
560 @@ -1407,12 +1424,10 @@ static int zr364xx_probe(struct usb_interface *intf,
561 if (!cam)
562 return -ENOMEM;
563
564 - cam->v4l2_dev.release = zr364xx_release;
565 err = v4l2_device_register(&intf->dev, &cam->v4l2_dev);
566 if (err < 0) {
567 dev_err(&udev->dev, "couldn't register v4l2_device\n");
568 - kfree(cam);
569 - return err;
570 + goto free_cam;
571 }
572 hdl = &cam->ctrl_handler;
573 v4l2_ctrl_handler_init(hdl, 1);
574 @@ -1421,7 +1436,7 @@ static int zr364xx_probe(struct usb_interface *intf,
575 if (hdl->error) {
576 err = hdl->error;
577 dev_err(&udev->dev, "couldn't register control\n");
578 - goto fail;
579 + goto free_hdlr_and_unreg_dev;
580 }
581 /* save the init method used by this camera */
582 cam->method = id->driver_info;
583 @@ -1494,7 +1509,7 @@ static int zr364xx_probe(struct usb_interface *intf,
584 if (!cam->read_endpoint) {
585 err = -ENOMEM;
586 dev_err(&intf->dev, "Could not find bulk-in endpoint\n");
587 - goto fail;
588 + goto free_hdlr_and_unreg_dev;
589 }
590
591 /* v4l */
592 @@ -1505,10 +1520,11 @@ static int zr364xx_probe(struct usb_interface *intf,
593
594 /* load zr364xx board specific */
595 err = zr364xx_board_init(cam);
596 - if (!err)
597 - err = v4l2_ctrl_handler_setup(hdl);
598 if (err)
599 - goto fail;
600 + goto free_hdlr_and_unreg_dev;
601 + err = v4l2_ctrl_handler_setup(hdl);
602 + if (err)
603 + goto board_uninit;
604
605 spin_lock_init(&cam->slock);
606
607 @@ -1523,16 +1539,20 @@ static int zr364xx_probe(struct usb_interface *intf,
608 err = video_register_device(&cam->vdev, VFL_TYPE_GRABBER, -1);
609 if (err) {
610 dev_err(&udev->dev, "video_register_device failed\n");
611 - goto fail;
612 + goto board_uninit;
613 }
614 + cam->v4l2_dev.release = zr364xx_release;
615
616 dev_info(&udev->dev, DRIVER_DESC " controlling device %s\n",
617 video_device_node_name(&cam->vdev));
618 return 0;
619
620 -fail:
621 +board_uninit:
622 + zr364xx_board_uninit(cam);
623 +free_hdlr_and_unreg_dev:
624 v4l2_ctrl_handler_free(hdl);
625 v4l2_device_unregister(&cam->v4l2_dev);
626 +free_cam:
627 kfree(cam);
628 return err;
629 }
630 @@ -1579,10 +1599,19 @@ static int zr364xx_resume(struct usb_interface *intf)
631 if (!cam->was_streaming)
632 return 0;
633
634 - zr364xx_start_readpipe(cam);
635 + res = zr364xx_start_readpipe(cam);
636 + if (res)
637 + return res;
638 +
639 res = zr364xx_prepare(cam);
640 - if (!res)
641 - zr364xx_start_acquire(cam);
642 + if (res)
643 + goto err_prepare;
644 +
645 + zr364xx_start_acquire(cam);
646 + return 0;
647 +
648 +err_prepare:
649 + zr364xx_stop_readpipe(cam);
650 return res;
651 }
652 #endif
653 diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
654 index 79c55c7b4afd9..6ace82028667b 100644
655 --- a/drivers/mmc/host/dw_mmc.c
656 +++ b/drivers/mmc/host/dw_mmc.c
657 @@ -2017,8 +2017,8 @@ static void dw_mci_tasklet_func(unsigned long priv)
658 continue;
659 }
660
661 - dw_mci_stop_dma(host);
662 send_stop_abort(host, data);
663 + dw_mci_stop_dma(host);
664 state = STATE_SENDING_STOP;
665 break;
666 }
667 @@ -2042,10 +2042,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
668 */
669 if (test_and_clear_bit(EVENT_DATA_ERROR,
670 &host->pending_events)) {
671 - dw_mci_stop_dma(host);
672 if (!(host->data_status & (SDMMC_INT_DRTO |
673 SDMMC_INT_EBE)))
674 send_stop_abort(host, data);
675 + dw_mci_stop_dma(host);
676 state = STATE_DATA_ERROR;
677 break;
678 }
679 @@ -2078,10 +2078,10 @@ static void dw_mci_tasklet_func(unsigned long priv)
680 */
681 if (test_and_clear_bit(EVENT_DATA_ERROR,
682 &host->pending_events)) {
683 - dw_mci_stop_dma(host);
684 if (!(host->data_status & (SDMMC_INT_DRTO |
685 SDMMC_INT_EBE)))
686 send_stop_abort(host, data);
687 + dw_mci_stop_dma(host);
688 state = STATE_DATA_ERROR;
689 break;
690 }
691 diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
692 index c8b9ab40a1027..9c98ddef0097d 100644
693 --- a/drivers/mtd/chips/cfi_cmdset_0002.c
694 +++ b/drivers/mtd/chips/cfi_cmdset_0002.c
695 @@ -119,7 +119,7 @@ static int cfi_use_status_reg(struct cfi_private *cfi)
696 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
697 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
698
699 - return extp->MinorVersion >= '5' &&
700 + return extp && extp->MinorVersion >= '5' &&
701 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
702 }
703
704 diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
705 index 287ea792922a9..e67f07faca789 100644
706 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
707 +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
708 @@ -360,6 +360,26 @@ static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
709 return md_dst->u.port_info.port_id;
710 }
711
712 +static bool bnxt_txr_netif_try_stop_queue(struct bnxt *bp,
713 + struct bnxt_tx_ring_info *txr,
714 + struct netdev_queue *txq)
715 +{
716 + netif_tx_stop_queue(txq);
717 +
718 + /* netif_tx_stop_queue() must be done before checking
719 + * tx index in bnxt_tx_avail() below, because in
720 + * bnxt_tx_int(), we update tx index before checking for
721 + * netif_tx_queue_stopped().
722 + */
723 + smp_mb();
724 + if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh) {
725 + netif_tx_wake_queue(txq);
726 + return false;
727 + }
728 +
729 + return true;
730 +}
731 +
732 static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
733 {
734 struct bnxt *bp = netdev_priv(dev);
735 @@ -387,8 +407,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
736
737 free_size = bnxt_tx_avail(bp, txr);
738 if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
739 - netif_tx_stop_queue(txq);
740 - return NETDEV_TX_BUSY;
741 + if (bnxt_txr_netif_try_stop_queue(bp, txr, txq))
742 + return NETDEV_TX_BUSY;
743 }
744
745 length = skb->len;
746 @@ -597,16 +617,7 @@ tx_done:
747 if (netdev_xmit_more() && !tx_buf->is_push)
748 bnxt_db_write(bp, &txr->tx_db, prod);
749
750 - netif_tx_stop_queue(txq);
751 -
752 - /* netif_tx_stop_queue() must be done before checking
753 - * tx index in bnxt_tx_avail() below, because in
754 - * bnxt_tx_int(), we update tx index before checking for
755 - * netif_tx_queue_stopped().
756 - */
757 - smp_mb();
758 - if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
759 - netif_tx_wake_queue(txq);
760 + bnxt_txr_netif_try_stop_queue(bp, txr, txq);
761 }
762 return NETDEV_TX_OK;
763
764 @@ -690,14 +701,9 @@ next_tx_int:
765 smp_mb();
766
767 if (unlikely(netif_tx_queue_stopped(txq)) &&
768 - (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
769 - __netif_tx_lock(txq, smp_processor_id());
770 - if (netif_tx_queue_stopped(txq) &&
771 - bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
772 - txr->dev_state != BNXT_DEV_STATE_CLOSING)
773 - netif_tx_wake_queue(txq);
774 - __netif_tx_unlock(txq);
775 - }
776 + bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
777 + READ_ONCE(txr->dev_state) != BNXT_DEV_STATE_CLOSING)
778 + netif_tx_wake_queue(txq);
779 }
780
781 static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
782 @@ -1718,6 +1724,10 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
783 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
784 return -EBUSY;
785
786 + /* The valid test of the entry must be done first before
787 + * reading any further.
788 + */
789 + dma_rmb();
790 prod = rxr->rx_prod;
791
792 if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
793 @@ -1912,6 +1922,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp,
794 if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
795 return -EBUSY;
796
797 + /* The valid test of the entry must be done first before
798 + * reading any further.
799 + */
800 + dma_rmb();
801 cmp_type = RX_CMP_TYPE(rxcmp);
802 if (cmp_type == CMP_TYPE_RX_L2_CMP) {
803 rxcmp1->rx_cmp_cfa_code_errors_v2 |=
804 @@ -2308,6 +2322,10 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
805 if (!TX_CMP_VALID(txcmp, raw_cons))
806 break;
807
808 + /* The valid test of the entry must be done first before
809 + * reading any further.
810 + */
811 + dma_rmb();
812 if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
813 tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
814 cp_cons = RING_CMP(tmp_raw_cons);
815 @@ -8340,10 +8358,9 @@ static void bnxt_disable_napi(struct bnxt *bp)
816 for (i = 0; i < bp->cp_nr_rings; i++) {
817 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
818
819 + napi_disable(&bp->bnapi[i]->napi);
820 if (bp->bnapi[i]->rx_ring)
821 cancel_work_sync(&cpr->dim.work);
822 -
823 - napi_disable(&bp->bnapi[i]->napi);
824 }
825 }
826
827 @@ -8371,9 +8388,11 @@ void bnxt_tx_disable(struct bnxt *bp)
828 if (bp->tx_ring) {
829 for (i = 0; i < bp->tx_nr_rings; i++) {
830 txr = &bp->tx_ring[i];
831 - txr->dev_state = BNXT_DEV_STATE_CLOSING;
832 + WRITE_ONCE(txr->dev_state, BNXT_DEV_STATE_CLOSING);
833 }
834 }
835 + /* Make sure napi polls see @dev_state change */
836 + synchronize_net();
837 /* Drop carrier first to prevent TX timeout */
838 netif_carrier_off(bp->dev);
839 /* Stop all TX queues */
840 @@ -8387,8 +8406,10 @@ void bnxt_tx_enable(struct bnxt *bp)
841
842 for (i = 0; i < bp->tx_nr_rings; i++) {
843 txr = &bp->tx_ring[i];
844 - txr->dev_state = 0;
845 + WRITE_ONCE(txr->dev_state, 0);
846 }
847 + /* Make sure napi polls see @dev_state change */
848 + synchronize_net();
849 netif_tx_wake_all_queues(bp->dev);
850 if (bp->link_info.link_up)
851 netif_carrier_on(bp->dev);
852 diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
853 index 8e38c547b53f9..06987913837aa 100644
854 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
855 +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
856 @@ -3553,8 +3553,7 @@ u16 i40e_lan_select_queue(struct net_device *netdev,
857
858 /* is DCB enabled at all? */
859 if (vsi->tc_config.numtc == 1)
860 - return i40e_swdcb_skb_tx_hash(netdev, skb,
861 - netdev->real_num_tx_queues);
862 + return netdev_pick_tx(netdev, skb, sb_dev);
863
864 prio = skb->priority;
865 hw = &vsi->back->hw;
866 diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h
867 index 6b9117a350fac..81ca6472937d2 100644
868 --- a/drivers/net/ethernet/intel/iavf/iavf.h
869 +++ b/drivers/net/ethernet/intel/iavf/iavf.h
870 @@ -134,6 +134,7 @@ struct iavf_q_vector {
871 struct iavf_mac_filter {
872 struct list_head list;
873 u8 macaddr[ETH_ALEN];
874 + bool is_new_mac; /* filter is new, wait for PF decision */
875 bool remove; /* filter needs to be removed */
876 bool add; /* filter needs to be added */
877 };
878 diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
879 index dc902e371c2cf..94a3f000e999b 100644
880 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c
881 +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
882 @@ -761,6 +761,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
883
884 list_add_tail(&f->list, &adapter->mac_filter_list);
885 f->add = true;
886 + f->is_new_mac = true;
887 adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
888 } else {
889 f->remove = false;
890 diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
891 index 9655318803b71..4d471a6f2946f 100644
892 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
893 +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
894 @@ -564,6 +564,47 @@ void iavf_del_ether_addrs(struct iavf_adapter *adapter)
895 kfree(veal);
896 }
897
898 +/**
899 + * iavf_mac_add_ok
900 + * @adapter: adapter structure
901 + *
902 + * Submit list of filters based on PF response.
903 + **/
904 +static void iavf_mac_add_ok(struct iavf_adapter *adapter)
905 +{
906 + struct iavf_mac_filter *f, *ftmp;
907 +
908 + spin_lock_bh(&adapter->mac_vlan_list_lock);
909 + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
910 + f->is_new_mac = false;
911 + }
912 + spin_unlock_bh(&adapter->mac_vlan_list_lock);
913 +}
914 +
915 +/**
916 + * iavf_mac_add_reject
917 + * @adapter: adapter structure
918 + *
919 + * Remove filters from list based on PF response.
920 + **/
921 +static void iavf_mac_add_reject(struct iavf_adapter *adapter)
922 +{
923 + struct net_device *netdev = adapter->netdev;
924 + struct iavf_mac_filter *f, *ftmp;
925 +
926 + spin_lock_bh(&adapter->mac_vlan_list_lock);
927 + list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
928 + if (f->remove && ether_addr_equal(f->macaddr, netdev->dev_addr))
929 + f->remove = false;
930 +
931 + if (f->is_new_mac) {
932 + list_del(&f->list);
933 + kfree(f);
934 + }
935 + }
936 + spin_unlock_bh(&adapter->mac_vlan_list_lock);
937 +}
938 +
939 /**
940 * iavf_add_vlans
941 * @adapter: adapter structure
942 @@ -1316,6 +1357,7 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
943 case VIRTCHNL_OP_ADD_ETH_ADDR:
944 dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
945 iavf_stat_str(&adapter->hw, v_retval));
946 + iavf_mac_add_reject(adapter);
947 /* restore administratively set MAC address */
948 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
949 break;
950 @@ -1385,10 +1427,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
951 }
952 }
953 switch (v_opcode) {
954 - case VIRTCHNL_OP_ADD_ETH_ADDR: {
955 + case VIRTCHNL_OP_ADD_ETH_ADDR:
956 + if (!v_retval)
957 + iavf_mac_add_ok(adapter);
958 if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
959 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
960 - }
961 break;
962 case VIRTCHNL_OP_GET_STATS: {
963 struct iavf_eth_stats *stats =
964 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
965 index 29b9c728a65e2..f2014c10f7c97 100644
966 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
967 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
968 @@ -3158,8 +3158,10 @@ int qlcnic_83xx_flash_read32(struct qlcnic_adapter *adapter, u32 flash_addr,
969
970 indirect_addr = QLC_83XX_FLASH_DIRECT_DATA(addr);
971 ret = QLCRD32(adapter, indirect_addr, &err);
972 - if (err == -EIO)
973 + if (err == -EIO) {
974 + qlcnic_83xx_unlock_flash(adapter);
975 return err;
976 + }
977
978 word = ret;
979 *(u32 *)p_data = word;
980 diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
981 index 71d6629e65c97..da13683d52d1a 100644
982 --- a/drivers/net/hamradio/6pack.c
983 +++ b/drivers/net/hamradio/6pack.c
984 @@ -839,6 +839,12 @@ static void decode_data(struct sixpack *sp, unsigned char inbyte)
985 return;
986 }
987
988 + if (sp->rx_count_cooked + 2 >= sizeof(sp->cooked_buf)) {
989 + pr_err("6pack: cooked buffer overrun, data loss\n");
990 + sp->rx_count = 0;
991 + return;
992 + }
993 +
994 buf = sp->raw_buf;
995 sp->cooked_buf[sp->rx_count_cooked++] =
996 buf[0] | ((buf[1] << 2) & 0xc0);
997 diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c
998 index 6a1d3540210bd..ccb3ee704eb1c 100644
999 --- a/drivers/net/phy/mdio-mux.c
1000 +++ b/drivers/net/phy/mdio-mux.c
1001 @@ -82,6 +82,17 @@ out:
1002
1003 static int parent_count;
1004
1005 +static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
1006 +{
1007 + struct mdio_mux_child_bus *cb = pb->children;
1008 +
1009 + while (cb) {
1010 + mdiobus_unregister(cb->mii_bus);
1011 + mdiobus_free(cb->mii_bus);
1012 + cb = cb->next;
1013 + }
1014 +}
1015 +
1016 int mdio_mux_init(struct device *dev,
1017 struct device_node *mux_node,
1018 int (*switch_fn)(int cur, int desired, void *data),
1019 @@ -144,7 +155,7 @@ int mdio_mux_init(struct device *dev,
1020 cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
1021 if (!cb) {
1022 ret_val = -ENOMEM;
1023 - continue;
1024 + goto err_loop;
1025 }
1026 cb->bus_number = v;
1027 cb->parent = pb;
1028 @@ -152,8 +163,7 @@ int mdio_mux_init(struct device *dev,
1029 cb->mii_bus = mdiobus_alloc();
1030 if (!cb->mii_bus) {
1031 ret_val = -ENOMEM;
1032 - devm_kfree(dev, cb);
1033 - continue;
1034 + goto err_loop;
1035 }
1036 cb->mii_bus->priv = cb;
1037
1038 @@ -165,11 +175,15 @@ int mdio_mux_init(struct device *dev,
1039 cb->mii_bus->write = mdio_mux_write;
1040 r = of_mdiobus_register(cb->mii_bus, child_bus_node);
1041 if (r) {
1042 + mdiobus_free(cb->mii_bus);
1043 + if (r == -EPROBE_DEFER) {
1044 + ret_val = r;
1045 + goto err_loop;
1046 + }
1047 + devm_kfree(dev, cb);
1048 dev_err(dev,
1049 "Error: Failed to register MDIO bus for child %pOF\n",
1050 child_bus_node);
1051 - mdiobus_free(cb->mii_bus);
1052 - devm_kfree(dev, cb);
1053 } else {
1054 cb->next = pb->children;
1055 pb->children = cb;
1056 @@ -182,6 +196,10 @@ int mdio_mux_init(struct device *dev,
1057
1058 dev_err(dev, "Error: No acceptable child buses found\n");
1059 devm_kfree(dev, pb);
1060 +
1061 +err_loop:
1062 + mdio_mux_uninit_children(pb);
1063 + of_node_put(child_bus_node);
1064 err_pb_kz:
1065 put_device(&parent_bus->dev);
1066 err_parent_bus:
1067 @@ -193,14 +211,8 @@ EXPORT_SYMBOL_GPL(mdio_mux_init);
1068 void mdio_mux_uninit(void *mux_handle)
1069 {
1070 struct mdio_mux_parent_bus *pb = mux_handle;
1071 - struct mdio_mux_child_bus *cb = pb->children;
1072 -
1073 - while (cb) {
1074 - mdiobus_unregister(cb->mii_bus);
1075 - mdiobus_free(cb->mii_bus);
1076 - cb = cb->next;
1077 - }
1078
1079 + mdio_mux_uninit_children(pb);
1080 put_device(&pb->mii_bus->dev);
1081 }
1082 EXPORT_SYMBOL_GPL(mdio_mux_uninit);
1083 diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
1084 index 71cc5b63d8ced..92d9d3407b79b 100644
1085 --- a/drivers/net/usb/lan78xx.c
1086 +++ b/drivers/net/usb/lan78xx.c
1087 @@ -1159,7 +1159,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1088 {
1089 struct phy_device *phydev = dev->net->phydev;
1090 struct ethtool_link_ksettings ecmd;
1091 - int ladv, radv, ret;
1092 + int ladv, radv, ret, link;
1093 u32 buf;
1094
1095 /* clear LAN78xx interrupt status */
1096 @@ -1167,9 +1167,12 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1097 if (unlikely(ret < 0))
1098 return -EIO;
1099
1100 + mutex_lock(&phydev->lock);
1101 phy_read_status(phydev);
1102 + link = phydev->link;
1103 + mutex_unlock(&phydev->lock);
1104
1105 - if (!phydev->link && dev->link_on) {
1106 + if (!link && dev->link_on) {
1107 dev->link_on = false;
1108
1109 /* reset MAC */
1110 @@ -1182,7 +1185,7 @@ static int lan78xx_link_reset(struct lan78xx_net *dev)
1111 return -EIO;
1112
1113 del_timer(&dev->stat_monitor);
1114 - } else if (phydev->link && !dev->link_on) {
1115 + } else if (link && !dev->link_on) {
1116 dev->link_on = true;
1117
1118 phy_ethtool_ksettings_get(phydev, &ecmd);
1119 @@ -1471,9 +1474,14 @@ static int lan78xx_set_eee(struct net_device *net, struct ethtool_eee *edata)
1120
1121 static u32 lan78xx_get_link(struct net_device *net)
1122 {
1123 + u32 link;
1124 +
1125 + mutex_lock(&net->phydev->lock);
1126 phy_read_status(net->phydev);
1127 + link = net->phydev->link;
1128 + mutex_unlock(&net->phydev->lock);
1129
1130 - return net->phydev->link;
1131 + return link;
1132 }
1133
1134 static void lan78xx_get_drvinfo(struct net_device *net,
1135 diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
1136 index 15453d6fcc232..37c2cecd1e503 100644
1137 --- a/drivers/net/virtio_net.c
1138 +++ b/drivers/net/virtio_net.c
1139 @@ -63,7 +63,7 @@ static const unsigned long guest_offloads[] = {
1140 VIRTIO_NET_F_GUEST_CSUM
1141 };
1142
1143 -#define GUEST_OFFLOAD_LRO_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
1144 +#define GUEST_OFFLOAD_GRO_HW_MASK ((1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
1145 (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
1146 (1ULL << VIRTIO_NET_F_GUEST_ECN) | \
1147 (1ULL << VIRTIO_NET_F_GUEST_UFO))
1148 @@ -195,6 +195,9 @@ struct virtnet_info {
1149 /* # of XDP queue pairs currently used by the driver */
1150 u16 xdp_queue_pairs;
1151
1152 + /* xdp_queue_pairs may be 0, when xdp is already loaded. So add this. */
1153 + bool xdp_enabled;
1154 +
1155 /* I like... big packets and I cannot lie! */
1156 bool big_packets;
1157
1158 @@ -485,12 +488,41 @@ static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
1159 return 0;
1160 }
1161
1162 -static struct send_queue *virtnet_xdp_sq(struct virtnet_info *vi)
1163 -{
1164 - unsigned int qp;
1165 -
1166 - qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
1167 - return &vi->sq[qp];
1168 +/* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
1169 + * the current cpu, so it does not need to be locked.
1170 + *
1171 + * Here we use marco instead of inline functions because we have to deal with
1172 + * three issues at the same time: 1. the choice of sq. 2. judge and execute the
1173 + * lock/unlock of txq 3. make sparse happy. It is difficult for two inline
1174 + * functions to perfectly solve these three problems at the same time.
1175 + */
1176 +#define virtnet_xdp_get_sq(vi) ({ \
1177 + struct netdev_queue *txq; \
1178 + typeof(vi) v = (vi); \
1179 + unsigned int qp; \
1180 + \
1181 + if (v->curr_queue_pairs > nr_cpu_ids) { \
1182 + qp = v->curr_queue_pairs - v->xdp_queue_pairs; \
1183 + qp += smp_processor_id(); \
1184 + txq = netdev_get_tx_queue(v->dev, qp); \
1185 + __netif_tx_acquire(txq); \
1186 + } else { \
1187 + qp = smp_processor_id() % v->curr_queue_pairs; \
1188 + txq = netdev_get_tx_queue(v->dev, qp); \
1189 + __netif_tx_lock(txq, raw_smp_processor_id()); \
1190 + } \
1191 + v->sq + qp; \
1192 +})
1193 +
1194 +#define virtnet_xdp_put_sq(vi, q) { \
1195 + struct netdev_queue *txq; \
1196 + typeof(vi) v = (vi); \
1197 + \
1198 + txq = netdev_get_tx_queue(v->dev, (q) - v->sq); \
1199 + if (v->curr_queue_pairs > nr_cpu_ids) \
1200 + __netif_tx_release(txq); \
1201 + else \
1202 + __netif_tx_unlock(txq); \
1203 }
1204
1205 static int virtnet_xdp_xmit(struct net_device *dev,
1206 @@ -516,7 +548,7 @@ static int virtnet_xdp_xmit(struct net_device *dev,
1207 if (!xdp_prog)
1208 return -ENXIO;
1209
1210 - sq = virtnet_xdp_sq(vi);
1211 + sq = virtnet_xdp_get_sq(vi);
1212
1213 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
1214 ret = -EINVAL;
1215 @@ -564,12 +596,13 @@ out:
1216 sq->stats.kicks += kicks;
1217 u64_stats_update_end(&sq->stats.syncp);
1218
1219 + virtnet_xdp_put_sq(vi, sq);
1220 return ret;
1221 }
1222
1223 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1224 {
1225 - return vi->xdp_queue_pairs ? VIRTIO_XDP_HEADROOM : 0;
1226 + return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1227 }
1228
1229 /* We copy the packet for XDP in the following cases:
1230 @@ -1458,12 +1491,13 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
1231 xdp_do_flush_map();
1232
1233 if (xdp_xmit & VIRTIO_XDP_TX) {
1234 - sq = virtnet_xdp_sq(vi);
1235 + sq = virtnet_xdp_get_sq(vi);
1236 if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) {
1237 u64_stats_update_begin(&sq->stats.syncp);
1238 sq->stats.kicks++;
1239 u64_stats_update_end(&sq->stats.syncp);
1240 }
1241 + virtnet_xdp_put_sq(vi, sq);
1242 }
1243
1244 return received;
1245 @@ -2459,7 +2493,7 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1246 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
1247 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
1248 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
1249 - NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing LRO/CSUM, disable LRO/CSUM first");
1250 + NL_SET_ERR_MSG_MOD(extack, "Can't set XDP while host is implementing GRO_HW/CSUM, disable GRO_HW/CSUM first");
1251 return -EOPNOTSUPP;
1252 }
1253
1254 @@ -2480,10 +2514,9 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1255
1256 /* XDP requires extra queues for XDP_TX */
1257 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
1258 - NL_SET_ERR_MSG_MOD(extack, "Too few free TX rings available");
1259 - netdev_warn(dev, "request %i queues but max is %i\n",
1260 + netdev_warn(dev, "XDP request %i queues but max is %i. XDP_TX and XDP_REDIRECT will operate in a slower locked tx mode.\n",
1261 curr_qp + xdp_qp, vi->max_queue_pairs);
1262 - return -ENOMEM;
1263 + xdp_qp = 0;
1264 }
1265
1266 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
1267 @@ -2520,11 +2553,14 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog,
1268 vi->xdp_queue_pairs = xdp_qp;
1269
1270 if (prog) {
1271 + vi->xdp_enabled = true;
1272 for (i = 0; i < vi->max_queue_pairs; i++) {
1273 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
1274 if (i == 0 && !old_prog)
1275 virtnet_clear_guest_offloads(vi);
1276 }
1277 + } else {
1278 + vi->xdp_enabled = false;
1279 }
1280
1281 for (i = 0; i < vi->max_queue_pairs; i++) {
1282 @@ -2608,15 +2644,15 @@ static int virtnet_set_features(struct net_device *dev,
1283 u64 offloads;
1284 int err;
1285
1286 - if ((dev->features ^ features) & NETIF_F_LRO) {
1287 - if (vi->xdp_queue_pairs)
1288 + if ((dev->features ^ features) & NETIF_F_GRO_HW) {
1289 + if (vi->xdp_enabled)
1290 return -EBUSY;
1291
1292 - if (features & NETIF_F_LRO)
1293 + if (features & NETIF_F_GRO_HW)
1294 offloads = vi->guest_offloads_capable;
1295 else
1296 offloads = vi->guest_offloads_capable &
1297 - ~GUEST_OFFLOAD_LRO_MASK;
1298 + ~GUEST_OFFLOAD_GRO_HW_MASK;
1299
1300 err = virtnet_set_guest_offloads(vi, offloads);
1301 if (err)
1302 @@ -3092,9 +3128,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1303 dev->features |= NETIF_F_RXCSUM;
1304 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1305 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6))
1306 - dev->features |= NETIF_F_LRO;
1307 + dev->features |= NETIF_F_GRO_HW;
1308 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS))
1309 - dev->hw_features |= NETIF_F_LRO;
1310 + dev->hw_features |= NETIF_F_GRO_HW;
1311
1312 dev->vlan_features = dev->features;
1313
1314 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
1315 index f08ed52d51f3f..9b626c169554f 100644
1316 --- a/drivers/net/vrf.c
1317 +++ b/drivers/net/vrf.c
1318 @@ -1036,6 +1036,8 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
1319 bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
1320 bool is_ndisc = ipv6_ndisc_frame(skb);
1321
1322 + nf_reset_ct(skb);
1323 +
1324 /* loopback, multicast & non-ND link-local traffic; do not push through
1325 * packet taps again. Reset pkt_type for upper layers to process skb.
1326 * For strict packets with a source LLA, determine the dst using the
1327 @@ -1092,6 +1094,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
1328 skb->skb_iif = vrf_dev->ifindex;
1329 IPCB(skb)->flags |= IPSKB_L3SLAVE;
1330
1331 + nf_reset_ct(skb);
1332 +
1333 if (ipv4_is_multicast(ip_hdr(skb)->daddr))
1334 goto out;
1335
1336 diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
1337 index 7a364eca46d64..f083fb9038c36 100644
1338 --- a/drivers/net/wireless/ath/ath.h
1339 +++ b/drivers/net/wireless/ath/ath.h
1340 @@ -197,12 +197,13 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
1341 bool ath_is_mybeacon(struct ath_common *common, struct ieee80211_hdr *hdr);
1342
1343 void ath_hw_setbssidmask(struct ath_common *common);
1344 -void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key);
1345 +void ath_key_delete(struct ath_common *common, u8 hw_key_idx);
1346 int ath_key_config(struct ath_common *common,
1347 struct ieee80211_vif *vif,
1348 struct ieee80211_sta *sta,
1349 struct ieee80211_key_conf *key);
1350 bool ath_hw_keyreset(struct ath_common *common, u16 entry);
1351 +bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac);
1352 void ath_hw_cycle_counters_update(struct ath_common *common);
1353 int32_t ath_hw_get_listen_time(struct ath_common *common);
1354
1355 diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
1356 index 5e866a193ed04..d065600791c11 100644
1357 --- a/drivers/net/wireless/ath/ath5k/mac80211-ops.c
1358 +++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
1359 @@ -521,7 +521,7 @@ ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1360 }
1361 break;
1362 case DISABLE_KEY:
1363 - ath_key_delete(common, key);
1364 + ath_key_delete(common, key->hw_key_idx);
1365 break;
1366 default:
1367 ret = -EINVAL;
1368 diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1369 index a82ad739ab806..16a7bae62b7d3 100644
1370 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1371 +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
1372 @@ -1460,7 +1460,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1373 }
1374 break;
1375 case DISABLE_KEY:
1376 - ath_key_delete(common, key);
1377 + ath_key_delete(common, key->hw_key_idx);
1378 break;
1379 default:
1380 ret = -EINVAL;
1381 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
1382 index 2e4489700a859..2842ca205a0a9 100644
1383 --- a/drivers/net/wireless/ath/ath9k/hw.h
1384 +++ b/drivers/net/wireless/ath/ath9k/hw.h
1385 @@ -819,6 +819,7 @@ struct ath_hw {
1386 struct ath9k_pacal_info pacal_info;
1387 struct ar5416Stats stats;
1388 struct ath9k_tx_queue_info txq[ATH9K_NUM_TX_QUEUES];
1389 + DECLARE_BITMAP(pending_del_keymap, ATH_KEYMAX);
1390
1391 enum ath9k_int imask;
1392 u32 imrs2_reg;
1393 diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
1394 index d14e01da3c312..28ccdcb197de2 100644
1395 --- a/drivers/net/wireless/ath/ath9k/main.c
1396 +++ b/drivers/net/wireless/ath/ath9k/main.c
1397 @@ -823,12 +823,80 @@ exit:
1398 ieee80211_free_txskb(hw, skb);
1399 }
1400
1401 +static bool ath9k_txq_list_has_key(struct list_head *txq_list, u32 keyix)
1402 +{
1403 + struct ath_buf *bf;
1404 + struct ieee80211_tx_info *txinfo;
1405 + struct ath_frame_info *fi;
1406 +
1407 + list_for_each_entry(bf, txq_list, list) {
1408 + if (bf->bf_state.stale || !bf->bf_mpdu)
1409 + continue;
1410 +
1411 + txinfo = IEEE80211_SKB_CB(bf->bf_mpdu);
1412 + fi = (struct ath_frame_info *)&txinfo->rate_driver_data[0];
1413 + if (fi->keyix == keyix)
1414 + return true;
1415 + }
1416 +
1417 + return false;
1418 +}
1419 +
1420 +static bool ath9k_txq_has_key(struct ath_softc *sc, u32 keyix)
1421 +{
1422 + struct ath_hw *ah = sc->sc_ah;
1423 + int i;
1424 + struct ath_txq *txq;
1425 + bool key_in_use = false;
1426 +
1427 + for (i = 0; !key_in_use && i < ATH9K_NUM_TX_QUEUES; i++) {
1428 + if (!ATH_TXQ_SETUP(sc, i))
1429 + continue;
1430 + txq = &sc->tx.txq[i];
1431 + if (!txq->axq_depth)
1432 + continue;
1433 + if (!ath9k_hw_numtxpending(ah, txq->axq_qnum))
1434 + continue;
1435 +
1436 + ath_txq_lock(sc, txq);
1437 + key_in_use = ath9k_txq_list_has_key(&txq->axq_q, keyix);
1438 + if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1439 + int idx = txq->txq_tailidx;
1440 +
1441 + while (!key_in_use &&
1442 + !list_empty(&txq->txq_fifo[idx])) {
1443 + key_in_use = ath9k_txq_list_has_key(
1444 + &txq->txq_fifo[idx], keyix);
1445 + INCR(idx, ATH_TXFIFO_DEPTH);
1446 + }
1447 + }
1448 + ath_txq_unlock(sc, txq);
1449 + }
1450 +
1451 + return key_in_use;
1452 +}
1453 +
1454 +static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix)
1455 +{
1456 + struct ath_hw *ah = sc->sc_ah;
1457 + struct ath_common *common = ath9k_hw_common(ah);
1458 +
1459 + if (!test_bit(keyix, ah->pending_del_keymap) ||
1460 + ath9k_txq_has_key(sc, keyix))
1461 + return;
1462 +
1463 + /* No more TXQ frames point to this key cache entry, so delete it. */
1464 + clear_bit(keyix, ah->pending_del_keymap);
1465 + ath_key_delete(common, keyix);
1466 +}
1467 +
1468 static void ath9k_stop(struct ieee80211_hw *hw)
1469 {
1470 struct ath_softc *sc = hw->priv;
1471 struct ath_hw *ah = sc->sc_ah;
1472 struct ath_common *common = ath9k_hw_common(ah);
1473 bool prev_idle;
1474 + int i;
1475
1476 ath9k_deinit_channel_context(sc);
1477
1478 @@ -896,6 +964,14 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1479
1480 spin_unlock_bh(&sc->sc_pcu_lock);
1481
1482 + for (i = 0; i < ATH_KEYMAX; i++)
1483 + ath9k_pending_key_del(sc, i);
1484 +
1485 + /* Clear key cache entries explicitly to get rid of any potentially
1486 + * remaining keys.
1487 + */
1488 + ath9k_cmn_init_crypto(sc->sc_ah);
1489 +
1490 ath9k_ps_restore(sc);
1491
1492 sc->ps_idle = prev_idle;
1493 @@ -1541,12 +1617,11 @@ static void ath9k_del_ps_key(struct ath_softc *sc,
1494 {
1495 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1496 struct ath_node *an = (struct ath_node *) sta->drv_priv;
1497 - struct ieee80211_key_conf ps_key = { .hw_key_idx = an->ps_key };
1498
1499 if (!an->ps_key)
1500 return;
1501
1502 - ath_key_delete(common, &ps_key);
1503 + ath_key_delete(common, an->ps_key);
1504 an->ps_key = 0;
1505 an->key_idx[0] = 0;
1506 }
1507 @@ -1708,6 +1783,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1508 if (sta)
1509 an = (struct ath_node *)sta->drv_priv;
1510
1511 + /* Delete pending key cache entries if no more frames are pointing to
1512 + * them in TXQs.
1513 + */
1514 + for (i = 0; i < ATH_KEYMAX; i++)
1515 + ath9k_pending_key_del(sc, i);
1516 +
1517 switch (cmd) {
1518 case SET_KEY:
1519 if (sta)
1520 @@ -1737,7 +1818,15 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1521 }
1522 break;
1523 case DISABLE_KEY:
1524 - ath_key_delete(common, key);
1525 + if (ath9k_txq_has_key(sc, key->hw_key_idx)) {
1526 + /* Delay key cache entry deletion until there are no
1527 + * remaining TXQ frames pointing to this entry.
1528 + */
1529 + set_bit(key->hw_key_idx, sc->sc_ah->pending_del_keymap);
1530 + ath_hw_keysetmac(common, key->hw_key_idx, NULL);
1531 + } else {
1532 + ath_key_delete(common, key->hw_key_idx);
1533 + }
1534 if (an) {
1535 for (i = 0; i < ARRAY_SIZE(an->key_idx); i++) {
1536 if (an->key_idx[i] != key->hw_key_idx)
1537 diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
1538 index 1816b4e7dc264..61b59a804e308 100644
1539 --- a/drivers/net/wireless/ath/key.c
1540 +++ b/drivers/net/wireless/ath/key.c
1541 @@ -84,8 +84,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
1542 }
1543 EXPORT_SYMBOL(ath_hw_keyreset);
1544
1545 -static bool ath_hw_keysetmac(struct ath_common *common,
1546 - u16 entry, const u8 *mac)
1547 +bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
1548 {
1549 u32 macHi, macLo;
1550 u32 unicast_flag = AR_KEYTABLE_VALID;
1551 @@ -125,6 +124,7 @@ static bool ath_hw_keysetmac(struct ath_common *common,
1552
1553 return true;
1554 }
1555 +EXPORT_SYMBOL(ath_hw_keysetmac);
1556
1557 static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
1558 const struct ath_keyval *k,
1559 @@ -581,29 +581,38 @@ EXPORT_SYMBOL(ath_key_config);
1560 /*
1561 * Delete Key.
1562 */
1563 -void ath_key_delete(struct ath_common *common, struct ieee80211_key_conf *key)
1564 +void ath_key_delete(struct ath_common *common, u8 hw_key_idx)
1565 {
1566 - ath_hw_keyreset(common, key->hw_key_idx);
1567 - if (key->hw_key_idx < IEEE80211_WEP_NKID)
1568 + /* Leave CCMP and TKIP (main key) configured to avoid disabling
1569 + * encryption for potentially pending frames already in a TXQ with the
1570 + * keyix pointing to this key entry. Instead, only clear the MAC address
1571 + * to prevent RX processing from using this key cache entry.
1572 + */
1573 + if (test_bit(hw_key_idx, common->ccmp_keymap) ||
1574 + test_bit(hw_key_idx, common->tkip_keymap))
1575 + ath_hw_keysetmac(common, hw_key_idx, NULL);
1576 + else
1577 + ath_hw_keyreset(common, hw_key_idx);
1578 + if (hw_key_idx < IEEE80211_WEP_NKID)
1579 return;
1580
1581 - clear_bit(key->hw_key_idx, common->keymap);
1582 - clear_bit(key->hw_key_idx, common->ccmp_keymap);
1583 - if (key->cipher != WLAN_CIPHER_SUITE_TKIP)
1584 + clear_bit(hw_key_idx, common->keymap);
1585 + clear_bit(hw_key_idx, common->ccmp_keymap);
1586 + if (!test_bit(hw_key_idx, common->tkip_keymap))
1587 return;
1588
1589 - clear_bit(key->hw_key_idx + 64, common->keymap);
1590 + clear_bit(hw_key_idx + 64, common->keymap);
1591
1592 - clear_bit(key->hw_key_idx, common->tkip_keymap);
1593 - clear_bit(key->hw_key_idx + 64, common->tkip_keymap);
1594 + clear_bit(hw_key_idx, common->tkip_keymap);
1595 + clear_bit(hw_key_idx + 64, common->tkip_keymap);
1596
1597 if (!(common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)) {
1598 - ath_hw_keyreset(common, key->hw_key_idx + 32);
1599 - clear_bit(key->hw_key_idx + 32, common->keymap);
1600 - clear_bit(key->hw_key_idx + 64 + 32, common->keymap);
1601 + ath_hw_keyreset(common, hw_key_idx + 32);
1602 + clear_bit(hw_key_idx + 32, common->keymap);
1603 + clear_bit(hw_key_idx + 64 + 32, common->keymap);
1604
1605 - clear_bit(key->hw_key_idx + 32, common->tkip_keymap);
1606 - clear_bit(key->hw_key_idx + 64 + 32, common->tkip_keymap);
1607 + clear_bit(hw_key_idx + 32, common->tkip_keymap);
1608 + clear_bit(hw_key_idx + 64 + 32, common->tkip_keymap);
1609 }
1610 }
1611 EXPORT_SYMBOL(ath_key_delete);
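
Note: the ath9k and ath/key.c hunks above are two halves of one fix. A key-cache slot must not be wiped while TXQ frames still carry its index, so deletion is split into an immediate "stop RX matching" step (clear the MAC) and a deferred wipe tracked in a pending-deletion bitmap. A minimal kernel-style sketch of that split; txq_references_key() and the other helpers are illustrative stand-ins, not the driver's API:

#include <linux/bitops.h>
#include <linux/types.h>

#define NUM_KEY_SLOTS 128

static DECLARE_BITMAP(pending_del_keymap, NUM_KEY_SLOTS);

static bool txq_references_key(u8 keyix);	/* stand-in helper */
static void keysetmac(u8 keyix, const u8 *mac);	/* mac == NULL: no RX match */
static void key_delete(u8 keyix);		/* full key-cache wipe */

static void disable_key(u8 keyix)
{
	if (txq_references_key(keyix)) {
		/* frames in flight still use keyix: defer the wipe and
		 * only stop RX lookups by clearing the MAC match */
		set_bit(keyix, pending_del_keymap);
		keysetmac(keyix, NULL);
	} else {
		key_delete(keyix);	/* safe to wipe immediately */
	}
}
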
1612 diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
1613 index 6c1b936a94fac..0241f0dcc093f 100644
1614 --- a/drivers/pci/quirks.c
1615 +++ b/drivers/pci/quirks.c
1616 @@ -1905,6 +1905,7 @@ static void quirk_ryzen_xhci_d3hot(struct pci_dev *dev)
1617 }
1618 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e0, quirk_ryzen_xhci_d3hot);
1619 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x15e1, quirk_ryzen_xhci_d3hot);
1620 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x1639, quirk_ryzen_xhci_d3hot);
1621
1622 #ifdef CONFIG_X86_IO_APIC
1623 static int dmi_disable_ioapicreroute(const struct dmi_system_id *d)
1624 diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
1625 index 0517272a268ed..9fb6f7643ea96 100644
1626 --- a/drivers/ptp/Kconfig
1627 +++ b/drivers/ptp/Kconfig
1628 @@ -92,7 +92,8 @@ config DP83640_PHY
1629 config PTP_1588_CLOCK_PCH
1630 tristate "Intel PCH EG20T as PTP clock"
1631 depends on X86_32 || COMPILE_TEST
1632 - depends on HAS_IOMEM && NET
1633 + depends on HAS_IOMEM && PCI
1634 + depends on NET
1635 imply PTP_1588_CLOCK
1636 help
1637 This driver adds support for using the PCH EG20T as a PTP
1638 diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
1639 index 5efc959493ecd..85a71bafaea76 100644
1640 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c
1641 +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
1642 @@ -453,8 +453,8 @@ static int initialize_controller(struct scsi_device *sdev,
1643 if (!h->ctlr)
1644 err = SCSI_DH_RES_TEMP_UNAVAIL;
1645 else {
1646 - list_add_rcu(&h->node, &h->ctlr->dh_list);
1647 h->sdev = sdev;
1648 + list_add_rcu(&h->node, &h->ctlr->dh_list);
1649 }
1650 spin_unlock(&list_lock);
1651 err = SCSI_DH_OK;
1652 @@ -778,11 +778,11 @@ static void rdac_bus_detach( struct scsi_device *sdev )
1653 spin_lock(&list_lock);
1654 if (h->ctlr) {
1655 list_del_rcu(&h->node);
1656 - h->sdev = NULL;
1657 kref_put(&h->ctlr->kref, release_controller);
1658 }
1659 spin_unlock(&list_lock);
1660 sdev->handler_data = NULL;
1661 + synchronize_rcu();
1662 kfree(h);
1663 }
1664
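
Note: both scsi_dh_rdac hunks close the same class of RCU lifetime race. A node must be fully initialized before list_add_rcu() publishes it (readers could otherwise observe h->sdev == NULL), its fields must not be cleared while readers may still traverse it, and it may only be kfree()d after synchronize_rcu() guarantees no reader still holds a reference. The generic shape, as a kernel-context sketch:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct node {
	struct list_head entry;
	void *payload;
};

/* writer side: fully initialize before the RCU-visible publish */
static void publish(struct node *n, void *payload, struct list_head *head)
{
	n->payload = payload;		/* set every field readers use ... */
	list_add_rcu(&n->entry, head);	/* ... then make it reachable */
}

/* teardown: unpublish, wait out current readers, then free */
static void retire(struct node *n)
{
	list_del_rcu(&n->entry);
	synchronize_rcu();		/* readers may still hold n until here */
	kfree(n);
}
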
1665 diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c
1666 index 59cca898f0882..fcfbf3343b64e 100644
1667 --- a/drivers/scsi/megaraid/megaraid_mm.c
1668 +++ b/drivers/scsi/megaraid/megaraid_mm.c
1669 @@ -246,7 +246,7 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
1670 mimd_t mimd;
1671 uint32_t adapno;
1672 int iterator;
1673 -
1674 + bool is_found;
1675
1676 if (copy_from_user(&mimd, umimd, sizeof(mimd_t))) {
1677 *rval = -EFAULT;
1678 @@ -262,12 +262,16 @@ mraid_mm_get_adapter(mimd_t __user *umimd, int *rval)
1679
1680 adapter = NULL;
1681 iterator = 0;
1682 + is_found = false;
1683
1684 list_for_each_entry(adapter, &adapters_list_g, list) {
1685 - if (iterator++ == adapno) break;
1686 + if (iterator++ == adapno) {
1687 + is_found = true;
1688 + break;
1689 + }
1690 }
1691
1692 - if (!adapter) {
1693 + if (!is_found) {
1694 *rval = -ENODEV;
1695 return NULL;
1696 }
1697 @@ -733,6 +737,7 @@ ioctl_done(uioc_t *kioc)
1698 uint32_t adapno;
1699 int iterator;
1700 mraid_mmadp_t* adapter;
1701 + bool is_found;
1702
1703 /*
1704 * When the kioc returns from driver, make sure it still doesn't
1705 @@ -755,19 +760,23 @@ ioctl_done(uioc_t *kioc)
1706 iterator = 0;
1707 adapter = NULL;
1708 adapno = kioc->adapno;
1709 + is_found = false;
1710
1711 con_log(CL_ANN, ( KERN_WARNING "megaraid cmm: completed "
1712 "ioctl that was timedout before\n"));
1713
1714 list_for_each_entry(adapter, &adapters_list_g, list) {
1715 - if (iterator++ == adapno) break;
1716 + if (iterator++ == adapno) {
1717 + is_found = true;
1718 + break;
1719 + }
1720 }
1721
1722 kioc->timedout = 0;
1723
1724 - if (adapter) {
1725 + if (is_found)
1726 mraid_mm_dealloc_kioc( adapter, kioc );
1727 - }
1728 +
1729 }
1730 else {
1731 wake_up(&wait_q);
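
Note: the is_found flag exists because of a list_for_each_entry() subtlety. When the loop runs to completion without a break, the cursor ends up pointing at memory computed from the list head, not at a real element, and it is never NULL, so the old "if (!adapter)" / "if (adapter)" tests could not detect a failed lookup. A kernel-context sketch of the safe lookup shape:

#include <linux/list.h>

struct adapter {
	int unique_id;
	struct list_head list;
};

static struct adapter *find_adapter(struct list_head *head, int id)
{
	struct adapter *a;

	list_for_each_entry(a, head, list) {
		if (a->unique_id == id)
			return a;	/* cursor valid only inside the loop */
	}
	/* here 'a' points into the list head itself, not a real adapter,
	 * and is never NULL -- hence the explicit is_found flag above */
	return NULL;
}
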
1732 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
1733 index 79232cef1af16..3fd109fd9335d 100644
1734 --- a/drivers/scsi/scsi_scan.c
1735 +++ b/drivers/scsi/scsi_scan.c
1736 @@ -454,7 +454,8 @@ static struct scsi_target *scsi_alloc_target(struct device *parent,
1737 error = shost->hostt->target_alloc(starget);
1738
1739 if(error) {
1740 - dev_printk(KERN_ERR, dev, "target allocation failed, error %d\n", error);
1741 + if (error != -ENXIO)
1742 + dev_err(dev, "target allocation failed, error %d\n", error);
1743 /* don't want scsi_target_reap to do the final
1744 * put because it will be under the host lock */
1745 scsi_target_destroy(starget);
1746 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
1747 index 6d7362e7367ed..11592ec7b23ea 100644
1748 --- a/drivers/scsi/scsi_sysfs.c
1749 +++ b/drivers/scsi/scsi_sysfs.c
1750 @@ -787,11 +787,14 @@ store_state_field(struct device *dev, struct device_attribute *attr,
1751 mutex_lock(&sdev->state_mutex);
1752 ret = scsi_device_set_state(sdev, state);
1753 /*
1754 - * If the device state changes to SDEV_RUNNING, we need to run
1755 - * the queue to avoid I/O hang.
1756 + * If the device state changes to SDEV_RUNNING, we need to
1757 + * rescan the device to revalidate it, and run the queue to
1758 + * avoid I/O hang.
1759 */
1760 - if (ret == 0 && state == SDEV_RUNNING)
1761 + if (ret == 0 && state == SDEV_RUNNING) {
1762 + scsi_rescan_device(dev);
1763 blk_mq_run_hw_queues(sdev->request_queue, true);
1764 + }
1765 mutex_unlock(&sdev->state_mutex);
1766
1767 return ret == 0 ? count : -EINVAL;
1768 diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
1769 index d5879142dbef1..ddf0371ad52b2 100644
1770 --- a/drivers/slimbus/messaging.c
1771 +++ b/drivers/slimbus/messaging.c
1772 @@ -66,7 +66,7 @@ int slim_alloc_txn_tid(struct slim_controller *ctrl, struct slim_msg_txn *txn)
1773 int ret = 0;
1774
1775 spin_lock_irqsave(&ctrl->txn_lock, flags);
1776 - ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 0,
1777 + ret = idr_alloc_cyclic(&ctrl->tid_idr, txn, 1,
1778 SLIM_MAX_TIDS, GFP_ATOMIC);
1779 if (ret < 0) {
1780 spin_unlock_irqrestore(&ctrl->txn_lock, flags);
1781 @@ -131,7 +131,8 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
1782 goto slim_xfer_err;
1783 }
1784 }
1785 -
1786 + /* Initialize tid to invalid value */
1787 + txn->tid = 0;
1788 need_tid = slim_tid_txn(txn->mt, txn->mc);
1789
1790 if (need_tid) {
1791 @@ -163,7 +164,7 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
1792 txn->mt, txn->mc, txn->la, ret);
1793
1794 slim_xfer_err:
1795 - if (!clk_pause_msg && (!need_tid || ret == -ETIMEDOUT)) {
1796 + if (!clk_pause_msg && (txn->tid == 0 || ret == -ETIMEDOUT)) {
1797 /*
1798 * remove runtime-pm vote if this was TX only, or
1799 * if there was error during this transaction
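
Note: the messaging.c change works by reserving TID 0 as a sentinel. idr_alloc_cyclic() is asked to allocate from 1 upward, txn->tid is pre-initialized to 0, and the error path can then tell "no TID was ever assigned" apart from a transaction that timed out after getting one. A minimal sketch of the reservation, with MAX_TIDS as an illustrative bound:

#include <linux/idr.h>

#define MAX_TIDS 256

/* returns the new tid (>= 1) or a negative errno; 0 is never handed out */
static int alloc_tid(struct idr *tid_idr, void *txn)
{
	return idr_alloc_cyclic(tid_idr, txn, 1, MAX_TIDS, GFP_ATOMIC);
}
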
1800 diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
1801 index b60541c3f72da..09ecd1fb24ae3 100644
1802 --- a/drivers/slimbus/qcom-ngd-ctrl.c
1803 +++ b/drivers/slimbus/qcom-ngd-ctrl.c
1804 @@ -1061,7 +1061,8 @@ static void qcom_slim_ngd_setup(struct qcom_slim_ngd_ctrl *ctrl)
1805 {
1806 u32 cfg = readl_relaxed(ctrl->ngd->base);
1807
1808 - if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN)
1809 + if (ctrl->state == QCOM_SLIM_NGD_CTRL_DOWN ||
1810 + ctrl->state == QCOM_SLIM_NGD_CTRL_ASLEEP)
1811 qcom_slim_ngd_init_dma(ctrl);
1812
1813 /* By default enable message queues */
1814 @@ -1112,6 +1113,7 @@ static int qcom_slim_ngd_power_up(struct qcom_slim_ngd_ctrl *ctrl)
1815 dev_info(ctrl->dev, "Subsys restart: ADSP active framer\n");
1816 return 0;
1817 }
1818 + qcom_slim_ngd_setup(ctrl);
1819 return 0;
1820 }
1821
1822 @@ -1500,6 +1502,7 @@ static int __maybe_unused qcom_slim_ngd_runtime_suspend(struct device *dev)
1823 struct qcom_slim_ngd_ctrl *ctrl = dev_get_drvdata(dev);
1824 int ret = 0;
1825
1826 + qcom_slim_ngd_exit_dma(ctrl);
1827 if (!ctrl->qmi.handle)
1828 return 0;
1829
1830 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
1831 index 041c68ea329f4..7ca908704777c 100644
1832 --- a/drivers/usb/core/message.c
1833 +++ b/drivers/usb/core/message.c
1834 @@ -647,6 +647,9 @@ int usb_get_descriptor(struct usb_device *dev, unsigned char type,
1835 int i;
1836 int result;
1837
1838 + if (size <= 0) /* No point in asking for no data */
1839 + return -EINVAL;
1840 +
1841 memset(buf, 0, size); /* Make sure we parse really received data */
1842
1843 for (i = 0; i < 3; ++i) {
1844 @@ -695,6 +698,9 @@ static int usb_get_string(struct usb_device *dev, unsigned short langid,
1845 int i;
1846 int result;
1847
1848 + if (size <= 0) /* No point in asking for no data */
1849 + return -EINVAL;
1850 +
1851 for (i = 0; i < 3; ++i) {
1852 /* retry on length 0 or stall; some devices are flakey */
1853 result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
1854 diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
1855 index a279ecacbf60a..97be299f0a8dc 100644
1856 --- a/drivers/vhost/vhost.c
1857 +++ b/drivers/vhost/vhost.c
1858 @@ -702,10 +702,16 @@ static bool log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
1859 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
1860 }
1861
1862 +/* Make sure 64 bit math will not overflow. */
1863 static bool vhost_overflow(u64 uaddr, u64 size)
1864 {
1865 - /* Make sure 64 bit math will not overflow. */
1866 - return uaddr > ULONG_MAX || size > ULONG_MAX || uaddr > ULONG_MAX - size;
1867 + if (uaddr > ULONG_MAX || size > ULONG_MAX)
1868 + return true;
1869 +
1870 + if (!size)
1871 + return false;
1872 +
1873 + return uaddr > ULONG_MAX - size + 1;
1874 }
1875
1876 /* Caller should have vq mutex and device mutex. */
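
Note: the rewritten vhost_overflow() fixes an off-by-one. A region of size bytes starting at uaddr covers uaddr .. uaddr + size - 1, so it fits iff uaddr <= ULONG_MAX - size + 1; the old "uaddr > ULONG_MAX - size" rejected valid regions ending exactly at the last address. A runnable userspace model of the new check:

#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>

static bool vhost_overflow(uint64_t uaddr, uint64_t size)
{
	if (uaddr > ULONG_MAX || size > ULONG_MAX)
		return true;
	if (!size)
		return false;		/* empty range cannot overflow */
	/* bytes uaddr .. uaddr+size-1 fit iff uaddr <= ULONG_MAX - size + 1 */
	return uaddr > (uint64_t)ULONG_MAX - size + 1;
}

int main(void)
{
	/* the one-byte region ending at the very last address is valid;
	 * the old check rejected it */
	assert(!vhost_overflow(ULONG_MAX, 1));
	/* but any two-byte region starting there wraps */
	assert(vhost_overflow(ULONG_MAX, 2));
	return 0;
}
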
1877 diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
1878 index a977e32a88f2f..59a05f1b81054 100644
1879 --- a/drivers/virtio/virtio.c
1880 +++ b/drivers/virtio/virtio.c
1881 @@ -342,6 +342,7 @@ int register_virtio_device(struct virtio_device *dev)
1882 virtio_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE);
1883
1884 INIT_LIST_HEAD(&dev->vqs);
1885 + spin_lock_init(&dev->vqs_list_lock);
1886
1887 /*
1888 * device_add() causes the bus infrastructure to look for a matching
1889 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1890 index 97e8a195e18f5..f6011c9ed32f1 100644
1891 --- a/drivers/virtio/virtio_ring.c
1892 +++ b/drivers/virtio/virtio_ring.c
1893 @@ -1668,7 +1668,9 @@ static struct virtqueue *vring_create_virtqueue_packed(
1894 cpu_to_le16(vq->packed.event_flags_shadow);
1895 }
1896
1897 + spin_lock(&vdev->vqs_list_lock);
1898 list_add_tail(&vq->vq.list, &vdev->vqs);
1899 + spin_unlock(&vdev->vqs_list_lock);
1900 return &vq->vq;
1901
1902 err_desc_extra:
1903 @@ -2126,7 +2128,9 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
1904 memset(vq->split.desc_state, 0, vring.num *
1905 sizeof(struct vring_desc_state_split));
1906
1907 + spin_lock(&vdev->vqs_list_lock);
1908 list_add_tail(&vq->vq.list, &vdev->vqs);
1909 + spin_unlock(&vdev->vqs_list_lock);
1910 return &vq->vq;
1911 }
1912 EXPORT_SYMBOL_GPL(__vring_new_virtqueue);
1913 @@ -2210,7 +2214,9 @@ void vring_del_virtqueue(struct virtqueue *_vq)
1914 }
1915 if (!vq->packed_ring)
1916 kfree(vq->split.desc_state);
1917 + spin_lock(&vq->vq.vdev->vqs_list_lock);
1918 list_del(&_vq->list);
1919 + spin_unlock(&vq->vq.vdev->vqs_list_lock);
1920 kfree(vq);
1921 }
1922 EXPORT_SYMBOL_GPL(vring_del_virtqueue);
1923 @@ -2274,10 +2280,12 @@ void virtio_break_device(struct virtio_device *dev)
1924 {
1925 struct virtqueue *_vq;
1926
1927 + spin_lock(&dev->vqs_list_lock);
1928 list_for_each_entry(_vq, &dev->vqs, list) {
1929 struct vring_virtqueue *vq = to_vvq(_vq);
1930 vq->broken = true;
1931 }
1932 + spin_unlock(&dev->vqs_list_lock);
1933 }
1934 EXPORT_SYMBOL_GPL(virtio_break_device);
1935
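
Note: the virtio.c and virtio_ring.c hunks, together with the vqs_list_lock field added to struct virtio_device later in this patch, establish one invariant: every mutation of dev->vqs and every traversal of it (virtio_break_device()) happens under vqs_list_lock, so a virtqueue being torn down on one CPU can no longer race a list walk on another. The locking discipline in isolation, as a kernel-context sketch:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct vdev {
	spinlock_t vqs_list_lock;	/* protects the vqs list */
	struct list_head vqs;
};

struct vq {
	struct list_head list;
	bool broken;
};

static void vq_add(struct vdev *d, struct vq *q)
{
	spin_lock(&d->vqs_list_lock);
	list_add_tail(&q->list, &d->vqs);
	spin_unlock(&d->vqs_list_lock);
}

static void vdev_break_all(struct vdev *d)
{
	struct vq *q;

	spin_lock(&d->vqs_list_lock);
	list_for_each_entry(q, &d->vqs, list)
		q->broken = true;
	spin_unlock(&d->vqs_list_lock);
}
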
1936 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
1937 index b044b1d910dec..54b607a3cc3f2 100644
1938 --- a/fs/btrfs/inode.c
1939 +++ b/fs/btrfs/inode.c
1940 @@ -9702,8 +9702,14 @@ static int btrfs_rename_exchange(struct inode *old_dir,
1941 bool root_log_pinned = false;
1942 bool dest_log_pinned = false;
1943
1944 - /* we only allow rename subvolume link between subvolumes */
1945 - if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest)
1946 + /*
1947 + * For non-subvolumes allow exchange only within one subvolume, in the
1948 + * same inode namespace. Two subvolumes (represented as directory) can
1949 + * be exchanged as they're a logical link and have a fixed inode number.
1950 + */
1951 + if (root != dest &&
1952 + (old_ino != BTRFS_FIRST_FREE_OBJECTID ||
1953 + new_ino != BTRFS_FIRST_FREE_OBJECTID))
1954 return -EXDEV;
1955
1956 /* close the race window with snapshot create/destroy ioctl */
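
Note: the loosened check above admits exactly one new cross-root case: RENAME_EXCHANGE of two subvolumes, which are logical links with the fixed inode number BTRFS_FIRST_FREE_OBJECTID. Everything else spanning two roots remains -EXDEV. The admission rule in isolation, as an illustrative predicate (256 is the kernel's value of BTRFS_FIRST_FREE_OBJECTID):

#include <stdbool.h>

#define BTRFS_FIRST_FREE_OBJECTID 256ULL

static bool exchange_allowed(const void *root, const void *dest,
			     unsigned long long old_ino,
			     unsigned long long new_ino)
{
	if (root == dest)
		return true;	/* same subvolume: always allowed */
	/* across roots, both entries must themselves be subvolumes */
	return old_ino == BTRFS_FIRST_FREE_OBJECTID &&
	       new_ino == BTRFS_FIRST_FREE_OBJECTID;
}
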
1957 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
1958 index bf3eaa9030335..ae2cb15d95407 100644
1959 --- a/fs/ext4/ext4.h
1960 +++ b/fs/ext4/ext4.h
1961 @@ -718,7 +718,7 @@ enum {
1962 #define EXT4_MAX_BLOCK_FILE_PHYS 0xFFFFFFFF
1963
1964 /* Max logical block we can support */
1965 -#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFF
1966 +#define EXT4_MAX_LOGICAL_BLOCK 0xFFFFFFFE
1967
1968 /*
1969 * Structure of an inode on the disk
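
Note: a hedged reading of why the cap drops to 0xFFFFFFFE: logical block ranges are carried as a 32-bit (lblk, len) pair, and mapping even a single block at lblk 0xFFFFFFFF makes lblk + len wrap to 0. Reserving the last block keeps the end of any valid range representable. A runnable check of that arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t lblk = 0xFFFFFFFFu, len = 1;

	assert((uint32_t)(lblk + len) == 0);		/* wraps: unsafe */
	lblk = 0xFFFFFFFEu;
	assert((uint32_t)(lblk + len) == 0xFFFFFFFFu);	/* still fits */
	return 0;
}
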
1970 diff --git a/fs/namespace.c b/fs/namespace.c
1971 index a092611d89e77..5782cd55dfdbb 100644
1972 --- a/fs/namespace.c
1973 +++ b/fs/namespace.c
1974 @@ -1647,8 +1647,12 @@ static inline bool may_mount(void)
1975 }
1976
1977 #ifdef CONFIG_MANDATORY_FILE_LOCKING
1978 -static inline bool may_mandlock(void)
1979 +static bool may_mandlock(void)
1980 {
1981 + pr_warn_once("======================================================\n"
1982 + "WARNING: the mand mount option is being deprecated and\n"
1983 + " will be removed in v5.15!\n"
1984 + "======================================================\n");
1985 return capable(CAP_SYS_ADMIN);
1986 }
1987 #else
1988 diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
1989 index 7a08a576f7b29..ab5e92897270a 100644
1990 --- a/fs/overlayfs/file.c
1991 +++ b/fs/overlayfs/file.c
1992 @@ -9,6 +9,9 @@
1993 #include <linux/xattr.h>
1994 #include <linux/uio.h>
1995 #include <linux/uaccess.h>
1996 +#include <linux/splice.h>
1997 +#include <linux/mm.h>
1998 +#include <linux/fs.h>
1999 #include "overlayfs.h"
2000
2001 static char ovl_whatisit(struct inode *inode, struct inode *realinode)
2002 @@ -293,6 +296,48 @@ out_unlock:
2003 return ret;
2004 }
2005
2006 +static ssize_t ovl_splice_read(struct file *in, loff_t *ppos,
2007 + struct pipe_inode_info *pipe, size_t len,
2008 + unsigned int flags)
2009 +{
2010 + ssize_t ret;
2011 + struct fd real;
2012 + const struct cred *old_cred;
2013 +
2014 + ret = ovl_real_fdget(in, &real);
2015 + if (ret)
2016 + return ret;
2017 +
2018 + old_cred = ovl_override_creds(file_inode(in)->i_sb);
2019 + ret = generic_file_splice_read(real.file, ppos, pipe, len, flags);
2020 + revert_creds(old_cred);
2021 +
2022 + ovl_file_accessed(in);
2023 + fdput(real);
2024 + return ret;
2025 +}
2026 +
2027 +static ssize_t
2028 +ovl_splice_write(struct pipe_inode_info *pipe, struct file *out,
2029 + loff_t *ppos, size_t len, unsigned int flags)
2030 +{
2031 + struct fd real;
2032 + const struct cred *old_cred;
2033 + ssize_t ret;
2034 +
2035 + ret = ovl_real_fdget(out, &real);
2036 + if (ret)
2037 + return ret;
2038 +
2039 + old_cred = ovl_override_creds(file_inode(out)->i_sb);
2040 + ret = iter_file_splice_write(pipe, real.file, ppos, len, flags);
2041 + revert_creds(old_cred);
2042 +
2043 + ovl_file_accessed(out);
2044 + fdput(real);
2045 + return ret;
2046 +}
2047 +
2048 static int ovl_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2049 {
2050 struct fd real;
2051 @@ -649,6 +694,8 @@ const struct file_operations ovl_file_operations = {
2052 .fadvise = ovl_fadvise,
2053 .unlocked_ioctl = ovl_ioctl,
2054 .compat_ioctl = ovl_compat_ioctl,
2055 + .splice_read = ovl_splice_read,
2056 + .splice_write = ovl_splice_write,
2057
2058 .copy_file_range = ovl_copy_file_range,
2059 .remap_file_range = ovl_remap_file_range,
2060 diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
2061 index fb5b2a41bd456..b6d0b68f55032 100644
2062 --- a/include/linux/memcontrol.h
2063 +++ b/include/linux/memcontrol.h
2064 @@ -356,17 +356,54 @@ static inline bool mem_cgroup_disabled(void)
2065 return !cgroup_subsys_enabled(memory_cgrp_subsys);
2066 }
2067
2068 -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
2069 - bool in_low_reclaim)
2070 +static inline void mem_cgroup_protection(struct mem_cgroup *root,
2071 + struct mem_cgroup *memcg,
2072 + unsigned long *min,
2073 + unsigned long *low)
2074 {
2075 + *min = *low = 0;
2076 +
2077 if (mem_cgroup_disabled())
2078 - return 0;
2079 + return;
2080
2081 - if (in_low_reclaim)
2082 - return READ_ONCE(memcg->memory.emin);
2083 + /*
2084 + * There is no reclaim protection applied to a targeted reclaim.
2085 + * We are special casing this specific case here because
2086 + * mem_cgroup_protected calculation is not robust enough to keep
2087 + * the protection invariant for calculated effective values for
2088 + * parallel reclaimers with different reclaim target. This is
2089 + * especially a problem for tail memcgs (as they have pages on LRU)
2090 + * which would want to have effective values 0 for targeted reclaim
2091 + * but a different value for external reclaim.
2092 + *
2093 + * Example
2094 + * Let's have global and A's reclaim in parallel:
2095 + * |
2096 + * A (low=2G, usage = 3G, max = 3G, children_low_usage = 1.5G)
2097 + * |\
2098 + * | C (low = 1G, usage = 2.5G)
2099 + * B (low = 1G, usage = 0.5G)
2100 + *
2101 + * For the global reclaim
2102 + * A.elow = A.low
2103 + * B.elow = min(B.usage, B.low) because children_low_usage <= A.elow
2104 + * C.elow = min(C.usage, C.low)
2105 + *
2106 + * With the effective values resetting we have A reclaim
2107 + * A.elow = 0
2108 + * B.elow = B.low
2109 + * C.elow = C.low
2110 + *
2111 + * If the global reclaim races with A's reclaim then
2112 + * B.elow = C.elow = 0 because children_low_usage > A.elow)
2113 + * is possible and reclaiming B would be violating the protection.
2114 + *
2115 + */
2116 + if (root == memcg)
2117 + return;
2118
2119 - return max(READ_ONCE(memcg->memory.emin),
2120 - READ_ONCE(memcg->memory.elow));
2121 + *min = READ_ONCE(memcg->memory.emin);
2122 + *low = READ_ONCE(memcg->memory.elow);
2123 }
2124
2125 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
2126 @@ -847,10 +884,12 @@ static inline void memcg_memory_event_mm(struct mm_struct *mm,
2127 {
2128 }
2129
2130 -static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
2131 - bool in_low_reclaim)
2132 +static inline void mem_cgroup_protection(struct mem_cgroup *root,
2133 + struct mem_cgroup *memcg,
2134 + unsigned long *min,
2135 + unsigned long *low)
2136 {
2137 - return 0;
2138 + *min = *low = 0;
2139 }
2140
2141 static inline enum mem_cgroup_protection mem_cgroup_protected(
2142 diff --git a/include/linux/virtio.h b/include/linux/virtio.h
2143 index 15f906e4a748f..7c075463c7f2b 100644
2144 --- a/include/linux/virtio.h
2145 +++ b/include/linux/virtio.h
2146 @@ -111,6 +111,7 @@ struct virtio_device {
2147 bool config_enabled;
2148 bool config_change_pending;
2149 spinlock_t config_lock;
2150 + spinlock_t vqs_list_lock; /* Protects VQs list access */
2151 struct device dev;
2152 struct virtio_device_id id;
2153 const struct virtio_config_ops *config;
2154 diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
2155 index 52c2b11a0b471..0b5a446ee59c9 100644
2156 --- a/kernel/bpf/verifier.c
2157 +++ b/kernel/bpf/verifier.c
2158 @@ -8586,6 +8586,7 @@ static void sanitize_dead_code(struct bpf_verifier_env *env)
2159 if (aux_data[i].seen)
2160 continue;
2161 memcpy(insn + i, &trap, sizeof(trap));
2162 + aux_data[i].zext_dst = false;
2163 }
2164 }
2165
2166 diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
2167 index e40712abe089e..f63766366e238 100644
2168 --- a/kernel/trace/trace_events_hist.c
2169 +++ b/kernel/trace/trace_events_hist.c
2170 @@ -4291,6 +4291,8 @@ trace_action_create_field_var(struct hist_trigger_data *hist_data,
2171 event = data->match_data.event;
2172 }
2173
2174 + if (!event)
2175 + goto free;
2176 /*
2177 * At this point, we're looking at a field on another
2178 * event. Because we can't modify a hist trigger on
2179 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
2180 index 2701497edda5c..6d7fe3589e4a0 100644
2181 --- a/mm/memcontrol.c
2182 +++ b/mm/memcontrol.c
2183 @@ -6446,6 +6446,14 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
2184
2185 if (!root)
2186 root = root_mem_cgroup;
2187 +
2188 + /*
2189 + * Effective values of the reclaim targets are ignored so they
2190 + * can be stale. Have a look at mem_cgroup_protection for more
2191 + * details.
2192 + * TODO: calculation should be more robust so that we do not need
2193 + * that special casing.
2194 + */
2195 if (memcg == root)
2196 return MEMCG_PROT_NONE;
2197
2198 diff --git a/mm/vmscan.c b/mm/vmscan.c
2199 index 10feb872d9a4f..fad9be4703ece 100644
2200 --- a/mm/vmscan.c
2201 +++ b/mm/vmscan.c
2202 @@ -89,9 +89,12 @@ struct scan_control {
2203 unsigned int may_swap:1;
2204
2205 /*
2206 - * Cgroups are not reclaimed below their configured memory.low,
2207 - * unless we threaten to OOM. If any cgroups are skipped due to
2208 - * memory.low and nothing was reclaimed, go back for memory.low.
2209 + * Cgroup memory below memory.low is protected as long as we
2210 + * don't threaten to OOM. If any cgroup is reclaimed at
2211 + * reduced force or passed over entirely due to its memory.low
2212 + * setting (memcg_low_skipped), and nothing is reclaimed as a
2213 + * result, then go back for one more cycle that reclaims the protected
2214 + * memory (memcg_low_reclaim) to avert OOM.
2215 */
2216 unsigned int memcg_low_reclaim:1;
2217 unsigned int memcg_low_skipped:1;
2218 @@ -2458,14 +2461,14 @@ out:
2219 for_each_evictable_lru(lru) {
2220 int file = is_file_lru(lru);
2221 unsigned long lruvec_size;
2222 + unsigned long low, min;
2223 unsigned long scan;
2224 - unsigned long protection;
2225
2226 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx);
2227 - protection = mem_cgroup_protection(memcg,
2228 - sc->memcg_low_reclaim);
2229 + mem_cgroup_protection(sc->target_mem_cgroup, memcg,
2230 + &min, &low);
2231
2232 - if (protection) {
2233 + if (min || low) {
2234 /*
2235 * Scale a cgroup's reclaim pressure by proportioning
2236 * its current usage to its memory.low or memory.min
2237 @@ -2496,6 +2499,15 @@ out:
2238 * hard protection.
2239 */
2240 unsigned long cgroup_size = mem_cgroup_size(memcg);
2241 + unsigned long protection;
2242 +
2243 + /* memory.low scaling, make sure we retry before OOM */
2244 + if (!sc->memcg_low_reclaim && low > min) {
2245 + protection = low;
2246 + sc->memcg_low_skipped = 1;
2247 + } else {
2248 + protection = min;
2249 + }
2250
2251 /* Avoid TOCTOU with earlier protection check */
2252 cgroup_size = max(cgroup_size, protection);
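
Note: what the min/low pair feeds into: reclaim pressure on a protected cgroup is scaled down by the fraction of its usage that protection covers, preferring the softer memory.low bound until the memcg_low_reclaim retry pass described in the scan_control comment forces it down to memory.min. A simplified userspace model of that proportioning (the kernel additionally clamps cgroup_size against protection and enforces a minimum scan batch):

#include <stdbool.h>

static unsigned long scan_target(unsigned long lruvec_size,
				 unsigned long cgroup_size,
				 unsigned long min, unsigned long low,
				 bool low_reclaim)
{
	/* honor memory.low first; fall back to memory.min once the
	 * low-reclaim retry pass is already running */
	unsigned long protection = (!low_reclaim && low > min) ? low : min;

	if (protection >= cgroup_size)
		return 0;		/* fully protected: scan nothing */
	return lruvec_size - lruvec_size * protection / cgroup_size;
}
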
2253 diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
2254 index bef84b95e2c47..ac98e3b37ab47 100644
2255 --- a/net/bluetooth/hidp/core.c
2256 +++ b/net/bluetooth/hidp/core.c
2257 @@ -1290,7 +1290,7 @@ static int hidp_session_thread(void *arg)
2258
2259 /* cleanup runtime environment */
2260 remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
2261 - remove_wait_queue(sk_sleep(session->intr_sock->sk), &ctrl_wait);
2262 + remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait);
2263 wake_up_interruptible(&session->report_queue);
2264 hidp_del_timer(session);
2265
2266 diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
2267 index 9c3b27c257bbf..cb818617699c3 100644
2268 --- a/net/dccp/dccp.h
2269 +++ b/net/dccp/dccp.h
2270 @@ -41,9 +41,9 @@ extern bool dccp_debug;
2271 #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a)
2272 #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a)
2273 #else
2274 -#define dccp_pr_debug(format, a...)
2275 -#define dccp_pr_debug_cat(format, a...)
2276 -#define dccp_debug(format, a...)
2277 +#define dccp_pr_debug(format, a...) do {} while (0)
2278 +#define dccp_pr_debug_cat(format, a...) do {} while (0)
2279 +#define dccp_debug(format, a...) do {} while (0)
2280 #endif
2281
2282 extern struct inet_hashinfo dccp_hashinfo;
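
Note: the do {} while (0) stubs are the standard idiom for no-op statement macros: the macro still consumes its trailing semicolon and expands to a real (non-empty) statement, so debug-off builds compile the same call sites cleanly instead of leaving empty if/else bodies behind for compilers to flag with -Wempty-body. A generic illustration, not dccp code:

#define log_dbg(fmt, a...) do {} while (0)

static int example(int cond)
{
	if (cond)
		log_dbg("reachable\n");	/* a full statement, no empty body */
	else
		cond = -1;
	return cond;
}
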
2283 diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
2284 index 00f4323cfeb87..faa0844c01fb8 100644
2285 --- a/net/netfilter/nft_exthdr.c
2286 +++ b/net/netfilter/nft_exthdr.c
2287 @@ -231,7 +231,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2288 unsigned int i, optl, tcphdr_len, offset;
2289 struct tcphdr *tcph;
2290 u8 *opt;
2291 - u32 src;
2292
2293 tcph = nft_tcp_header_pointer(pkt, sizeof(buff), buff, &tcphdr_len);
2294 if (!tcph)
2295 @@ -240,7 +239,6 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2296 opt = (u8 *)tcph;
2297 for (i = sizeof(*tcph); i < tcphdr_len - 1; i += optl) {
2298 union {
2299 - u8 octet;
2300 __be16 v16;
2301 __be32 v32;
2302 } old, new;
2303 @@ -262,13 +260,13 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2304 if (!tcph)
2305 return;
2306
2307 - src = regs->data[priv->sreg];
2308 offset = i + priv->offset;
2309
2310 switch (priv->len) {
2311 case 2:
2312 old.v16 = get_unaligned((u16 *)(opt + offset));
2313 - new.v16 = src;
2314 + new.v16 = (__force __be16)nft_reg_load16(
2315 + &regs->data[priv->sreg]);
2316
2317 switch (priv->type) {
2318 case TCPOPT_MSS:
2319 @@ -286,7 +284,7 @@ static void nft_exthdr_tcp_set_eval(const struct nft_expr *expr,
2320 old.v16, new.v16, false);
2321 break;
2322 case 4:
2323 - new.v32 = src;
2324 + new.v32 = regs->data[priv->sreg];
2325 old.v32 = get_unaligned((u32 *)(opt + offset));
2326
2327 if (old.v32 == new.v32)
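
Note: the 2-byte case above is an endianness fix. nftables keeps a 16-bit value in the first two bytes of its 32-bit register, so the old "new.v16 = src" truncation took the arithmetic low half, which only coincides with those bytes on little-endian hosts; nft_reg_load16() instead reads the register through a 16-bit pointer, which is positionally correct everywhere. A runnable userspace model of the difference:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* model of nft_reg_load16(): 16-bit payloads live in the first two
 * bytes of the 32-bit register, so read them positionally */
static uint16_t reg_load16(const uint32_t *reg)
{
	uint16_t v;

	memcpy(&v, reg, sizeof(v));
	return v;
}

int main(void)
{
	uint32_t reg = 0;
	uint16_t mss = 1460;

	memcpy(&reg, &mss, sizeof(mss));	/* how the value is stored */
	assert(reg_load16(&reg) == mss);	/* positional read: correct */
	/* (uint16_t)reg would take the arithmetic low half instead, which
	 * matches the first two bytes only on little-endian hosts */
	return 0;
}
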
2328 diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
2329 index 3fc38d16c4568..19af0efeb8dc1 100644
2330 --- a/net/openvswitch/vport.c
2331 +++ b/net/openvswitch/vport.c
2332 @@ -499,6 +499,7 @@ void ovs_vport_send(struct vport *vport, struct sk_buff *skb, u8 mac_proto)
2333 }
2334
2335 skb->dev = vport->dev;
2336 + skb->tstamp = 0;
2337 vport->ops->send(skb);
2338 return;
2339
2340 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
2341 index 7ac3f04ca8c00..e92fcb150e57c 100644
2342 --- a/sound/pci/hda/hda_generic.c
2343 +++ b/sound/pci/hda/hda_generic.c
2344 @@ -3458,7 +3458,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
2345 struct hda_gen_spec *spec = codec->spec;
2346 const struct hda_input_mux *imux;
2347 struct nid_path *path;
2348 - int i, adc_idx, err = 0;
2349 + int i, adc_idx, ret, err = 0;
2350
2351 imux = &spec->input_mux;
2352 adc_idx = kcontrol->id.index;
2353 @@ -3468,9 +3468,13 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
2354 if (!path || !path->ctls[type])
2355 continue;
2356 kcontrol->private_value = path->ctls[type];
2357 - err = func(kcontrol, ucontrol);
2358 - if (err < 0)
2359 + ret = func(kcontrol, ucontrol);
2360 + if (ret < 0) {
2361 + err = ret;
2362 break;
2363 + }
2364 + if (ret > 0)
2365 + err = 1;
2366 }
2367 mutex_unlock(&codec->control_mutex);
2368 if (err >= 0 && spec->cap_sync_hook)
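
Note: the cap_put_caller() change separates two meanings the old single err variable conflated: a negative return (error, stop) and a positive one (value changed). Because the loop applies the control to several paths, a later return of 0 used to overwrite an earlier 1 and swallow the change notification. The accumulation pattern in miniature, as a generic illustration:

static int apply_all(int (*op)(int idx), int n)
{
	int i, ret, err = 0;

	for (i = 0; i < n; i++) {
		ret = op(i);
		if (ret < 0)
			return ret;	/* error: report immediately */
		if (ret > 0)
			err = 1;	/* remember any "changed" result */
	}
	return err;			/* 1 if anything changed, else 0 */
}
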
2369 diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2370 index 216e88624c5f3..7d59846808b52 100644
2371 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2372 +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
2373 @@ -127,7 +127,7 @@ static void sst_fill_alloc_params(struct snd_pcm_substream *substream,
2374 snd_pcm_uframes_t period_size;
2375 ssize_t periodbytes;
2376 ssize_t buffer_bytes = snd_pcm_lib_buffer_bytes(substream);
2377 - u32 buffer_addr = substream->runtime->dma_addr;
2378 + u32 buffer_addr = virt_to_phys(substream->runtime->dma_area);
2379
2380 channels = substream->runtime->channels;
2381 period_size = substream->runtime->period_size;