Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0267-5.4.168-all-fixes.patch

Revision 3637
Mon Oct 24 12:40:44 2022 UTC by niro
File size: 79292 bytes
-add missing
1 diff --git a/Makefile b/Makefile
2 index 1045f7fc08503..c23f5b17d239f 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 167
10 +SUBLEVEL = 168
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 diff --git a/arch/arm/boot/dts/imx6ull-pinfunc.h b/arch/arm/boot/dts/imx6ull-pinfunc.h
15 index eb025a9d47592..7328d4ef8559f 100644
16 --- a/arch/arm/boot/dts/imx6ull-pinfunc.h
17 +++ b/arch/arm/boot/dts/imx6ull-pinfunc.h
18 @@ -82,6 +82,6 @@
19 #define MX6ULL_PAD_CSI_DATA04__ESAI_TX_FS 0x01F4 0x0480 0x0000 0x9 0x0
20 #define MX6ULL_PAD_CSI_DATA05__ESAI_TX_CLK 0x01F8 0x0484 0x0000 0x9 0x0
21 #define MX6ULL_PAD_CSI_DATA06__ESAI_TX5_RX0 0x01FC 0x0488 0x0000 0x9 0x0
22 -#define MX6ULL_PAD_CSI_DATA07__ESAI_T0 0x0200 0x048C 0x0000 0x9 0x0
23 +#define MX6ULL_PAD_CSI_DATA07__ESAI_TX0 0x0200 0x048C 0x0000 0x9 0x0
24
25 #endif /* __DTS_IMX6ULL_PINFUNC_H */
26 diff --git a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
27 index b4c0a76a4d1af..4c2fcfcc7baed 100644
28 --- a/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
29 +++ b/arch/arm/boot/dts/socfpga_arria10_socdk_qspi.dts
30 @@ -12,7 +12,7 @@
31 flash0: n25q00@0 {
32 #address-cells = <1>;
33 #size-cells = <1>;
34 - compatible = "n25q00aa";
35 + compatible = "micron,mt25qu02g", "jedec,spi-nor";
36 reg = <0>;
37 spi-max-frequency = <100000000>;
38
39 diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
40 index 90e676e7019f2..1b02d46496a85 100644
41 --- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
42 +++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
43 @@ -119,7 +119,7 @@
44 flash: flash@0 {
45 #address-cells = <1>;
46 #size-cells = <1>;
47 - compatible = "n25q256a";
48 + compatible = "micron,n25q256a", "jedec,spi-nor";
49 reg = <0>;
50 spi-max-frequency = <100000000>;
51
52 diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
53 index 6f138b2b26163..51bb436784e24 100644
54 --- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
55 +++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
56 @@ -124,7 +124,7 @@
57 flash0: n25q00@0 {
58 #address-cells = <1>;
59 #size-cells = <1>;
60 - compatible = "n25q00";
61 + compatible = "micron,mt25qu02g", "jedec,spi-nor";
62 reg = <0>; /* chip select */
63 spi-max-frequency = <100000000>;
64
65 diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
66 index c155ff02eb6e0..cae9ddd5ed38b 100644
67 --- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
68 +++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
69 @@ -169,7 +169,7 @@
70 flash: flash@0 {
71 #address-cells = <1>;
72 #size-cells = <1>;
73 - compatible = "n25q00";
74 + compatible = "micron,mt25qu02g", "jedec,spi-nor";
75 reg = <0>;
76 spi-max-frequency = <100000000>;
77
78 diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
79 index 8d5d3996f6f27..ca18b959e6559 100644
80 --- a/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
81 +++ b/arch/arm/boot/dts/socfpga_cyclone5_socrates.dts
82 @@ -80,7 +80,7 @@
83 flash: flash@0 {
84 #address-cells = <1>;
85 #size-cells = <1>;
86 - compatible = "n25q256a";
87 + compatible = "micron,n25q256a", "jedec,spi-nor";
88 reg = <0>;
89 spi-max-frequency = <100000000>;
90 m25p,fast-read;
91 diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
92 index 99a71757cdf46..3f7aa7bf0863a 100644
93 --- a/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
94 +++ b/arch/arm/boot/dts/socfpga_cyclone5_sodia.dts
95 @@ -116,7 +116,7 @@
96 flash0: n25q512a@0 {
97 #address-cells = <1>;
98 #size-cells = <1>;
99 - compatible = "n25q512a";
100 + compatible = "micron,n25q512a", "jedec,spi-nor";
101 reg = <0>;
102 spi-max-frequency = <100000000>;
103
104 diff --git a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
105 index a060718758b67..25874e1b9c829 100644
106 --- a/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
107 +++ b/arch/arm/boot/dts/socfpga_cyclone5_vining_fpga.dts
108 @@ -224,7 +224,7 @@
109 n25q128@0 {
110 #address-cells = <1>;
111 #size-cells = <1>;
112 - compatible = "n25q128";
113 + compatible = "micron,n25q128", "jedec,spi-nor";
114 reg = <0>; /* chip select */
115 spi-max-frequency = <100000000>;
116 m25p,fast-read;
117 @@ -241,7 +241,7 @@
118 n25q00@1 {
119 #address-cells = <1>;
120 #size-cells = <1>;
121 - compatible = "n25q00";
122 + compatible = "micron,mt25qu02g", "jedec,spi-nor";
123 reg = <1>; /* chip select */
124 spi-max-frequency = <100000000>;
125 m25p,fast-read;
126 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
127 index e87a04477440e..292ca70c512b5 100644
128 --- a/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
129 +++ b/arch/arm64/boot/dts/rockchip/rk3399-khadas-edge.dtsi
130 @@ -685,7 +685,6 @@
131 &sdhci {
132 bus-width = <8>;
133 mmc-hs400-1_8v;
134 - mmc-hs400-enhanced-strobe;
135 non-removable;
136 status = "okay";
137 };
138 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
139 index 73be38a537960..a72e77c261ef3 100644
140 --- a/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
141 +++ b/arch/arm64/boot/dts/rockchip/rk3399-leez-p710.dts
142 @@ -49,7 +49,7 @@
143 regulator-boot-on;
144 regulator-min-microvolt = <3300000>;
145 regulator-max-microvolt = <3300000>;
146 - vim-supply = <&vcc3v3_sys>;
147 + vin-supply = <&vcc3v3_sys>;
148 };
149
150 vcc3v3_sys: vcc3v3-sys {
151 diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
152 index 1ae1ebd4efdd0..da3b031d4befa 100644
153 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
154 +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dts
155 @@ -452,7 +452,7 @@
156 status = "okay";
157
158 bt656-supply = <&vcc_3v0>;
159 - audio-supply = <&vcc_3v0>;
160 + audio-supply = <&vcc1v8_codec>;
161 sdmmc-supply = <&vcc_sdio>;
162 gpio1830-supply = <&vcc_3v0>;
163 };
164 diff --git a/arch/s390/kernel/machine_kexec_file.c b/arch/s390/kernel/machine_kexec_file.c
165 index e7435f3a3d2d2..76cd09879eaf4 100644
166 --- a/arch/s390/kernel/machine_kexec_file.c
167 +++ b/arch/s390/kernel/machine_kexec_file.c
168 @@ -277,6 +277,7 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
169 {
170 Elf_Rela *relas;
171 int i, r_type;
172 + int ret;
173
174 relas = (void *)pi->ehdr + relsec->sh_offset;
175
176 @@ -311,7 +312,11 @@ int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
177 addr = section->sh_addr + relas[i].r_offset;
178
179 r_type = ELF64_R_TYPE(relas[i].r_info);
180 - arch_kexec_do_relocs(r_type, loc, val, addr);
181 + ret = arch_kexec_do_relocs(r_type, loc, val, addr);
182 + if (ret) {
183 + pr_err("Unknown rela relocation: %d\n", r_type);
184 + return -ENOEXEC;
185 + }
186 }
187 return 0;
188 }
189 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
190 index 464efedc778b0..2b247014ba452 100644
191 --- a/drivers/ata/libata-scsi.c
192 +++ b/drivers/ata/libata-scsi.c
193 @@ -3164,8 +3164,19 @@ static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc)
194 goto invalid_fld;
195 }
196
197 - if (ata_is_ncq(tf->protocol) && (cdb[2 + cdb_offset] & 0x3) == 0)
198 - tf->protocol = ATA_PROT_NCQ_NODATA;
199 + if ((cdb[2 + cdb_offset] & 0x3) == 0) {
200 + /*
201 + * When T_LENGTH is zero (No data is transferred), dir should
202 + * be DMA_NONE.
203 + */
204 + if (scmd->sc_data_direction != DMA_NONE) {
205 + fp = 2 + cdb_offset;
206 + goto invalid_fld;
207 + }
208 +
209 + if (ata_is_ncq(tf->protocol))
210 + tf->protocol = ATA_PROT_NCQ_NODATA;
211 + }
212
213 /* enable LBA */
214 tf->flags |= ATA_TFLAG_LBA;
215 diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
216 index baf10b73675e2..774af5ce70dad 100644
217 --- a/drivers/block/xen-blkfront.c
218 +++ b/drivers/block/xen-blkfront.c
219 @@ -1565,9 +1565,12 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
220 unsigned long flags;
221 struct blkfront_ring_info *rinfo = (struct blkfront_ring_info *)dev_id;
222 struct blkfront_info *info = rinfo->dev_info;
223 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
224
225 - if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
226 + if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
227 + xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
228 return IRQ_HANDLED;
229 + }
230
231 spin_lock_irqsave(&rinfo->ring_lock, flags);
232 again:
233 @@ -1583,6 +1586,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
234 unsigned long id;
235 unsigned int op;
236
237 + eoiflag = 0;
238 +
239 RING_COPY_RESPONSE(&rinfo->ring, i, &bret);
240 id = bret.id;
241
242 @@ -1698,6 +1703,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
243
244 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
245
246 + xen_irq_lateeoi(irq, eoiflag);
247 +
248 return IRQ_HANDLED;
249
250 err:
251 @@ -1705,6 +1712,8 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
252
253 spin_unlock_irqrestore(&rinfo->ring_lock, flags);
254
255 + /* No EOI in order to avoid further interrupts. */
256 +
257 pr_alert("%s disabled for further use\n", info->gd->disk_name);
258 return IRQ_HANDLED;
259 }
260 @@ -1744,8 +1753,8 @@ static int setup_blkring(struct xenbus_device *dev,
261 if (err)
262 goto fail;
263
264 - err = bind_evtchn_to_irqhandler(rinfo->evtchn, blkif_interrupt, 0,
265 - "blkif", rinfo);
266 + err = bind_evtchn_to_irqhandler_lateeoi(rinfo->evtchn, blkif_interrupt,
267 + 0, "blkif", rinfo);
268 if (err <= 0) {
269 xenbus_dev_fatal(dev, err,
270 "bind_evtchn_to_irqhandler failed");
271 diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
272 index 6ff87cd867121..e4e1b4e94a67b 100644
273 --- a/drivers/clk/clk.c
274 +++ b/drivers/clk/clk.c
275 @@ -3299,6 +3299,14 @@ static int __clk_core_init(struct clk_core *core)
276
277 clk_prepare_lock();
278
279 + /*
280 + * Set hw->core after grabbing the prepare_lock to synchronize with
281 + * callers of clk_core_fill_parent_index() where we treat hw->core
282 + * being NULL as the clk not being registered yet. This is crucial so
283 + * that clks aren't parented until their parent is fully registered.
284 + */
285 + core->hw->core = core;
286 +
287 ret = clk_pm_runtime_get(core);
288 if (ret)
289 goto unlock;
290 @@ -3452,8 +3460,10 @@ static int __clk_core_init(struct clk_core *core)
291 out:
292 clk_pm_runtime_put(core);
293 unlock:
294 - if (ret)
295 + if (ret) {
296 hlist_del_init(&core->child_node);
297 + core->hw->core = NULL;
298 + }
299
300 clk_prepare_unlock();
301
302 @@ -3699,7 +3709,6 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
303 core->num_parents = init->num_parents;
304 core->min_rate = 0;
305 core->max_rate = ULONG_MAX;
306 - hw->core = core;
307
308 ret = clk_core_populate_parent_map(core, init);
309 if (ret)
310 @@ -3717,7 +3726,7 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
311 goto fail_create_clk;
312 }
313
314 - clk_core_link_consumer(hw->core, hw->clk);
315 + clk_core_link_consumer(core, hw->clk);
316
317 ret = __clk_core_init(core);
318 if (!ret)
319 diff --git a/drivers/dma/st_fdma.c b/drivers/dma/st_fdma.c
320 index 67087dbe2f9fa..f7393c19a1ba3 100644
321 --- a/drivers/dma/st_fdma.c
322 +++ b/drivers/dma/st_fdma.c
323 @@ -873,4 +873,4 @@ MODULE_LICENSE("GPL v2");
324 MODULE_DESCRIPTION("STMicroelectronics FDMA engine driver");
325 MODULE_AUTHOR("Ludovic.barre <Ludovic.barre@st.com>");
326 MODULE_AUTHOR("Peter Griffin <peter.griffin@linaro.org>");
327 -MODULE_ALIAS("platform: " DRIVER_NAME);
328 +MODULE_ALIAS("platform:" DRIVER_NAME);
329 diff --git a/drivers/firmware/scpi_pm_domain.c b/drivers/firmware/scpi_pm_domain.c
330 index 51201600d789b..800673910b511 100644
331 --- a/drivers/firmware/scpi_pm_domain.c
332 +++ b/drivers/firmware/scpi_pm_domain.c
333 @@ -16,7 +16,6 @@ struct scpi_pm_domain {
334 struct generic_pm_domain genpd;
335 struct scpi_ops *ops;
336 u32 domain;
337 - char name[30];
338 };
339
340 /*
341 @@ -110,8 +109,13 @@ static int scpi_pm_domain_probe(struct platform_device *pdev)
342
343 scpi_pd->domain = i;
344 scpi_pd->ops = scpi_ops;
345 - sprintf(scpi_pd->name, "%pOFn.%d", np, i);
346 - scpi_pd->genpd.name = scpi_pd->name;
347 + scpi_pd->genpd.name = devm_kasprintf(dev, GFP_KERNEL,
348 + "%pOFn.%d", np, i);
349 + if (!scpi_pd->genpd.name) {
350 + dev_err(dev, "Failed to allocate genpd name:%pOFn.%d\n",
351 + np, i);
352 + continue;
353 + }
354 scpi_pd->genpd.power_off = scpi_pd_power_off;
355 scpi_pd->genpd.power_on = scpi_pd_power_on;
356
357 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
358 index 06cdc22b5501d..5906a8951a6c6 100644
359 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
360 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
361 @@ -2906,8 +2906,8 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev)
362 AMD_PG_SUPPORT_CP |
363 AMD_PG_SUPPORT_GDS |
364 AMD_PG_SUPPORT_RLC_SMU_HS)) {
365 - WREG32(mmRLC_JUMP_TABLE_RESTORE,
366 - adev->gfx.rlc.cp_table_gpu_addr >> 8);
367 + WREG32_SOC15(GC, 0, mmRLC_JUMP_TABLE_RESTORE,
368 + adev->gfx.rlc.cp_table_gpu_addr >> 8);
369 gfx_v9_0_init_gfx_power_gating(adev);
370 }
371 }
372 diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
373 index 94fde39d9ff7a..d1bbd2b197fc6 100644
374 --- a/drivers/iio/adc/stm32-adc.c
375 +++ b/drivers/iio/adc/stm32-adc.c
376 @@ -933,6 +933,7 @@ pwr_dwn:
377
378 static void stm32h7_adc_unprepare(struct stm32_adc *adc)
379 {
380 + stm32_adc_writel(adc, STM32H7_ADC_PCSEL, 0);
381 stm32h7_adc_disable(adc);
382 stm32h7_adc_enter_pwr_down(adc);
383 }
384 diff --git a/drivers/input/touchscreen/of_touchscreen.c b/drivers/input/touchscreen/of_touchscreen.c
385 index e16ec4c7043a4..2962c3747adc3 100644
386 --- a/drivers/input/touchscreen/of_touchscreen.c
387 +++ b/drivers/input/touchscreen/of_touchscreen.c
388 @@ -81,8 +81,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
389 touchscreen_get_prop_u32(dev, "touchscreen-size-x",
390 input_abs_get_max(input,
391 axis) + 1,
392 - &maximum) |
393 - touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
394 + &maximum);
395 + data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-x",
396 input_abs_get_fuzz(input, axis),
397 &fuzz);
398 if (data_present)
399 @@ -95,8 +95,8 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
400 touchscreen_get_prop_u32(dev, "touchscreen-size-y",
401 input_abs_get_max(input,
402 axis) + 1,
403 - &maximum) |
404 - touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
405 + &maximum);
406 + data_present |= touchscreen_get_prop_u32(dev, "touchscreen-fuzz-y",
407 input_abs_get_fuzz(input, axis),
408 &fuzz);
409 if (data_present)
410 @@ -106,11 +106,11 @@ void touchscreen_parse_properties(struct input_dev *input, bool multitouch,
411 data_present = touchscreen_get_prop_u32(dev,
412 "touchscreen-max-pressure",
413 input_abs_get_max(input, axis),
414 - &maximum) |
415 - touchscreen_get_prop_u32(dev,
416 - "touchscreen-fuzz-pressure",
417 - input_abs_get_fuzz(input, axis),
418 - &fuzz);
419 + &maximum);
420 + data_present |= touchscreen_get_prop_u32(dev,
421 + "touchscreen-fuzz-pressure",
422 + input_abs_get_fuzz(input, axis),
423 + &fuzz);
424 if (data_present)
425 touchscreen_set_params(input, axis, 0, maximum, fuzz);
426
427 diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
428 index 9e4d1212f4c16..63f2baed3c8a6 100644
429 --- a/drivers/md/persistent-data/dm-btree-remove.c
430 +++ b/drivers/md/persistent-data/dm-btree-remove.c
431 @@ -423,9 +423,9 @@ static int rebalance_children(struct shadow_spine *s,
432
433 memcpy(n, dm_block_data(child),
434 dm_bm_block_size(dm_tm_get_bm(info->tm)));
435 - dm_tm_unlock(info->tm, child);
436
437 dm_tm_dec(info->tm, dm_block_location(child));
438 + dm_tm_unlock(info->tm, child);
439 return 0;
440 }
441
442 diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
443 index 55b4ae7037a4e..5fbce81b64c77 100644
444 --- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
445 +++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
446 @@ -931,8 +931,6 @@ static int mxl111sf_init(struct dvb_usb_device *d)
447 .len = sizeof(eeprom), .buf = eeprom },
448 };
449
450 - mutex_init(&state->msg_lock);
451 -
452 ret = get_chip_info(state);
453 if (mxl_fail(ret))
454 pr_err("failed to get chip info during probe");
455 @@ -1074,6 +1072,14 @@ static int mxl111sf_get_stream_config_dvbt(struct dvb_frontend *fe,
456 return 0;
457 }
458
459 +static int mxl111sf_probe(struct dvb_usb_device *dev)
460 +{
461 + struct mxl111sf_state *state = d_to_priv(dev);
462 +
463 + mutex_init(&state->msg_lock);
464 + return 0;
465 +}
466 +
467 static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
468 .driver_name = KBUILD_MODNAME,
469 .owner = THIS_MODULE,
470 @@ -1083,6 +1089,7 @@ static struct dvb_usb_device_properties mxl111sf_props_dvbt = {
471 .generic_bulk_ctrl_endpoint = 0x02,
472 .generic_bulk_ctrl_endpoint_response = 0x81,
473
474 + .probe = mxl111sf_probe,
475 .i2c_algo = &mxl111sf_i2c_algo,
476 .frontend_attach = mxl111sf_frontend_attach_dvbt,
477 .tuner_attach = mxl111sf_attach_tuner,
478 @@ -1124,6 +1131,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc = {
479 .generic_bulk_ctrl_endpoint = 0x02,
480 .generic_bulk_ctrl_endpoint_response = 0x81,
481
482 + .probe = mxl111sf_probe,
483 .i2c_algo = &mxl111sf_i2c_algo,
484 .frontend_attach = mxl111sf_frontend_attach_atsc,
485 .tuner_attach = mxl111sf_attach_tuner,
486 @@ -1165,6 +1173,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mh = {
487 .generic_bulk_ctrl_endpoint = 0x02,
488 .generic_bulk_ctrl_endpoint_response = 0x81,
489
490 + .probe = mxl111sf_probe,
491 .i2c_algo = &mxl111sf_i2c_algo,
492 .frontend_attach = mxl111sf_frontend_attach_mh,
493 .tuner_attach = mxl111sf_attach_tuner,
494 @@ -1233,6 +1242,7 @@ static struct dvb_usb_device_properties mxl111sf_props_atsc_mh = {
495 .generic_bulk_ctrl_endpoint = 0x02,
496 .generic_bulk_ctrl_endpoint_response = 0x81,
497
498 + .probe = mxl111sf_probe,
499 .i2c_algo = &mxl111sf_i2c_algo,
500 .frontend_attach = mxl111sf_frontend_attach_atsc_mh,
501 .tuner_attach = mxl111sf_attach_tuner,
502 @@ -1311,6 +1321,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury = {
503 .generic_bulk_ctrl_endpoint = 0x02,
504 .generic_bulk_ctrl_endpoint_response = 0x81,
505
506 + .probe = mxl111sf_probe,
507 .i2c_algo = &mxl111sf_i2c_algo,
508 .frontend_attach = mxl111sf_frontend_attach_mercury,
509 .tuner_attach = mxl111sf_attach_tuner,
510 @@ -1381,6 +1392,7 @@ static struct dvb_usb_device_properties mxl111sf_props_mercury_mh = {
511 .generic_bulk_ctrl_endpoint = 0x02,
512 .generic_bulk_ctrl_endpoint_response = 0x81,
513
514 + .probe = mxl111sf_probe,
515 .i2c_algo = &mxl111sf_i2c_algo,
516 .frontend_attach = mxl111sf_frontend_attach_mercury_mh,
517 .tuner_attach = mxl111sf_attach_tuner,
518 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
519 index 470d12e308814..5a2094a281e15 100644
520 --- a/drivers/net/ethernet/broadcom/bcmsysport.c
521 +++ b/drivers/net/ethernet/broadcom/bcmsysport.c
522 @@ -1277,11 +1277,11 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
523 struct bcm_sysport_priv *priv = netdev_priv(dev);
524 struct device *kdev = &priv->pdev->dev;
525 struct bcm_sysport_tx_ring *ring;
526 + unsigned long flags, desc_flags;
527 struct bcm_sysport_cb *cb;
528 struct netdev_queue *txq;
529 u32 len_status, addr_lo;
530 unsigned int skb_len;
531 - unsigned long flags;
532 dma_addr_t mapping;
533 u16 queue;
534 int ret;
535 @@ -1339,8 +1339,10 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
536 ring->desc_count--;
537
538 /* Ports are latched, so write upper address first */
539 + spin_lock_irqsave(&priv->desc_lock, desc_flags);
540 tdma_writel(priv, len_status, TDMA_WRITE_PORT_HI(ring->index));
541 tdma_writel(priv, addr_lo, TDMA_WRITE_PORT_LO(ring->index));
542 + spin_unlock_irqrestore(&priv->desc_lock, desc_flags);
543
544 /* Check ring space and update SW control flow */
545 if (ring->desc_count == 0)
546 @@ -1970,6 +1972,7 @@ static int bcm_sysport_open(struct net_device *dev)
547 }
548
549 /* Initialize both hardware and software ring */
550 + spin_lock_init(&priv->desc_lock);
551 for (i = 0; i < dev->num_tx_queues; i++) {
552 ret = bcm_sysport_init_tx_ring(priv, i);
553 if (ret) {
554 diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
555 index 6d80735fbc7f4..57336ca3f4277 100644
556 --- a/drivers/net/ethernet/broadcom/bcmsysport.h
557 +++ b/drivers/net/ethernet/broadcom/bcmsysport.h
558 @@ -742,6 +742,7 @@ struct bcm_sysport_priv {
559 int wol_irq;
560
561 /* Transmit rings */
562 + spinlock_t desc_lock;
563 struct bcm_sysport_tx_ring *tx_rings;
564
565 /* Receive queue */
566 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
567 index c11244a9b7e69..3df25b231ab5c 100644
568 --- a/drivers/net/ethernet/intel/igb/igb_main.c
569 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
570 @@ -7374,6 +7374,20 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
571 struct vf_mac_filter *entry = NULL;
572 int ret = 0;
573
574 + if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
575 + !vf_data->trusted) {
576 + dev_warn(&pdev->dev,
577 + "VF %d requested MAC filter but is administratively denied\n",
578 + vf);
579 + return -EINVAL;
580 + }
581 + if (!is_valid_ether_addr(addr)) {
582 + dev_warn(&pdev->dev,
583 + "VF %d attempted to set invalid MAC filter\n",
584 + vf);
585 + return -EINVAL;
586 + }
587 +
588 switch (info) {
589 case E1000_VF_MAC_FILTER_CLR:
590 /* remove all unicast MAC filters related to the current VF */
591 @@ -7387,20 +7401,6 @@ static int igb_set_vf_mac_filter(struct igb_adapter *adapter, const int vf,
592 }
593 break;
594 case E1000_VF_MAC_FILTER_ADD:
595 - if ((vf_data->flags & IGB_VF_FLAG_PF_SET_MAC) &&
596 - !vf_data->trusted) {
597 - dev_warn(&pdev->dev,
598 - "VF %d requested MAC filter but is administratively denied\n",
599 - vf);
600 - return -EINVAL;
601 - }
602 - if (!is_valid_ether_addr(addr)) {
603 - dev_warn(&pdev->dev,
604 - "VF %d attempted to set invalid MAC filter\n",
605 - vf);
606 - return -EINVAL;
607 - }
608 -
609 /* try to find empty slot in the list */
610 list_for_each(pos, &adapter->vf_macs.l) {
611 entry = list_entry(pos, struct vf_mac_filter, l);
612 diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
613 index 77cb2ab7dab40..1082e49ea0560 100644
614 --- a/drivers/net/ethernet/intel/igbvf/netdev.c
615 +++ b/drivers/net/ethernet/intel/igbvf/netdev.c
616 @@ -2887,6 +2887,7 @@ static int igbvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
617 return 0;
618
619 err_hw_init:
620 + netif_napi_del(&adapter->rx_ring->napi);
621 kfree(adapter->tx_ring);
622 kfree(adapter->rx_ring);
623 err_sw_init:
624 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
625 index 9c42f741ed5ef..74728c0a44a81 100644
626 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
627 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c
628 @@ -3405,6 +3405,9 @@ static s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw)
629 /* flush pending Tx transactions */
630 ixgbe_clear_tx_pending(hw);
631
632 + /* set MDIO speed before talking to the PHY in case it's the 1st time */
633 + ixgbe_set_mdio_speed(hw);
634 +
635 /* PHY ops must be identified and initialized prior to reset */
636 status = hw->phy.ops.init(hw);
637 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED ||
638 diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
639 index 2b74425822ab1..e0a4acc6144bf 100644
640 --- a/drivers/net/netdevsim/bpf.c
641 +++ b/drivers/net/netdevsim/bpf.c
642 @@ -510,6 +510,7 @@ nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
643 goto err_free;
644 key = nmap->entry[i].key;
645 *key = i;
646 + memset(nmap->entry[i].value, 0, offmap->map.value_size);
647 }
648 }
649
650 diff --git a/drivers/net/wireless/marvell/mwifiex/cmdevt.c b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
651 index e8788c35a453d..ec04515bd9dfa 100644
652 --- a/drivers/net/wireless/marvell/mwifiex/cmdevt.c
653 +++ b/drivers/net/wireless/marvell/mwifiex/cmdevt.c
654 @@ -322,9 +322,9 @@ static int mwifiex_dnld_sleep_confirm_cmd(struct mwifiex_adapter *adapter)
655
656 adapter->seq_num++;
657 sleep_cfm_buf->seq_num =
658 - cpu_to_le16((HostCmd_SET_SEQ_NO_BSS_INFO
659 + cpu_to_le16(HostCmd_SET_SEQ_NO_BSS_INFO
660 (adapter->seq_num, priv->bss_num,
661 - priv->bss_type)));
662 + priv->bss_type));
663
664 mwifiex_dbg(adapter, CMD,
665 "cmd: DNLD_CMD: %#x, act %#x, len %d, seqno %#x\n",
666 diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h
667 index 8b9d0809daf62..076ea1c4b921d 100644
668 --- a/drivers/net/wireless/marvell/mwifiex/fw.h
669 +++ b/drivers/net/wireless/marvell/mwifiex/fw.h
670 @@ -512,10 +512,10 @@ enum mwifiex_channel_flags {
671
672 #define RF_ANTENNA_AUTO 0xFFFF
673
674 -#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) { \
675 - (((seq) & 0x00ff) | \
676 - (((num) & 0x000f) << 8)) | \
677 - (((type) & 0x000f) << 12); }
678 +#define HostCmd_SET_SEQ_NO_BSS_INFO(seq, num, type) \
679 + ((((seq) & 0x00ff) | \
680 + (((num) & 0x000f) << 8)) | \
681 + (((type) & 0x000f) << 12))
682
683 #define HostCmd_GET_SEQ_NO(seq) \
684 ((seq) & HostCmd_SEQ_NUM_MASK)
685 diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
686 index 32fe131ba366d..f7e746f1c9fb3 100644
687 --- a/drivers/net/xen-netback/common.h
688 +++ b/drivers/net/xen-netback/common.h
689 @@ -203,6 +203,7 @@ struct xenvif_queue { /* Per-queue data for xenvif */
690 unsigned int rx_queue_max;
691 unsigned int rx_queue_len;
692 unsigned long last_rx_time;
693 + unsigned int rx_slots_needed;
694 bool stalled;
695
696 struct xenvif_copy_state rx_copy;
697 diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c
698 index 48e2006f96ce6..7f68067c01745 100644
699 --- a/drivers/net/xen-netback/rx.c
700 +++ b/drivers/net/xen-netback/rx.c
701 @@ -33,28 +33,36 @@
702 #include <xen/xen.h>
703 #include <xen/events.h>
704
705 -static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
706 +/*
707 + * Update the needed ring page slots for the first SKB queued.
708 + * Note that any call sequence outside the RX thread calling this function
709 + * needs to wake up the RX thread via a call of xenvif_kick_thread()
710 + * afterwards in order to avoid a race with putting the thread to sleep.
711 + */
712 +static void xenvif_update_needed_slots(struct xenvif_queue *queue,
713 + const struct sk_buff *skb)
714 {
715 - RING_IDX prod, cons;
716 - struct sk_buff *skb;
717 - int needed;
718 - unsigned long flags;
719 -
720 - spin_lock_irqsave(&queue->rx_queue.lock, flags);
721 + unsigned int needed = 0;
722
723 - skb = skb_peek(&queue->rx_queue);
724 - if (!skb) {
725 - spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
726 - return false;
727 + if (skb) {
728 + needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
729 + if (skb_is_gso(skb))
730 + needed++;
731 + if (skb->sw_hash)
732 + needed++;
733 }
734
735 - needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
736 - if (skb_is_gso(skb))
737 - needed++;
738 - if (skb->sw_hash)
739 - needed++;
740 + WRITE_ONCE(queue->rx_slots_needed, needed);
741 +}
742
743 - spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
744 +static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
745 +{
746 + RING_IDX prod, cons;
747 + unsigned int needed;
748 +
749 + needed = READ_ONCE(queue->rx_slots_needed);
750 + if (!needed)
751 + return false;
752
753 do {
754 prod = queue->rx.sring->req_prod;
755 @@ -80,13 +88,19 @@ void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
756
757 spin_lock_irqsave(&queue->rx_queue.lock, flags);
758
759 - __skb_queue_tail(&queue->rx_queue, skb);
760 -
761 - queue->rx_queue_len += skb->len;
762 - if (queue->rx_queue_len > queue->rx_queue_max) {
763 + if (queue->rx_queue_len >= queue->rx_queue_max) {
764 struct net_device *dev = queue->vif->dev;
765
766 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
767 + kfree_skb(skb);
768 + queue->vif->dev->stats.rx_dropped++;
769 + } else {
770 + if (skb_queue_empty(&queue->rx_queue))
771 + xenvif_update_needed_slots(queue, skb);
772 +
773 + __skb_queue_tail(&queue->rx_queue, skb);
774 +
775 + queue->rx_queue_len += skb->len;
776 }
777
778 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
779 @@ -100,6 +114,8 @@ static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
780
781 skb = __skb_dequeue(&queue->rx_queue);
782 if (skb) {
783 + xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));
784 +
785 queue->rx_queue_len -= skb->len;
786 if (queue->rx_queue_len < queue->rx_queue_max) {
787 struct netdev_queue *txq;
788 @@ -134,6 +150,7 @@ static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
789 break;
790 xenvif_rx_dequeue(queue);
791 kfree_skb(skb);
792 + queue->vif->dev->stats.rx_dropped++;
793 }
794 }
795
796 @@ -474,27 +491,31 @@ void xenvif_rx_action(struct xenvif_queue *queue)
797 xenvif_rx_copy_flush(queue);
798 }
799
800 -static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
801 +static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
802 {
803 RING_IDX prod, cons;
804
805 prod = queue->rx.sring->req_prod;
806 cons = queue->rx.req_cons;
807
808 + return prod - cons;
809 +}
810 +
811 +static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
812 +{
813 + unsigned int needed = READ_ONCE(queue->rx_slots_needed);
814 +
815 return !queue->stalled &&
816 - prod - cons < 1 &&
817 + xenvif_rx_queue_slots(queue) < needed &&
818 time_after(jiffies,
819 queue->last_rx_time + queue->vif->stall_timeout);
820 }
821
822 static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
823 {
824 - RING_IDX prod, cons;
825 -
826 - prod = queue->rx.sring->req_prod;
827 - cons = queue->rx.req_cons;
828 + unsigned int needed = READ_ONCE(queue->rx_slots_needed);
829
830 - return queue->stalled && prod - cons >= 1;
831 + return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
832 }
833
834 bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
835 diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
836 index d6f44343213cc..d2b3381f71825 100644
837 --- a/drivers/net/xen-netfront.c
838 +++ b/drivers/net/xen-netfront.c
839 @@ -142,6 +142,9 @@ struct netfront_queue {
840 struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
841 grant_ref_t gref_rx_head;
842 grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
843 +
844 + unsigned int rx_rsp_unconsumed;
845 + spinlock_t rx_cons_lock;
846 };
847
848 struct netfront_info {
849 @@ -364,12 +367,13 @@ static int xennet_open(struct net_device *dev)
850 return 0;
851 }
852
853 -static void xennet_tx_buf_gc(struct netfront_queue *queue)
854 +static bool xennet_tx_buf_gc(struct netfront_queue *queue)
855 {
856 RING_IDX cons, prod;
857 unsigned short id;
858 struct sk_buff *skb;
859 bool more_to_do;
860 + bool work_done = false;
861 const struct device *dev = &queue->info->netdev->dev;
862
863 BUG_ON(!netif_carrier_ok(queue->info->netdev));
864 @@ -386,6 +390,8 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
865 for (cons = queue->tx.rsp_cons; cons != prod; cons++) {
866 struct xen_netif_tx_response txrsp;
867
868 + work_done = true;
869 +
870 RING_COPY_RESPONSE(&queue->tx, cons, &txrsp);
871 if (txrsp.status == XEN_NETIF_RSP_NULL)
872 continue;
873 @@ -429,11 +435,13 @@ static void xennet_tx_buf_gc(struct netfront_queue *queue)
874
875 xennet_maybe_wake_tx(queue);
876
877 - return;
878 + return work_done;
879
880 err:
881 queue->info->broken = true;
882 dev_alert(dev, "Disabled for further use\n");
883 +
884 + return work_done;
885 }
886
887 struct xennet_gnttab_make_txreq {
888 @@ -753,6 +761,16 @@ static int xennet_close(struct net_device *dev)
889 return 0;
890 }
891
892 +static void xennet_set_rx_rsp_cons(struct netfront_queue *queue, RING_IDX val)
893 +{
894 + unsigned long flags;
895 +
896 + spin_lock_irqsave(&queue->rx_cons_lock, flags);
897 + queue->rx.rsp_cons = val;
898 + queue->rx_rsp_unconsumed = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
899 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
900 +}
901 +
902 static void xennet_move_rx_slot(struct netfront_queue *queue, struct sk_buff *skb,
903 grant_ref_t ref)
904 {
905 @@ -804,7 +822,7 @@ static int xennet_get_extras(struct netfront_queue *queue,
906 xennet_move_rx_slot(queue, skb, ref);
907 } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
908
909 - queue->rx.rsp_cons = cons;
910 + xennet_set_rx_rsp_cons(queue, cons);
911 return err;
912 }
913
914 @@ -884,7 +902,7 @@ next:
915 }
916
917 if (unlikely(err))
918 - queue->rx.rsp_cons = cons + slots;
919 + xennet_set_rx_rsp_cons(queue, cons + slots);
920
921 return err;
922 }
923 @@ -938,7 +956,8 @@ static int xennet_fill_frags(struct netfront_queue *queue,
924 __pskb_pull_tail(skb, pull_to - skb_headlen(skb));
925 }
926 if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
927 - queue->rx.rsp_cons = ++cons + skb_queue_len(list);
928 + xennet_set_rx_rsp_cons(queue,
929 + ++cons + skb_queue_len(list));
930 kfree_skb(nskb);
931 return -ENOENT;
932 }
933 @@ -951,7 +970,7 @@ static int xennet_fill_frags(struct netfront_queue *queue,
934 kfree_skb(nskb);
935 }
936
937 - queue->rx.rsp_cons = cons;
938 + xennet_set_rx_rsp_cons(queue, cons);
939
940 return 0;
941 }
942 @@ -1072,7 +1091,9 @@ err:
943
944 if (unlikely(xennet_set_skb_gso(skb, gso))) {
945 __skb_queue_head(&tmpq, skb);
946 - queue->rx.rsp_cons += skb_queue_len(&tmpq);
947 + xennet_set_rx_rsp_cons(queue,
948 + queue->rx.rsp_cons +
949 + skb_queue_len(&tmpq));
950 goto err;
951 }
952 }
953 @@ -1096,7 +1117,8 @@ err:
954
955 __skb_queue_tail(&rxq, skb);
956
957 - i = ++queue->rx.rsp_cons;
958 + i = queue->rx.rsp_cons + 1;
959 + xennet_set_rx_rsp_cons(queue, i);
960 work_done++;
961 }
962
963 @@ -1258,40 +1280,79 @@ static int xennet_set_features(struct net_device *dev,
964 return 0;
965 }
966
967 -static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
968 +static bool xennet_handle_tx(struct netfront_queue *queue, unsigned int *eoi)
969 {
970 - struct netfront_queue *queue = dev_id;
971 unsigned long flags;
972
973 - if (queue->info->broken)
974 - return IRQ_HANDLED;
975 + if (unlikely(queue->info->broken))
976 + return false;
977
978 spin_lock_irqsave(&queue->tx_lock, flags);
979 - xennet_tx_buf_gc(queue);
980 + if (xennet_tx_buf_gc(queue))
981 + *eoi = 0;
982 spin_unlock_irqrestore(&queue->tx_lock, flags);
983
984 + return true;
985 +}
986 +
987 +static irqreturn_t xennet_tx_interrupt(int irq, void *dev_id)
988 +{
989 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
990 +
991 + if (likely(xennet_handle_tx(dev_id, &eoiflag)))
992 + xen_irq_lateeoi(irq, eoiflag);
993 +
994 return IRQ_HANDLED;
995 }
996
997 -static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
998 +static bool xennet_handle_rx(struct netfront_queue *queue, unsigned int *eoi)
999 {
1000 - struct netfront_queue *queue = dev_id;
1001 - struct net_device *dev = queue->info->netdev;
1002 + unsigned int work_queued;
1003 + unsigned long flags;
1004
1005 - if (queue->info->broken)
1006 - return IRQ_HANDLED;
1007 + if (unlikely(queue->info->broken))
1008 + return false;
1009 +
1010 + spin_lock_irqsave(&queue->rx_cons_lock, flags);
1011 + work_queued = RING_HAS_UNCONSUMED_RESPONSES(&queue->rx);
1012 + if (work_queued > queue->rx_rsp_unconsumed) {
1013 + queue->rx_rsp_unconsumed = work_queued;
1014 + *eoi = 0;
1015 + } else if (unlikely(work_queued < queue->rx_rsp_unconsumed)) {
1016 + const struct device *dev = &queue->info->netdev->dev;
1017 +
1018 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1019 + dev_alert(dev, "RX producer index going backwards\n");
1020 + dev_alert(dev, "Disabled for further use\n");
1021 + queue->info->broken = true;
1022 + return false;
1023 + }
1024 + spin_unlock_irqrestore(&queue->rx_cons_lock, flags);
1025
1026 - if (likely(netif_carrier_ok(dev) &&
1027 - RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1028 + if (likely(netif_carrier_ok(queue->info->netdev) && work_queued))
1029 napi_schedule(&queue->napi);
1030
1031 + return true;
1032 +}
1033 +
1034 +static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1035 +{
1036 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1037 +
1038 + if (likely(xennet_handle_rx(dev_id, &eoiflag)))
1039 + xen_irq_lateeoi(irq, eoiflag);
1040 +
1041 return IRQ_HANDLED;
1042 }
1043
1044 static irqreturn_t xennet_interrupt(int irq, void *dev_id)
1045 {
1046 - xennet_tx_interrupt(irq, dev_id);
1047 - xennet_rx_interrupt(irq, dev_id);
1048 + unsigned int eoiflag = XEN_EOI_FLAG_SPURIOUS;
1049 +
1050 + if (xennet_handle_tx(dev_id, &eoiflag) &&
1051 + xennet_handle_rx(dev_id, &eoiflag))
1052 + xen_irq_lateeoi(irq, eoiflag);
1053 +
1054 return IRQ_HANDLED;
1055 }
1056
1057 @@ -1525,9 +1586,10 @@ static int setup_netfront_single(struct netfront_queue *queue)
1058 if (err < 0)
1059 goto fail;
1060
1061 - err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1062 - xennet_interrupt,
1063 - 0, queue->info->netdev->name, queue);
1064 + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1065 + xennet_interrupt, 0,
1066 + queue->info->netdev->name,
1067 + queue);
1068 if (err < 0)
1069 goto bind_fail;
1070 queue->rx_evtchn = queue->tx_evtchn;
1071 @@ -1555,18 +1617,18 @@ static int setup_netfront_split(struct netfront_queue *queue)
1072
1073 snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
1074 "%s-tx", queue->name);
1075 - err = bind_evtchn_to_irqhandler(queue->tx_evtchn,
1076 - xennet_tx_interrupt,
1077 - 0, queue->tx_irq_name, queue);
1078 + err = bind_evtchn_to_irqhandler_lateeoi(queue->tx_evtchn,
1079 + xennet_tx_interrupt, 0,
1080 + queue->tx_irq_name, queue);
1081 if (err < 0)
1082 goto bind_tx_fail;
1083 queue->tx_irq = err;
1084
1085 snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
1086 "%s-rx", queue->name);
1087 - err = bind_evtchn_to_irqhandler(queue->rx_evtchn,
1088 - xennet_rx_interrupt,
1089 - 0, queue->rx_irq_name, queue);
1090 + err = bind_evtchn_to_irqhandler_lateeoi(queue->rx_evtchn,
1091 + xennet_rx_interrupt, 0,
1092 + queue->rx_irq_name, queue);
1093 if (err < 0)
1094 goto bind_rx_fail;
1095 queue->rx_irq = err;
1096 @@ -1668,6 +1730,7 @@ static int xennet_init_queue(struct netfront_queue *queue)
1097
1098 spin_lock_init(&queue->tx_lock);
1099 spin_lock_init(&queue->rx_lock);
1100 + spin_lock_init(&queue->rx_cons_lock);
1101
1102 timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
1103
1104 diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
1105 index d0cc6c0d74d6b..7dc10c2b4785d 100644
1106 --- a/drivers/pci/msi.c
1107 +++ b/drivers/pci/msi.c
1108 @@ -827,9 +827,6 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
1109 goto out_disable;
1110 }
1111
1112 - /* Ensure that all table entries are masked. */
1113 - msix_mask_all(base, tsize);
1114 -
1115 ret = msix_setup_entries(dev, base, entries, nvec, affd);
1116 if (ret)
1117 goto out_disable;
1118 @@ -852,6 +849,16 @@ static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
1119 /* Set MSI-X enabled bits and unmask the function */
1120 pci_intx_for_msi(dev, 0);
1121 dev->msix_enabled = 1;
1122 +
1123 + /*
1124 + * Ensure that all table entries are masked to prevent
1125 + * stale entries from firing in a crash kernel.
1126 + *
1127 + * Done late to deal with a broken Marvell NVME device
1128 + * which takes the MSI-X mask bits into account even
1129 + * when MSI-X is disabled, which prevents MSI delivery.
1130 + */
1131 + msix_mask_all(base, tsize);
1132 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
1133
1134 pcibios_free_irq(dev);
1135 @@ -878,7 +885,7 @@ out_free:
1136 free_msi_irqs(dev);
1137
1138 out_disable:
1139 - pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1140 + pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
1141
1142 return ret;
1143 }
1144 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
1145 index 44181a2cbf18d..408166bd20f33 100644
1146 --- a/drivers/scsi/scsi_debug.c
1147 +++ b/drivers/scsi/scsi_debug.c
1148 @@ -2296,11 +2296,11 @@ static int resp_mode_select(struct scsi_cmnd *scp,
1149 __func__, param_len, res);
1150 md_len = mselect6 ? (arr[0] + 1) : (get_unaligned_be16(arr + 0) + 2);
1151 bd_len = mselect6 ? arr[3] : get_unaligned_be16(arr + 6);
1152 - if (md_len > 2) {
1153 + off = bd_len + (mselect6 ? 4 : 8);
1154 + if (md_len > 2 || off >= res) {
1155 mk_sense_invalid_fld(scp, SDEB_IN_DATA, 0, -1);
1156 return check_condition_result;
1157 }
1158 - off = bd_len + (mselect6 ? 4 : 8);
1159 mpage = arr[off] & 0x3f;
1160 ps = !!(arr[off] & 0x80);
1161 if (ps) {
1162 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
1163 index 3eb44e65b3261..1a54bac512b69 100644
1164 --- a/drivers/soc/tegra/fuse/fuse-tegra.c
1165 +++ b/drivers/soc/tegra/fuse/fuse-tegra.c
1166 @@ -172,7 +172,7 @@ static struct platform_driver tegra_fuse_driver = {
1167 };
1168 builtin_platform_driver(tegra_fuse_driver);
1169
1170 -bool __init tegra_fuse_read_spare(unsigned int spare)
1171 +u32 __init tegra_fuse_read_spare(unsigned int spare)
1172 {
1173 unsigned int offset = fuse->soc->info->spare + spare * 4;
1174
1175 diff --git a/drivers/soc/tegra/fuse/fuse.h b/drivers/soc/tegra/fuse/fuse.h
1176 index 7230cb3305033..6996cfc7cbca3 100644
1177 --- a/drivers/soc/tegra/fuse/fuse.h
1178 +++ b/drivers/soc/tegra/fuse/fuse.h
1179 @@ -53,7 +53,7 @@ struct tegra_fuse {
1180 void tegra_init_revision(void);
1181 void tegra_init_apbmisc(void);
1182
1183 -bool __init tegra_fuse_read_spare(unsigned int spare);
1184 +u32 __init tegra_fuse_read_spare(unsigned int spare);
1185 u32 __init tegra_fuse_read_early(unsigned int offset);
1186
1187 #ifdef CONFIG_ARCH_TEGRA_2x_SOC
1188 diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
1189 index 15da02aeee948..2d2d04c071401 100644
1190 --- a/drivers/tty/hvc/hvc_xen.c
1191 +++ b/drivers/tty/hvc/hvc_xen.c
1192 @@ -37,6 +37,8 @@ struct xencons_info {
1193 struct xenbus_device *xbdev;
1194 struct xencons_interface *intf;
1195 unsigned int evtchn;
1196 + XENCONS_RING_IDX out_cons;
1197 + unsigned int out_cons_same;
1198 struct hvc_struct *hvc;
1199 int irq;
1200 int vtermno;
1201 @@ -138,6 +140,8 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
1202 XENCONS_RING_IDX cons, prod;
1203 int recv = 0;
1204 struct xencons_info *xencons = vtermno_to_xencons(vtermno);
1205 + unsigned int eoiflag = 0;
1206 +
1207 if (xencons == NULL)
1208 return -EINVAL;
1209 intf = xencons->intf;
1210 @@ -157,7 +161,27 @@ static int domU_read_console(uint32_t vtermno, char *buf, int len)
1211 mb(); /* read ring before consuming */
1212 intf->in_cons = cons;
1213
1214 - notify_daemon(xencons);
1215 + /*
1216 + * When to mark interrupt having been spurious:
1217 + * - there was no new data to be read, and
1218 + * - the backend did not consume some output bytes, and
1219 + * - the previous round with no read data didn't see consumed bytes
1220 + * (we might have a race with an interrupt being in flight while
1221 + * updating xencons->out_cons, so account for that by allowing one
1222 + * round without any visible reason)
1223 + */
1224 + if (intf->out_cons != xencons->out_cons) {
1225 + xencons->out_cons = intf->out_cons;
1226 + xencons->out_cons_same = 0;
1227 + }
1228 + if (recv) {
1229 + notify_daemon(xencons);
1230 + } else if (xencons->out_cons_same++ > 1) {
1231 + eoiflag = XEN_EOI_FLAG_SPURIOUS;
1232 + }
1233 +
1234 + xen_irq_lateeoi(xencons->irq, eoiflag);
1235 +
1236 return recv;
1237 }
1238
1239 @@ -386,7 +410,7 @@ static int xencons_connect_backend(struct xenbus_device *dev,
1240 if (ret)
1241 return ret;
1242 info->evtchn = evtchn;
1243 - irq = bind_evtchn_to_irq(evtchn);
1244 + irq = bind_interdomain_evtchn_to_irq_lateeoi(dev->otherend_id, evtchn);
1245 if (irq < 0)
1246 return irq;
1247 info->irq = irq;
1248 @@ -550,7 +574,7 @@ static int __init xen_hvc_init(void)
1249 return r;
1250
1251 info = vtermno_to_xencons(HVC_COOKIE);
1252 - info->irq = bind_evtchn_to_irq(info->evtchn);
1253 + info->irq = bind_evtchn_to_irq_lateeoi(info->evtchn);
1254 }
1255 if (info->irq < 0)
1256 info->irq = 0; /* NO_IRQ */
1257 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
1258 index e170c5b4d6f0c..a118c44c70e1e 100644
1259 --- a/drivers/usb/core/quirks.c
1260 +++ b/drivers/usb/core/quirks.c
1261 @@ -435,6 +435,9 @@ static const struct usb_device_id usb_quirk_list[] = {
1262 { USB_DEVICE(0x1532, 0x0116), .driver_info =
1263 USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
1264
1265 + /* Lenovo USB-C to Ethernet Adapter RTL8153-04 */
1266 + { USB_DEVICE(0x17ef, 0x720c), .driver_info = USB_QUIRK_NO_LPM },
1267 +
1268 /* Lenovo Powered USB-C Travel Hub (4X90S92381, RTL8153 GigE) */
1269 { USB_DEVICE(0x17ef, 0x721e), .driver_info = USB_QUIRK_NO_LPM },
1270
1271 diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
1272 index d2980e30f3417..c5acf5c39fb18 100644
1273 --- a/drivers/usb/gadget/composite.c
1274 +++ b/drivers/usb/gadget/composite.c
1275 @@ -1649,14 +1649,14 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1276 u8 endp;
1277
1278 if (w_length > USB_COMP_EP0_BUFSIZ) {
1279 - if (ctrl->bRequestType == USB_DIR_OUT) {
1280 - goto done;
1281 - } else {
1282 + if (ctrl->bRequestType & USB_DIR_IN) {
1283 /* Cast away the const, we are going to overwrite on purpose. */
1284 __le16 *temp = (__le16 *)&ctrl->wLength;
1285
1286 *temp = cpu_to_le16(USB_COMP_EP0_BUFSIZ);
1287 w_length = USB_COMP_EP0_BUFSIZ;
1288 + } else {
1289 + goto done;
1290 }
1291 }
1292
1293 diff --git a/drivers/usb/gadget/legacy/dbgp.c b/drivers/usb/gadget/legacy/dbgp.c
1294 index 355bc7dab9d5f..6bcbad3825802 100644
1295 --- a/drivers/usb/gadget/legacy/dbgp.c
1296 +++ b/drivers/usb/gadget/legacy/dbgp.c
1297 @@ -346,14 +346,14 @@ static int dbgp_setup(struct usb_gadget *gadget,
1298 u16 len = 0;
1299
1300 if (length > DBGP_REQ_LEN) {
1301 - if (ctrl->bRequestType == USB_DIR_OUT) {
1302 - return err;
1303 - } else {
1304 + if (ctrl->bRequestType & USB_DIR_IN) {
1305 /* Cast away the const, we are going to overwrite on purpose. */
1306 __le16 *temp = (__le16 *)&ctrl->wLength;
1307
1308 *temp = cpu_to_le16(DBGP_REQ_LEN);
1309 length = DBGP_REQ_LEN;
1310 + } else {
1311 + return err;
1312 }
1313 }
1314
1315 diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
1316 index f0aff79f544c3..5f1e15172403e 100644
1317 --- a/drivers/usb/gadget/legacy/inode.c
1318 +++ b/drivers/usb/gadget/legacy/inode.c
1319 @@ -1336,14 +1336,14 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
1320 u16 w_length = le16_to_cpu(ctrl->wLength);
1321
1322 if (w_length > RBUF_SIZE) {
1323 - if (ctrl->bRequestType == USB_DIR_OUT) {
1324 - return value;
1325 - } else {
1326 + if (ctrl->bRequestType & USB_DIR_IN) {
1327 /* Cast away the const, we are going to overwrite on purpose. */
1328 __le16 *temp = (__le16 *)&ctrl->wLength;
1329
1330 *temp = cpu_to_le16(RBUF_SIZE);
1331 w_length = RBUF_SIZE;
1332 + } else {
1333 + return value;
1334 }
1335 }
1336
1337 diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
1338 index beee3543950fe..ded05c39e4d1c 100644
1339 --- a/drivers/usb/host/xhci-pci.c
1340 +++ b/drivers/usb/host/xhci-pci.c
1341 @@ -65,6 +65,8 @@
1342 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e
1343 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6
1344 #define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7
1345 +#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c
1346 +#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f
1347
1348 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042
1349 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142
1350 @@ -303,7 +305,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
1351 pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 ||
1352 pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 ||
1353 pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 ||
1354 - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6))
1355 + pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 ||
1356 + pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 ||
1357 + pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8))
1358 xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW;
1359
1360 if (xhci->quirks & XHCI_RESET_ON_RESUME)
1361 diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
1362 index 004b6598706b1..50f289b124d0d 100644
1363 --- a/drivers/usb/serial/cp210x.c
1364 +++ b/drivers/usb/serial/cp210x.c
1365 @@ -1552,6 +1552,8 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
1366
1367 /* 2 banks of GPIO - One for the pins taken from each serial port */
1368 if (intf_num == 0) {
1369 + priv->gc.ngpio = 2;
1370 +
1371 if (mode.eci == CP210X_PIN_MODE_MODEM) {
1372 /* mark all GPIOs of this interface as reserved */
1373 priv->gpio_altfunc = 0xff;
1374 @@ -1562,8 +1564,9 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
1375 priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
1376 CP210X_ECI_GPIO_MODE_MASK) >>
1377 CP210X_ECI_GPIO_MODE_OFFSET);
1378 - priv->gc.ngpio = 2;
1379 } else if (intf_num == 1) {
1380 + priv->gc.ngpio = 3;
1381 +
1382 if (mode.sci == CP210X_PIN_MODE_MODEM) {
1383 /* mark all GPIOs of this interface as reserved */
1384 priv->gpio_altfunc = 0xff;
1385 @@ -1574,7 +1577,6 @@ static int cp2105_gpioconf_init(struct usb_serial *serial)
1386 priv->gpio_pushpull = (u8)((le16_to_cpu(config.gpio_mode) &
1387 CP210X_SCI_GPIO_MODE_MASK) >>
1388 CP210X_SCI_GPIO_MODE_OFFSET);
1389 - priv->gc.ngpio = 3;
1390 } else {
1391 return -ENODEV;
1392 }
1393 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
1394 index 74203ed5479fa..2397d83434931 100644
1395 --- a/drivers/usb/serial/option.c
1396 +++ b/drivers/usb/serial/option.c
1397 @@ -1219,6 +1219,14 @@ static const struct usb_device_id option_ids[] = {
1398 .driver_info = NCTRL(2) | RSVD(3) },
1399 { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1063, 0xff), /* Telit LN920 (ECM) */
1400 .driver_info = NCTRL(0) | RSVD(1) },
1401 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1070, 0xff), /* Telit FN990 (rmnet) */
1402 + .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
1403 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1071, 0xff), /* Telit FN990 (MBIM) */
1404 + .driver_info = NCTRL(0) | RSVD(1) },
1405 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1072, 0xff), /* Telit FN990 (RNDIS) */
1406 + .driver_info = NCTRL(2) | RSVD(3) },
1407 + { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1073, 0xff), /* Telit FN990 (ECM) */
1408 + .driver_info = NCTRL(0) | RSVD(1) },
1409 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910),
1410 .driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
1411 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
1412 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1413 index e442d400dbb2e..deb72fd7ec504 100644
1414 --- a/drivers/virtio/virtio_ring.c
1415 +++ b/drivers/virtio/virtio_ring.c
1416 @@ -263,7 +263,7 @@ size_t virtio_max_dma_size(struct virtio_device *vdev)
1417 size_t max_segment_size = SIZE_MAX;
1418
1419 if (vring_use_dma_api(vdev))
1420 - max_segment_size = dma_max_mapping_size(&vdev->dev);
1421 + max_segment_size = dma_max_mapping_size(vdev->dev.parent);
1422
1423 return max_segment_size;
1424 }
1425 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
1426 index 60378f3baaae1..34487bf1d7914 100644
1427 --- a/fs/fuse/dir.c
1428 +++ b/fs/fuse/dir.c
1429 @@ -1032,7 +1032,7 @@ int fuse_reverse_inval_entry(struct super_block *sb, u64 parent_nodeid,
1430 if (!parent)
1431 return -ENOENT;
1432
1433 - inode_lock(parent);
1434 + inode_lock_nested(parent, I_MUTEX_PARENT);
1435 if (!S_ISDIR(parent->i_mode))
1436 goto unlock;
1437
1438 diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
1439 index 3283cc2a4e42c..a48fcd4180c74 100644
1440 --- a/fs/nfsd/nfs4state.c
1441 +++ b/fs/nfsd/nfs4state.c
1442 @@ -1041,6 +1041,11 @@ hash_delegation_locked(struct nfs4_delegation *dp, struct nfs4_file *fp)
1443 return 0;
1444 }
1445
1446 +static bool delegation_hashed(struct nfs4_delegation *dp)
1447 +{
1448 + return !(list_empty(&dp->dl_perfile));
1449 +}
1450 +
1451 static bool
1452 unhash_delegation_locked(struct nfs4_delegation *dp)
1453 {
1454 @@ -1048,7 +1053,7 @@ unhash_delegation_locked(struct nfs4_delegation *dp)
1455
1456 lockdep_assert_held(&state_lock);
1457
1458 - if (list_empty(&dp->dl_perfile))
1459 + if (!delegation_hashed(dp))
1460 return false;
1461
1462 dp->dl_stid.sc_type = NFS4_CLOSED_DELEG_STID;
1463 @@ -4406,7 +4411,7 @@ static void nfsd4_cb_recall_prepare(struct nfsd4_callback *cb)
1464 * queued for a lease break. Don't queue it again.
1465 */
1466 spin_lock(&state_lock);
1467 - if (dp->dl_time == 0) {
1468 + if (delegation_hashed(dp) && dp->dl_time == 0) {
1469 dp->dl_time = get_seconds();
1470 list_add_tail(&dp->dl_recall_lru, &nn->del_recall_lru);
1471 }
1472 diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
1473 index 876de87f604cd..8c89eaea1583d 100644
1474 --- a/fs/overlayfs/dir.c
1475 +++ b/fs/overlayfs/dir.c
1476 @@ -113,8 +113,7 @@ kill_whiteout:
1477 goto out;
1478 }
1479
1480 -static int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry,
1481 - umode_t mode)
1482 +int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode)
1483 {
1484 int err;
1485 struct dentry *d, *dentry = *newdentry;
1486 diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
1487 index 6934bcf030f0b..a8e9da5f01eb5 100644
1488 --- a/fs/overlayfs/overlayfs.h
1489 +++ b/fs/overlayfs/overlayfs.h
1490 @@ -409,6 +409,7 @@ struct ovl_cattr {
1491
1492 #define OVL_CATTR(m) (&(struct ovl_cattr) { .mode = (m) })
1493
1494 +int ovl_mkdir_real(struct inode *dir, struct dentry **newdentry, umode_t mode);
1495 struct dentry *ovl_create_real(struct inode *dir, struct dentry *newdentry,
1496 struct ovl_cattr *attr);
1497 int ovl_cleanup(struct inode *dir, struct dentry *dentry);
1498 diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
1499 index f036d7544d4a6..f5cf0938f298d 100644
1500 --- a/fs/overlayfs/super.c
1501 +++ b/fs/overlayfs/super.c
1502 @@ -650,10 +650,14 @@ retry:
1503 goto retry;
1504 }
1505
1506 - work = ovl_create_real(dir, work, OVL_CATTR(attr.ia_mode));
1507 - err = PTR_ERR(work);
1508 - if (IS_ERR(work))
1509 - goto out_err;
1510 + err = ovl_mkdir_real(dir, &work, attr.ia_mode);
1511 + if (err)
1512 + goto out_dput;
1513 +
1514 + /* Weird filesystem returning with hashed negative (kernfs)? */
1515 + err = -EINVAL;
1516 + if (d_really_is_negative(work))
1517 + goto out_dput;
1518
1519 /*
1520 * Try to remove POSIX ACL xattrs from workdir. We are good if:
1521 diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
1522 index 0689d9bcdf841..f6a0f09ccc5f9 100644
1523 --- a/include/net/tc_act/tc_tunnel_key.h
1524 +++ b/include/net/tc_act/tc_tunnel_key.h
1525 @@ -52,7 +52,10 @@ static inline struct ip_tunnel_info *tcf_tunnel_info(const struct tc_action *a)
1526 {
1527 #ifdef CONFIG_NET_CLS_ACT
1528 struct tcf_tunnel_key *t = to_tunnel_key(a);
1529 - struct tcf_tunnel_key_params *params = rtnl_dereference(t->params);
1530 + struct tcf_tunnel_key_params *params;
1531 +
1532 + params = rcu_dereference_protected(t->params,
1533 + lockdep_is_held(&a->tcfa_lock));
1534
1535 return &params->tcft_enc_metadata->u.tun_info;
1536 #else
1537 @@ -69,7 +72,7 @@ tcf_tunnel_info_copy(const struct tc_action *a)
1538 if (tun) {
1539 size_t tun_size = sizeof(*tun) + tun->options_len;
1540 struct ip_tunnel_info *tun_copy = kmemdup(tun, tun_size,
1541 - GFP_KERNEL);
1542 + GFP_ATOMIC);
1543
1544 return tun_copy;
1545 }
1546 diff --git a/kernel/audit.c b/kernel/audit.c
1547 index 05ae208ad4423..d67fce9e3f8b8 100644
1548 --- a/kernel/audit.c
1549 +++ b/kernel/audit.c
1550 @@ -712,7 +712,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
1551 {
1552 int rc = 0;
1553 struct sk_buff *skb;
1554 - static unsigned int failed = 0;
1555 + unsigned int failed = 0;
1556
1557 /* NOTE: kauditd_thread takes care of all our locking, we just use
1558 * the netlink info passed to us (e.g. sk and portid) */
1559 @@ -729,32 +729,30 @@ static int kauditd_send_queue(struct sock *sk, u32 portid,
1560 continue;
1561 }
1562
1563 +retry:
1564 /* grab an extra skb reference in case of error */
1565 skb_get(skb);
1566 rc = netlink_unicast(sk, skb, portid, 0);
1567 if (rc < 0) {
1568 - /* fatal failure for our queue flush attempt? */
1569 + /* send failed - try a few times unless fatal error */
1570 if (++failed >= retry_limit ||
1571 rc == -ECONNREFUSED || rc == -EPERM) {
1572 - /* yes - error processing for the queue */
1573 sk = NULL;
1574 if (err_hook)
1575 (*err_hook)(skb);
1576 - if (!skb_hook)
1577 - goto out;
1578 - /* keep processing with the skb_hook */
1579 + if (rc == -EAGAIN)
1580 + rc = 0;
1581 + /* continue to drain the queue */
1582 continue;
1583 } else
1584 - /* no - requeue to preserve ordering */
1585 - skb_queue_head(queue, skb);
1586 + goto retry;
1587 } else {
1588 - /* it worked - drop the extra reference and continue */
1589 + /* skb sent - drop the extra reference and continue */
1590 consume_skb(skb);
1591 failed = 0;
1592 }
1593 }
1594
1595 -out:
1596 return (rc >= 0 ? 0 : rc);
1597 }
1598
1599 @@ -1557,7 +1555,8 @@ static int __net_init audit_net_init(struct net *net)
1600 audit_panic("cannot initialize netlink socket in namespace");
1601 return -ENOMEM;
1602 }
1603 - aunet->sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
1604 + /* limit the timeout in case auditd is blocked/stopped */
1605 + aunet->sk->sk_sndtimeo = HZ / 10;
1606
1607 return 0;
1608 }
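
The kernel/audit.c hunks make the failure counter in kauditd_send_queue() local to each flush (the old static counter carried stale state across calls), retry the same skb in place via a retry label instead of re-queueing it, keep draining the queue after a fatal error so the error hook sees every remaining record, and cap sk_sndtimeo so a stalled auditd cannot block the sender indefinitely. A minimal user-space sketch of the same bounded-retry loop, distinguishing fatal from transient errors; the record type and the fake send function are stand-ins, not audit or netlink APIs:

#include <errno.h>
#include <stdio.h>

#define RETRY_LIMIT 3

/* stand-in for netlink_unicast(): fails twice, then succeeds */
static int send_record(int msg)
{
	static int calls;

	(void)msg;	/* the record content does not matter for this sketch */
	return (++calls < 3) ? -EAGAIN : 0;
}

static int flush_queue(const int *queue, int n)
{
	int rc = 0;
	unsigned int failed = 0;	/* local: reset for every flush attempt */

	for (int i = 0; i < n; i++) {
retry:
		rc = send_record(queue[i]);
		if (rc < 0) {
			/* give up on this record after a few tries or a fatal error */
			if (++failed >= RETRY_LIMIT ||
			    rc == -ECONNREFUSED || rc == -EPERM) {
				fprintf(stderr, "dropping record %d (rc=%d)\n",
					queue[i], rc);
				continue;	/* keep draining the rest of the queue */
			}
			goto retry;		/* transient failure: try the same record again */
		}
		failed = 0;			/* success resets the per-flush counter */
	}
	return rc >= 0 ? 0 : rc;
}

int main(void)
{
	int queue[] = { 1, 2, 3 };

	return flush_queue(queue, 3);
}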
1609 diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
1610 index 11ae2747701b5..7777c35e0a171 100644
1611 --- a/kernel/rcu/tree.c
1612 +++ b/kernel/rcu/tree.c
1613 @@ -1602,7 +1602,7 @@ static void rcu_gp_fqs(bool first_time)
1614 struct rcu_node *rnp = rcu_get_root();
1615
1616 WRITE_ONCE(rcu_state.gp_activity, jiffies);
1617 - rcu_state.n_force_qs++;
1618 + WRITE_ONCE(rcu_state.n_force_qs, rcu_state.n_force_qs + 1);
1619 if (first_time) {
1620 /* Collect dyntick-idle snapshots. */
1621 force_qs_rnp(dyntick_save_progress_counter);
1622 @@ -2207,7 +2207,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
1623 /* Reset ->qlen_last_fqs_check trigger if enough CBs have drained. */
1624 if (count == 0 && rdp->qlen_last_fqs_check != 0) {
1625 rdp->qlen_last_fqs_check = 0;
1626 - rdp->n_force_qs_snap = rcu_state.n_force_qs;
1627 + rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
1628 } else if (count < rdp->qlen_last_fqs_check - qhimark)
1629 rdp->qlen_last_fqs_check = count;
1630
1631 @@ -2535,10 +2535,10 @@ static void __call_rcu_core(struct rcu_data *rdp, struct rcu_head *head,
1632 } else {
1633 /* Give the grace period a kick. */
1634 rdp->blimit = DEFAULT_MAX_RCU_BLIMIT;
1635 - if (rcu_state.n_force_qs == rdp->n_force_qs_snap &&
1636 + if (READ_ONCE(rcu_state.n_force_qs) == rdp->n_force_qs_snap &&
1637 rcu_segcblist_first_pend_cb(&rdp->cblist) != head)
1638 rcu_force_quiescent_state();
1639 - rdp->n_force_qs_snap = rcu_state.n_force_qs;
1640 + rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
1641 rdp->qlen_last_fqs_check = rcu_segcblist_n_cbs(&rdp->cblist);
1642 }
1643 }
1644 @@ -3029,7 +3029,7 @@ int rcutree_prepare_cpu(unsigned int cpu)
1645 /* Set up local state, ensuring consistent view of global state. */
1646 raw_spin_lock_irqsave_rcu_node(rnp, flags);
1647 rdp->qlen_last_fqs_check = 0;
1648 - rdp->n_force_qs_snap = rcu_state.n_force_qs;
1649 + rdp->n_force_qs_snap = READ_ONCE(rcu_state.n_force_qs);
1650 rdp->blimit = blimit;
1651 if (rcu_segcblist_empty(&rdp->cblist) && /* No early-boot CBs? */
1652 !rcu_segcblist_is_offloaded(&rdp->cblist))
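
The RCU hunks wrap every access to rcu_state.n_force_qs in READ_ONCE()/WRITE_ONCE() because the counter is updated by the grace-period kthread while other CPUs snapshot it locklessly; marked accesses stop the compiler from tearing, fusing, or re-loading the value. The closest portable analogue in user space is a relaxed C11 atomic, sketched below; this is only an approximation, the kernel macros are not C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

/* shared counter: written by one thread, read locklessly by others */
static _Atomic unsigned long n_force_qs;

static void writer_tick(void)
{
	/* analogue of WRITE_ONCE(n_force_qs, n_force_qs + 1) */
	unsigned long v = atomic_load_explicit(&n_force_qs, memory_order_relaxed);

	atomic_store_explicit(&n_force_qs, v + 1, memory_order_relaxed);
}

static unsigned long reader_snapshot(void)
{
	/* analogue of READ_ONCE(n_force_qs): a single, untorn load */
	return atomic_load_explicit(&n_force_qs, memory_order_relaxed);
}

int main(void)
{
	unsigned long snap = reader_snapshot();

	writer_tick();
	if (reader_snapshot() == snap)
		puts("no new force-quiescent-state pass since the snapshot");
	else
		puts("a force-quiescent-state pass happened in between");
	return 0;
}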
1653 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
1654 index 4fc2af4367a7b..36ed8bad3909e 100644
1655 --- a/kernel/time/timekeeping.c
1656 +++ b/kernel/time/timekeeping.c
1657 @@ -1236,8 +1236,7 @@ int do_settimeofday64(const struct timespec64 *ts)
1658 timekeeping_forward_now(tk);
1659
1660 xt = tk_xtime(tk);
1661 - ts_delta.tv_sec = ts->tv_sec - xt.tv_sec;
1662 - ts_delta.tv_nsec = ts->tv_nsec - xt.tv_nsec;
1663 + ts_delta = timespec64_sub(*ts, xt);
1664
1665 if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
1666 ret = -EINVAL;
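
The timekeeping hunk replaces an open-coded per-field subtraction with timespec64_sub(), which keeps tv_nsec normalized; the old code could hand a delta with a negative nanosecond part to the wall_to_monotonic comparison that follows. A small user-space version of a normalizing subtraction, using plain struct timespec and assuming the input nanosecond fields are already in range (a sketch, not the kernel helper):

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

/* subtract rhs from lhs, keeping 0 <= tv_nsec < NSEC_PER_SEC */
static struct timespec timespec_sub(struct timespec lhs, struct timespec rhs)
{
	struct timespec res;

	res.tv_sec = lhs.tv_sec - rhs.tv_sec;
	res.tv_nsec = lhs.tv_nsec - rhs.tv_nsec;
	if (res.tv_nsec < 0) {
		res.tv_sec--;			/* borrow one second */
		res.tv_nsec += NSEC_PER_SEC;
	}
	return res;
}

int main(void)
{
	struct timespec a = { .tv_sec = 10, .tv_nsec = 100 };
	struct timespec b = { .tv_sec = 3,  .tv_nsec = 900 };
	struct timespec d = timespec_sub(a, b);

	/* prints 6.999999200 rather than the denormalized 7 / -800 */
	printf("%ld.%09ld\n", (long)d.tv_sec, d.tv_nsec);
	return 0;
}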
1667 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
1668 index 7dba091bc8617..ac083685214e0 100644
1669 --- a/net/core/skbuff.c
1670 +++ b/net/core/skbuff.c
1671 @@ -768,7 +768,7 @@ void skb_dump(const char *level, const struct sk_buff *skb, bool full_pkt)
1672 ntohs(skb->protocol), skb->pkt_type, skb->skb_iif);
1673
1674 if (dev)
1675 - printk("%sdev name=%s feat=0x%pNF\n",
1676 + printk("%sdev name=%s feat=%pNF\n",
1677 level, dev->name, &dev->features);
1678 if (sk)
1679 printk("%ssk family=%hu type=%u proto=%u\n",
1680 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
1681 index 4f71aca156662..f8f79672cc5f3 100644
1682 --- a/net/ipv4/inet_diag.c
1683 +++ b/net/ipv4/inet_diag.c
1684 @@ -200,6 +200,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
1685 r->idiag_state = sk->sk_state;
1686 r->idiag_timer = 0;
1687 r->idiag_retrans = 0;
1688 + r->idiag_expires = 0;
1689
1690 if (inet_diag_msg_attrs_fill(sk, skb, r, ext, user_ns, net_admin))
1691 goto errout;
1692 @@ -240,20 +241,17 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
1693 r->idiag_timer = 1;
1694 r->idiag_retrans = icsk->icsk_retransmits;
1695 r->idiag_expires =
1696 - jiffies_to_msecs(icsk->icsk_timeout - jiffies);
1697 + jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
1698 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
1699 r->idiag_timer = 4;
1700 r->idiag_retrans = icsk->icsk_probes_out;
1701 r->idiag_expires =
1702 - jiffies_to_msecs(icsk->icsk_timeout - jiffies);
1703 + jiffies_delta_to_msecs(icsk->icsk_timeout - jiffies);
1704 } else if (timer_pending(&sk->sk_timer)) {
1705 r->idiag_timer = 2;
1706 r->idiag_retrans = icsk->icsk_probes_out;
1707 r->idiag_expires =
1708 - jiffies_to_msecs(sk->sk_timer.expires - jiffies);
1709 - } else {
1710 - r->idiag_timer = 0;
1711 - r->idiag_expires = 0;
1712 + jiffies_delta_to_msecs(sk->sk_timer.expires - jiffies);
1713 }
1714
1715 if ((ext & (1 << (INET_DIAG_INFO - 1))) && handler->idiag_info_size) {
1716 @@ -338,16 +336,13 @@ static int inet_twsk_diag_fill(struct sock *sk,
1717 r = nlmsg_data(nlh);
1718 BUG_ON(tw->tw_state != TCP_TIME_WAIT);
1719
1720 - tmo = tw->tw_timer.expires - jiffies;
1721 - if (tmo < 0)
1722 - tmo = 0;
1723 -
1724 inet_diag_msg_common_fill(r, sk);
1725 r->idiag_retrans = 0;
1726
1727 r->idiag_state = tw->tw_substate;
1728 r->idiag_timer = 3;
1729 - r->idiag_expires = jiffies_to_msecs(tmo);
1730 + tmo = tw->tw_timer.expires - jiffies;
1731 + r->idiag_expires = jiffies_delta_to_msecs(tmo);
1732 r->idiag_rqueue = 0;
1733 r->idiag_wqueue = 0;
1734 r->idiag_uid = 0;
1735 @@ -381,7 +376,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
1736 offsetof(struct sock, sk_cookie));
1737
1738 tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
1739 - r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
1740 + r->idiag_expires = jiffies_delta_to_msecs(tmo);
1741 r->idiag_rqueue = 0;
1742 r->idiag_wqueue = 0;
1743 r->idiag_uid = 0;
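
The inet_diag hunks initialize idiag_expires up front and convert the timer arithmetic to jiffies_delta_to_msecs(), which clamps a negative delta (a timer that already fired) to zero instead of letting it wrap to a huge unsigned value in the netlink reply. The helper boils down to the sketch below; HZ and the conversion factor here are illustrative, the real conversion depends on the kernel configuration:

#include <stdio.h>

#define HZ 100	/* illustrative; the kernel value is configuration dependent */

static unsigned long jiffies_to_msecs_sketch(unsigned long j)
{
	return j * (1000UL / HZ);
}

/* report 0 for timers that already expired instead of a huge unsigned value */
static unsigned long jiffies_delta_to_msecs_sketch(long delta)
{
	return jiffies_to_msecs_sketch(delta > 0 ? (unsigned long)delta : 0);
}

int main(void)
{
	printf("%lu\n", jiffies_delta_to_msecs_sketch(250));	/* 2500 ms */
	printf("%lu\n", jiffies_delta_to_msecs_sketch(-7));	/* 0, not a wrapped value */
	return 0;
}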
1744 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
1745 index 7f9cae4c49e7e..16e75a996b749 100644
1746 --- a/net/ipv6/sit.c
1747 +++ b/net/ipv6/sit.c
1748 @@ -1876,7 +1876,6 @@ static int __net_init sit_init_net(struct net *net)
1749 return 0;
1750
1751 err_reg_dev:
1752 - ipip6_dev_free(sitn->fb_tunnel_dev);
1753 free_netdev(sitn->fb_tunnel_dev);
1754 err_alloc_dev:
1755 return err;
1756 diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
1757 index 4d1c335e06e57..49ec9bfb6c8e6 100644
1758 --- a/net/mac80211/agg-rx.c
1759 +++ b/net/mac80211/agg-rx.c
1760 @@ -9,7 +9,7 @@
1761 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
1762 * Copyright 2007-2010, Intel Corporation
1763 * Copyright(c) 2015-2017 Intel Deutschland GmbH
1764 - * Copyright (C) 2018 Intel Corporation
1765 + * Copyright (C) 2018-2021 Intel Corporation
1766 */
1767
1768 /**
1769 @@ -191,7 +191,8 @@ static void ieee80211_add_addbaext(struct ieee80211_sub_if_data *sdata,
1770 sband = ieee80211_get_sband(sdata);
1771 if (!sband)
1772 return;
1773 - he_cap = ieee80211_get_he_iftype_cap(sband, sdata->vif.type);
1774 + he_cap = ieee80211_get_he_iftype_cap(sband,
1775 + ieee80211_vif_type_p2p(&sdata->vif));
1776 if (!he_cap)
1777 return;
1778
1779 @@ -292,7 +293,8 @@ void ___ieee80211_start_rx_ba_session(struct sta_info *sta,
1780 goto end;
1781 }
1782
1783 - if (!sta->sta.ht_cap.ht_supported) {
1784 + if (!sta->sta.ht_cap.ht_supported &&
1785 + sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ) {
1786 ht_dbg(sta->sdata,
1787 "STA %pM erroneously requests BA session on tid %d w/o QoS\n",
1788 sta->sta.addr, tid);
1789 diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
1790 index b11883d268759..f140c2b94b2c6 100644
1791 --- a/net/mac80211/agg-tx.c
1792 +++ b/net/mac80211/agg-tx.c
1793 @@ -9,7 +9,7 @@
1794 * Copyright 2007, Michael Wu <flamingice@sourmilk.net>
1795 * Copyright 2007-2010, Intel Corporation
1796 * Copyright(c) 2015-2017 Intel Deutschland GmbH
1797 - * Copyright (C) 2018 - 2019 Intel Corporation
1798 + * Copyright (C) 2018 - 2021 Intel Corporation
1799 */
1800
1801 #include <linux/ieee80211.h>
1802 @@ -106,7 +106,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
1803 mgmt->u.action.u.addba_req.start_seq_num =
1804 cpu_to_le16(start_seq_num << 4);
1805
1806 - ieee80211_tx_skb(sdata, skb);
1807 + ieee80211_tx_skb_tid(sdata, skb, tid);
1808 }
1809
1810 void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
1811 @@ -213,6 +213,8 @@ ieee80211_agg_start_txq(struct sta_info *sta, int tid, bool enable)
1812 struct ieee80211_txq *txq = sta->sta.txq[tid];
1813 struct txq_info *txqi;
1814
1815 + lockdep_assert_held(&sta->ampdu_mlme.mtx);
1816 +
1817 if (!txq)
1818 return;
1819
1820 @@ -290,7 +292,6 @@ static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
1821 ieee80211_assign_tid_tx(sta, tid, NULL);
1822
1823 ieee80211_agg_splice_finish(sta->sdata, tid);
1824 - ieee80211_agg_start_txq(sta, tid, false);
1825
1826 kfree_rcu(tid_tx, rcu_head);
1827 }
1828 @@ -448,6 +449,42 @@ static void sta_addba_resp_timer_expired(struct timer_list *t)
1829 ieee80211_stop_tx_ba_session(&sta->sta, tid);
1830 }
1831
1832 +static void ieee80211_send_addba_with_timeout(struct sta_info *sta,
1833 + struct tid_ampdu_tx *tid_tx)
1834 +{
1835 + struct ieee80211_sub_if_data *sdata = sta->sdata;
1836 + struct ieee80211_local *local = sta->local;
1837 + u8 tid = tid_tx->tid;
1838 + u16 buf_size;
1839 +
1840 + /* activate the timer for the recipient's addBA response */
1841 + mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
1842 + ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
1843 + sta->sta.addr, tid);
1844 +
1845 + spin_lock_bh(&sta->lock);
1846 + sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
1847 + sta->ampdu_mlme.addba_req_num[tid]++;
1848 + spin_unlock_bh(&sta->lock);
1849 +
1850 + if (sta->sta.he_cap.has_he) {
1851 + buf_size = local->hw.max_tx_aggregation_subframes;
1852 + } else {
1853 + /*
1854 + * We really should use what the driver told us it will
1855 + * transmit as the maximum, but certain APs (e.g. the
1856 + * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
1857 + * will crash when we use a lower number.
1858 + */
1859 + buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
1860 + }
1861 +
1862 + /* send AddBA request */
1863 + ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
1864 + tid_tx->dialog_token, tid_tx->ssn,
1865 + buf_size, tid_tx->timeout);
1866 +}
1867 +
1868 void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
1869 {
1870 struct tid_ampdu_tx *tid_tx;
1871 @@ -462,7 +499,6 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
1872 .timeout = 0,
1873 };
1874 int ret;
1875 - u16 buf_size;
1876
1877 tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
1878
1879 @@ -485,6 +521,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
1880
1881 params.ssn = sta->tid_seq[tid] >> 4;
1882 ret = drv_ampdu_action(local, sdata, &params);
1883 + tid_tx->ssn = params.ssn;
1884 if (ret) {
1885 ht_dbg(sdata,
1886 "BA request denied - HW unavailable for %pM tid %d\n",
1887 @@ -501,32 +538,7 @@ void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
1888 return;
1889 }
1890
1891 - /* activate the timer for the recipient's addBA response */
1892 - mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
1893 - ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
1894 - sta->sta.addr, tid);
1895 -
1896 - spin_lock_bh(&sta->lock);
1897 - sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
1898 - sta->ampdu_mlme.addba_req_num[tid]++;
1899 - spin_unlock_bh(&sta->lock);
1900 -
1901 - if (sta->sta.he_cap.has_he) {
1902 - buf_size = local->hw.max_tx_aggregation_subframes;
1903 - } else {
1904 - /*
1905 - * We really should use what the driver told us it will
1906 - * transmit as the maximum, but certain APs (e.g. the
1907 - * LinkSys WRT120N with FW v1.0.07 build 002 Jun 18 2012)
1908 - * will crash when we use a lower number.
1909 - */
1910 - buf_size = IEEE80211_MAX_AMPDU_BUF_HT;
1911 - }
1912 -
1913 - /* send AddBA request */
1914 - ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
1915 - tid_tx->dialog_token, params.ssn,
1916 - buf_size, tid_tx->timeout);
1917 + ieee80211_send_addba_with_timeout(sta, tid_tx);
1918 }
1919
1920 /*
1921 @@ -571,7 +583,8 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
1922 "Requested to start BA session on reserved tid=%d", tid))
1923 return -EINVAL;
1924
1925 - if (!pubsta->ht_cap.ht_supported)
1926 + if (!pubsta->ht_cap.ht_supported &&
1927 + sta->sdata->vif.bss_conf.chandef.chan->band != NL80211_BAND_6GHZ)
1928 return -EINVAL;
1929
1930 if (WARN_ON_ONCE(!local->ops->ampdu_action))
1931 @@ -860,6 +873,7 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
1932 {
1933 struct ieee80211_sub_if_data *sdata = sta->sdata;
1934 bool send_delba = false;
1935 + bool start_txq = false;
1936
1937 ht_dbg(sdata, "Stopping Tx BA session for %pM tid %d\n",
1938 sta->sta.addr, tid);
1939 @@ -877,10 +891,14 @@ void ieee80211_stop_tx_ba_cb(struct sta_info *sta, int tid,
1940 send_delba = true;
1941
1942 ieee80211_remove_tid_tx(sta, tid);
1943 + start_txq = true;
1944
1945 unlock_sta:
1946 spin_unlock_bh(&sta->lock);
1947
1948 + if (start_txq)
1949 + ieee80211_agg_start_txq(sta, tid, false);
1950 +
1951 if (send_delba)
1952 ieee80211_send_delba(sdata, sta->sta.addr, tid,
1953 WLAN_BACK_INITIATOR, WLAN_REASON_QSTA_NOT_USE);
1954 diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
1955 index 2c9b3eb8b6525..f4c7e0af896b1 100644
1956 --- a/net/mac80211/driver-ops.h
1957 +++ b/net/mac80211/driver-ops.h
1958 @@ -1202,8 +1202,11 @@ static inline void drv_wake_tx_queue(struct ieee80211_local *local,
1959 {
1960 struct ieee80211_sub_if_data *sdata = vif_to_sdata(txq->txq.vif);
1961
1962 - if (local->in_reconfig)
1963 + /* In reconfig don't transmit now, but mark for waking later */
1964 + if (local->in_reconfig) {
1965 + set_bit(IEEE80211_TXQ_STOP_NETIF_TX, &txq->flags);
1966 return;
1967 + }
1968
1969 if (!check_sdata_in_driver(sdata))
1970 return;
1971 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
1972 index ccaf2389ccc1d..5c727af01143f 100644
1973 --- a/net/mac80211/mlme.c
1974 +++ b/net/mac80211/mlme.c
1975 @@ -2418,11 +2418,18 @@ static void ieee80211_sta_tx_wmm_ac_notify(struct ieee80211_sub_if_data *sdata,
1976 u16 tx_time)
1977 {
1978 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
1979 - u16 tid = ieee80211_get_tid(hdr);
1980 - int ac = ieee80211_ac_from_tid(tid);
1981 - struct ieee80211_sta_tx_tspec *tx_tspec = &ifmgd->tx_tspec[ac];
1982 + u16 tid;
1983 + int ac;
1984 + struct ieee80211_sta_tx_tspec *tx_tspec;
1985 unsigned long now = jiffies;
1986
1987 + if (!ieee80211_is_data_qos(hdr->frame_control))
1988 + return;
1989 +
1990 + tid = ieee80211_get_tid(hdr);
1991 + ac = ieee80211_ac_from_tid(tid);
1992 + tx_tspec = &ifmgd->tx_tspec[ac];
1993 +
1994 if (likely(!tx_tspec->admitted_time))
1995 return;
1996
1997 diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
1998 index 2eb73be9b9865..be0df78d4a799 100644
1999 --- a/net/mac80211/sta_info.h
2000 +++ b/net/mac80211/sta_info.h
2001 @@ -180,6 +180,7 @@ struct tid_ampdu_tx {
2002 u8 stop_initiator;
2003 bool tx_stop;
2004 u16 buf_size;
2005 + u16 ssn;
2006
2007 u16 failed_bar_ssn;
2008 bool bar_pending;
2009 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
2010 index decd46b383938..c1c117fdf3184 100644
2011 --- a/net/mac80211/util.c
2012 +++ b/net/mac80211/util.c
2013 @@ -1227,6 +1227,8 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
2014 elems->max_idle_period_ie = (void *)pos;
2015 break;
2016 case WLAN_EID_EXTENSION:
2017 + if (!elen)
2018 + break;
2019 if (pos[0] == WLAN_EID_EXT_HE_MU_EDCA &&
2020 elen >= (sizeof(*elems->mu_edca_param_set) + 1)) {
2021 elems->mu_edca_param_set = (void *)&pos[1];
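
The mac80211 util.c hunk adds a zero-length check before the extension-element parser reads pos[0] (the extension ID), so a malformed element with elen == 0 cannot make the parser read past the element data. The same check-the-length-before-touching-the-first-byte rule in a tiny generic TLV walker; the element IDs here are made up, not 802.11 ones:

#include <stddef.h>
#include <stdio.h>

static void parse_elements(const unsigned char *buf, size_t len)
{
	size_t pos = 0;

	while (pos + 2 <= len) {
		unsigned char id   = buf[pos];
		unsigned char elen = buf[pos + 1];
		const unsigned char *data = &buf[pos + 2];

		if (pos + 2 + elen > len)
			break;			/* element overruns the buffer */

		if (id == 0xff) {		/* "extension" style element */
			if (elen == 0)		/* the fix: no bytes, nothing to read */
				goto next;
			printf("extension element, ext id %u, %u byte(s)\n",
			       data[0], elen - 1);
		} else {
			printf("element %u, %u byte(s)\n", id, elen);
		}
next:
		pos += 2 + elen;
	}
}

int main(void)
{
	/* id 1 (3 bytes), a zero-length extension element, then ext id 0x23 */
	const unsigned char frame[] = { 1, 3, 'a', 'b', 'c',
					0xff, 0,
					0xff, 1, 0x23 };

	parse_elements(frame, sizeof(frame));
	return 0;
}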
2022 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2023 index 0ffbf3d17911a..6062bd5bf132b 100644
2024 --- a/net/packet/af_packet.c
2025 +++ b/net/packet/af_packet.c
2026 @@ -4453,9 +4453,10 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2027 }
2028
2029 out_free_pg_vec:
2030 - bitmap_free(rx_owner_map);
2031 - if (pg_vec)
2032 + if (pg_vec) {
2033 + bitmap_free(rx_owner_map);
2034 free_pg_vec(pg_vec, order, req->tp_block_nr);
2035 + }
2036 out:
2037 return err;
2038 }
2039 diff --git a/net/rds/connection.c b/net/rds/connection.c
2040 index c85bd6340eaa7..92ff40e7a66cf 100644
2041 --- a/net/rds/connection.c
2042 +++ b/net/rds/connection.c
2043 @@ -253,6 +253,7 @@ static struct rds_connection *__rds_conn_create(struct net *net,
2044 * should end up here, but if it
2045 * does, reset/destroy the connection.
2046 */
2047 + kfree(conn->c_path);
2048 kmem_cache_free(rds_conn_slab, conn);
2049 conn = ERR_PTR(-EOPNOTSUPP);
2050 goto out;
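
The rds hunk plugs a leak in the error path of __rds_conn_create(): the connection's c_path array is allocated separately from the connection object itself, so releasing only the kmem_cache object would leak the paths. The general shape, freeing the separately allocated member before the containing object, sketched in user-space C with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

struct conn_path { int id; };

struct connection {
	struct conn_path *c_path;	/* allocated separately from the connection */
	int npaths;
};

static struct connection *conn_create(int npaths, int fail_late)
{
	struct connection *conn = malloc(sizeof(*conn));

	if (!conn)
		return NULL;

	conn->npaths = npaths;
	conn->c_path = calloc(npaths, sizeof(*conn->c_path));
	if (!conn->c_path) {
		free(conn);
		return NULL;
	}

	if (fail_late) {
		/* late error path: release the paths too, not just the container */
		free(conn->c_path);
		free(conn);
		return NULL;
	}
	return conn;
}

int main(void)
{
	struct connection *conn = conn_create(4, 0);

	if (!conn)
		return 1;
	puts("connection created");
	free(conn->c_path);
	free(conn);
	return 0;
}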
2051 diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
2052 index 74450b0f69fc5..214f4efdd9920 100644
2053 --- a/net/sched/act_sample.c
2054 +++ b/net/sched/act_sample.c
2055 @@ -265,14 +265,12 @@ tcf_sample_get_group(const struct tc_action *a,
2056 struct tcf_sample *s = to_sample(a);
2057 struct psample_group *group;
2058
2059 - spin_lock_bh(&s->tcf_lock);
2060 group = rcu_dereference_protected(s->psample_group,
2061 lockdep_is_held(&s->tcf_lock));
2062 if (group) {
2063 psample_group_take(group);
2064 *destructor = tcf_psample_group_put;
2065 }
2066 - spin_unlock_bh(&s->tcf_lock);
2067
2068 return group;
2069 }
2070 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
2071 index 7f20fd37e01e0..a4c61205462ac 100644
2072 --- a/net/sched/cls_api.c
2073 +++ b/net/sched/cls_api.c
2074 @@ -3436,7 +3436,7 @@ static void tcf_sample_get_group(struct flow_action_entry *entry,
2075 int tc_setup_flow_action(struct flow_action *flow_action,
2076 const struct tcf_exts *exts, bool rtnl_held)
2077 {
2078 - const struct tc_action *act;
2079 + struct tc_action *act;
2080 int i, j, k, err = 0;
2081
2082 if (!exts)
2083 @@ -3450,6 +3450,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
2084 struct flow_action_entry *entry;
2085
2086 entry = &flow_action->entries[j];
2087 + spin_lock_bh(&act->tcfa_lock);
2088 if (is_tcf_gact_ok(act)) {
2089 entry->id = FLOW_ACTION_ACCEPT;
2090 } else if (is_tcf_gact_shot(act)) {
2091 @@ -3490,13 +3491,13 @@ int tc_setup_flow_action(struct flow_action *flow_action,
2092 break;
2093 default:
2094 err = -EOPNOTSUPP;
2095 - goto err_out;
2096 + goto err_out_locked;
2097 }
2098 } else if (is_tcf_tunnel_set(act)) {
2099 entry->id = FLOW_ACTION_TUNNEL_ENCAP;
2100 err = tcf_tunnel_encap_get_tunnel(entry, act);
2101 if (err)
2102 - goto err_out;
2103 + goto err_out_locked;
2104 } else if (is_tcf_tunnel_release(act)) {
2105 entry->id = FLOW_ACTION_TUNNEL_DECAP;
2106 } else if (is_tcf_pedit(act)) {
2107 @@ -3510,7 +3511,7 @@ int tc_setup_flow_action(struct flow_action *flow_action,
2108 break;
2109 default:
2110 err = -EOPNOTSUPP;
2111 - goto err_out;
2112 + goto err_out_locked;
2113 }
2114 entry->mangle.htype = tcf_pedit_htype(act, k);
2115 entry->mangle.mask = tcf_pedit_mask(act, k);
2116 @@ -3561,15 +3562,17 @@ int tc_setup_flow_action(struct flow_action *flow_action,
2117 entry->mpls_mangle.ttl = tcf_mpls_ttl(act);
2118 break;
2119 default:
2120 - goto err_out;
2121 + err = -EOPNOTSUPP;
2122 + goto err_out_locked;
2123 }
2124 } else if (is_tcf_skbedit_ptype(act)) {
2125 entry->id = FLOW_ACTION_PTYPE;
2126 entry->ptype = tcf_skbedit_ptype(act);
2127 } else {
2128 err = -EOPNOTSUPP;
2129 - goto err_out;
2130 + goto err_out_locked;
2131 }
2132 + spin_unlock_bh(&act->tcfa_lock);
2133
2134 if (!is_tcf_pedit(act))
2135 j++;
2136 @@ -3583,6 +3586,9 @@ err_out:
2137 tc_cleanup_flow_action(flow_action);
2138
2139 return err;
2140 +err_out_locked:
2141 + spin_unlock_bh(&act->tcfa_lock);
2142 + goto err_out;
2143 }
2144 EXPORT_SYMBOL(tc_setup_flow_action);
2145
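The cls_api hunks take each action's tcfa_lock while tc_setup_flow_action() copies its state into the flow_action entry, so the values being read cannot change mid-copy, and introduce an err_out_locked label that drops the lock before falling through to the existing cleanup. A compact user-space sketch of that unlock-then-reuse-the-common-error-path unwind, using a pthread mutex; the structure is the point, none of the tc specifics are modeled:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

struct action {
	pthread_mutex_t lock;
	int kind;		/* 0 = supported, anything else = unsupported */
	int value;
};

static int setup_entry(struct action *act, int *entry_out)
{
	int err = 0;

	pthread_mutex_lock(&act->lock);
	if (act->kind != 0) {
		err = -EOPNOTSUPP;
		goto err_out_locked;	/* still holding the lock here */
	}
	*entry_out = act->value;	/* copy state while it cannot change */
	pthread_mutex_unlock(&act->lock);
	return 0;

err_out_locked:
	pthread_mutex_unlock(&act->lock);
	goto err_out;
err_out:
	/* shared cleanup for all failure paths would go here */
	return err;
}

int main(void)
{
	struct action ok  = { PTHREAD_MUTEX_INITIALIZER, 0, 7 };
	struct action bad = { PTHREAD_MUTEX_INITIALIZER, 1, 0 };
	int entry;

	printf("ok:  %d\n", setup_entry(&ok, &entry));	/* 0 */
	printf("bad: %d\n", setup_entry(&bad, &entry));	/* -EOPNOTSUPP */
	return 0;
}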
2146 diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
2147 index e8eebe40e0ae9..0eb4d4a568f77 100644
2148 --- a/net/sched/sch_cake.c
2149 +++ b/net/sched/sch_cake.c
2150 @@ -2724,7 +2724,7 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2151 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data),
2152 GFP_KERNEL);
2153 if (!q->tins)
2154 - goto nomem;
2155 + return -ENOMEM;
2156
2157 for (i = 0; i < CAKE_MAX_TINS; i++) {
2158 struct cake_tin_data *b = q->tins + i;
2159 @@ -2754,10 +2754,6 @@ static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2160 q->min_netlen = ~0;
2161 q->min_adjlen = ~0;
2162 return 0;
2163 -
2164 -nomem:
2165 - cake_destroy(sch);
2166 - return -ENOMEM;
2167 }
2168
2169 static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
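
The sch_cake hunk makes cake_init() return -ENOMEM directly when its first allocation fails, instead of jumping to a label that ran cake_destroy() on a qdisc that was never fully set up; running a full destructor on a half-initialized object is a classic source of crashes in error paths. A small sketch of the pattern, failing the constructor directly while there is nothing to undo and only using the destructor once there is; the names are illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct sched {
	int *tins;	/* first allocation */
	int *stats;	/* second allocation */
};

static void sched_destroy(struct sched *q)
{
	/* assumes init() got far enough that both pointers are at least NULL */
	free(q->stats);
	free(q->tins);
	q->stats = NULL;
	q->tins = NULL;
}

static int sched_init(struct sched *q)
{
	q->tins = calloc(8, sizeof(*q->tins));
	if (!q->tins)
		return -ENOMEM;		/* nothing allocated yet: no destructor needed */

	q->stats = calloc(8, sizeof(*q->stats));
	if (!q->stats) {
		sched_destroy(q);	/* safe now: tins is set, stats is NULL */
		return -ENOMEM;
	}
	return 0;
}

int main(void)
{
	struct sched q = { 0 };

	if (sched_init(&q))
		return 1;
	puts("initialized");
	sched_destroy(&q);
	return 0;
}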
2170 diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
2171 index fa3b20e5f4608..06684ac346abd 100644
2172 --- a/net/smc/af_smc.c
2173 +++ b/net/smc/af_smc.c
2174 @@ -183,7 +183,9 @@ static int smc_release(struct socket *sock)
2175 /* cleanup for a dangling non-blocking connect */
2176 if (smc->connect_nonblock && sk->sk_state == SMC_INIT)
2177 tcp_abort(smc->clcsock->sk, ECONNABORTED);
2178 - flush_work(&smc->connect_work);
2179 +
2180 + if (cancel_work_sync(&smc->connect_work))
2181 + sock_put(&smc->sk); /* sock_hold in smc_connect for passive closing */
2182
2183 if (sk->sk_state == SMC_LISTEN)
2184 /* smc_close_non_accepted() is called and acquires
2185 diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
2186 index f459ae883a0a6..a4ca050815aba 100755
2187 --- a/scripts/recordmcount.pl
2188 +++ b/scripts/recordmcount.pl
2189 @@ -252,7 +252,7 @@ if ($arch eq "x86_64") {
2190
2191 } elsif ($arch eq "s390" && $bits == 64) {
2192 if ($cc =~ /-DCC_USING_HOTPATCH/) {
2193 - $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*brcl\\s*0,[0-9a-f]+ <([^\+]*)>\$";
2194 + $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*c0 04 00 00 00 00\\s*(bcrl\\s*0,|jgnop\\s*)[0-9a-f]+ <([^\+]*)>\$";
2195 $mcount_adjust = 0;
2196 } else {
2197 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
2198 diff --git a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
2199 index 231d79e57774e..cfe75536d8a55 100644
2200 --- a/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
2201 +++ b/tools/testing/selftests/kvm/kvm_create_max_vcpus.c
2202 @@ -12,6 +12,7 @@
2203 #include <stdio.h>
2204 #include <stdlib.h>
2205 #include <string.h>
2206 +#include <sys/resource.h>
2207
2208 #include "test_util.h"
2209
2210 @@ -43,10 +44,39 @@ int main(int argc, char *argv[])
2211 {
2212 int kvm_max_vcpu_id = kvm_check_cap(KVM_CAP_MAX_VCPU_ID);
2213 int kvm_max_vcpus = kvm_check_cap(KVM_CAP_MAX_VCPUS);
2214 + /*
2215 + * Number of file descriptors required, KVM_CAP_MAX_VCPUS for vCPU fds +
2216 + * an arbitrary number for everything else.
2217 + */
2218 + int nr_fds_wanted = kvm_max_vcpus + 100;
2219 + struct rlimit rl;
2220
2221 printf("KVM_CAP_MAX_VCPU_ID: %d\n", kvm_max_vcpu_id);
2222 printf("KVM_CAP_MAX_VCPUS: %d\n", kvm_max_vcpus);
2223
2224 + /*
2225 + * Check that we're allowed to open nr_fds_wanted file descriptors and
2226 + * try raising the limits if needed.
2227 + */
2228 + TEST_ASSERT(!getrlimit(RLIMIT_NOFILE, &rl), "getrlimit() failed!");
2229 +
2230 + if (rl.rlim_cur < nr_fds_wanted) {
2231 + rl.rlim_cur = nr_fds_wanted;
2232 + if (rl.rlim_max < nr_fds_wanted) {
2233 + int old_rlim_max = rl.rlim_max;
2234 + rl.rlim_max = nr_fds_wanted;
2235 +
2236 + int r = setrlimit(RLIMIT_NOFILE, &rl);
2237 + if (r < 0) {
2238 + printf("RLIMIT_NOFILE hard limit is too low (%d, wanted %d)\n",
2239 + old_rlim_max, nr_fds_wanted);
2240 + exit(KSFT_SKIP);
2241 + }
2242 + } else {
2243 + TEST_ASSERT(!setrlimit(RLIMIT_NOFILE, &rl), "setrlimit() failed!");
2244 + }
2245 + }
2246 +
2247 /*
2248 * Upstream KVM prior to 4.8 does not support KVM_CAP_MAX_VCPU_ID.
2249 * Userspace is supposed to use KVM_CAP_MAX_VCPUS as the maximum ID
2250 diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh
2251 index 782a8da5d9500..157822331954d 100755
2252 --- a/tools/testing/selftests/net/fcnal-test.sh
2253 +++ b/tools/testing/selftests/net/fcnal-test.sh
2254 @@ -1491,8 +1491,9 @@ ipv4_addr_bind_vrf()
2255 for a in ${NSA_IP} ${VRF_IP}
2256 do
2257 log_start
2258 + show_hint "Socket not bound to VRF, but address is in VRF"
2259 run_cmd nettest -s -R -P icmp -l ${a} -b
2260 - log_test_addr ${a} $? 0 "Raw socket bind to local address"
2261 + log_test_addr ${a} $? 1 "Raw socket bind to local address"
2262
2263 log_start
2264 run_cmd nettest -s -R -P icmp -l ${a} -d ${NSA_DEV} -b
2265 @@ -1884,7 +1885,7 @@ ipv6_ping_vrf()
2266 log_start
2267 show_hint "Fails since VRF device does not support linklocal or multicast"
2268 run_cmd ${ping6} -c1 -w1 ${a}
2269 - log_test_addr ${a} $? 2 "ping out, VRF bind"
2270 + log_test_addr ${a} $? 1 "ping out, VRF bind"
2271 done
2272
2273 for a in ${NSB_IP6} ${NSB_LO_IP6} ${NSB_LINKIP6}%${NSA_DEV} ${MCAST}%${NSA_DEV}
2274 @@ -2890,11 +2891,14 @@ ipv6_addr_bind_novrf()
2275 run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b
2276 log_test_addr ${a} $? 0 "TCP socket bind to local address after device bind"
2277
2278 + # Sadly, the kernel allows binding a socket to a device and then
2279 + # binding to an address not on the device. So this test passes
2280 + # when it really should not
2281 a=${NSA_LO_IP6}
2282 log_start
2283 - show_hint "Should fail with 'Cannot assign requested address'"
2284 - run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b
2285 - log_test_addr ${a} $? 1 "TCP socket bind to out of scope local address"
2286 + show_hint "Tecnically should fail since address is not on device but kernel allows"
2287 + run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
2288 + log_test_addr ${a} $? 0 "TCP socket bind to out of scope local address"
2289 }
2290
2291 ipv6_addr_bind_vrf()
2292 @@ -2935,10 +2939,15 @@ ipv6_addr_bind_vrf()
2293 run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b
2294 log_test_addr ${a} $? 0 "TCP socket bind to local address with device bind"
2295
2296 + # Sadly, the kernel allows binding a socket to a device and then
2297 + # binding to an address not on the device. The only restriction
2298 + # is that the address is valid in the L3 domain. So this test
2299 + # passes when it really should not
2300 a=${VRF_IP6}
2301 log_start
2302 - run_cmd nettest -6 -s -l ${a} -d ${NSA_DEV} -t1 -b
2303 - log_test_addr ${a} $? 1 "TCP socket bind to VRF address with device bind"
2304 + show_hint "Tecnically should fail since address is not on device but kernel allows"
2305 + run_cmd nettest -6 -s -l ${a} -I ${NSA_DEV} -t1 -b
2306 + log_test_addr ${a} $? 0 "TCP socket bind to VRF address with device bind"
2307
2308 a=${NSA_LO_IP6}
2309 log_start
2310 diff --git a/tools/testing/selftests/net/forwarding/forwarding.config.sample b/tools/testing/selftests/net/forwarding/forwarding.config.sample
2311 index e2adb533c8fcb..e71c61ee4cc67 100644
2312 --- a/tools/testing/selftests/net/forwarding/forwarding.config.sample
2313 +++ b/tools/testing/selftests/net/forwarding/forwarding.config.sample
2314 @@ -13,6 +13,8 @@ NETIFS[p5]=veth4
2315 NETIFS[p6]=veth5
2316 NETIFS[p7]=veth6
2317 NETIFS[p8]=veth7
2318 +NETIFS[p9]=veth8
2319 +NETIFS[p10]=veth9
2320
2321 ##############################################################################
2322 # Defines