Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0128-4.19.29-all-fixes.patch

Revision 3407
Fri Aug 2 11:47:39 2019 UTC by niro
File size: 198034 bytes
-linux-4.19.29
1 diff --git a/Makefile b/Makefile
2 index c6ac023ba33a..6e526583291c 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 28
10 +SUBLEVEL = 29
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
15 index 27a1ee28c3bb..94efca78c42f 100644
16 --- a/arch/arm/boot/dts/exynos3250.dtsi
17 +++ b/arch/arm/boot/dts/exynos3250.dtsi
18 @@ -168,6 +168,9 @@
19 interrupt-controller;
20 #interrupt-cells = <3>;
21 interrupt-parent = <&gic>;
22 + clock-names = "clkout8";
23 + clocks = <&cmu CLK_FIN_PLL>;
24 + #clock-cells = <1>;
25 };
26
27 mipi_phy: video-phy {
28 diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
29 index a09e46c9dbc0..00820d239753 100644
30 --- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
31 +++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
32 @@ -49,7 +49,7 @@
33 };
34
35 emmc_pwrseq: pwrseq {
36 - pinctrl-0 = <&sd1_cd>;
37 + pinctrl-0 = <&emmc_rstn>;
38 pinctrl-names = "default";
39 compatible = "mmc-pwrseq-emmc";
40 reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
41 @@ -161,12 +161,6 @@
42 cpu0-supply = <&buck2_reg>;
43 };
44
45 -/* RSTN signal for eMMC */
46 -&sd1_cd {
47 - samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
48 - samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
49 -};
50 -
51 &pinctrl_1 {
52 gpio_power_key: power_key {
53 samsung,pins = "gpx1-3";
54 @@ -184,6 +178,11 @@
55 samsung,pins = "gpx3-7";
56 samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
57 };
58 +
59 + emmc_rstn: emmc-rstn {
60 + samsung,pins = "gpk1-2";
61 + samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
62 + };
63 };
64
65 &ehci {
66 diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
67 index 2f4f40882dab..27214e6ebe4f 100644
68 --- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
69 +++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
70 @@ -334,7 +334,7 @@
71 buck8_reg: BUCK8 {
72 regulator-name = "vdd_1.8v_ldo";
73 regulator-min-microvolt = <800000>;
74 - regulator-max-microvolt = <1500000>;
75 + regulator-max-microvolt = <2000000>;
76 regulator-always-on;
77 regulator-boot-on;
78 };
79 diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
80 index 844caa39364f..50083cecc6c9 100644
81 --- a/arch/arm/boot/dts/imx6sx.dtsi
82 +++ b/arch/arm/boot/dts/imx6sx.dtsi
83 @@ -462,7 +462,7 @@
84 };
85
86 gpt: gpt@2098000 {
87 - compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
88 + compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
89 reg = <0x02098000 0x4000>;
90 interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
91 clocks = <&clks IMX6SX_CLK_GPT_BUS>,
92 diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
93 index 0d9faf1a51ea..a86b89086334 100644
94 --- a/arch/arm/boot/dts/meson.dtsi
95 +++ b/arch/arm/boot/dts/meson.dtsi
96 @@ -263,7 +263,7 @@
97 compatible = "amlogic,meson6-dwmac", "snps,dwmac";
98 reg = <0xc9410000 0x10000
99 0xc1108108 0x4>;
100 - interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
101 + interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
102 interrupt-names = "macirq";
103 status = "disabled";
104 };
105 diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
106 index ef3177d3da3d..8fdeeffecbdb 100644
107 --- a/arch/arm/boot/dts/meson8b-odroidc1.dts
108 +++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
109 @@ -125,7 +125,6 @@
110 /* Realtek RTL8211F (0x001cc916) */
111 eth_phy: ethernet-phy@0 {
112 reg = <0>;
113 - eee-broken-1000t;
114 interrupt-parent = <&gpio_intc>;
115 /* GPIOH_3 */
116 interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
117 @@ -172,8 +171,7 @@
118 cap-sd-highspeed;
119 disable-wp;
120
121 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
122 - cd-inverted;
123 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
124
125 vmmc-supply = <&tflash_vdd>;
126 vqmmc-supply = <&tf_io>;
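
The card-detect hunks above and in the meson8m2-mxiii-plus diff that follows make the same change: "cd-gpios = <... GPIO_ACTIVE_HIGH>" plus "cd-inverted" collapses into a plain GPIO_ACTIVE_LOW card-detect. A minimal userspace sketch of why those are equivalent, under the assumption (how I read the MMC core's CD handling) that each inversion flag XORs the raw line level:

#include <stdio.h>

/* Illustrative only, not kernel code: "card present" is the raw CD
 * line XORed with the GPIO polarity flag and the cd-inverted flag.
 * Two inversions cancel, so ACTIVE_HIGH + cd-inverted == ACTIVE_LOW. */
int main(void)
{
	for (int raw = 0; raw <= 1; raw++) {
		int old = raw ^ 0 /* ACTIVE_HIGH */ ^ 1 /* cd-inverted */;
		int new = raw ^ 1 /* ACTIVE_LOW */;
		printf("raw=%d old=%d new=%d\n", raw, old, new);
	}
	return 0;
}
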
127 diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
128 index f5853610b20b..6ac02beb5fa7 100644
129 --- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
130 +++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
131 @@ -206,8 +206,7 @@
132 cap-sd-highspeed;
133 disable-wp;
134
135 - cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
136 - cd-inverted;
137 + cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
138
139 vmmc-supply = <&vcc_3v3>;
140 };
141 diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
142 index ddc7a7bb33c0..f57acf8f66b9 100644
143 --- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
144 +++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
145 @@ -105,7 +105,7 @@
146 interrupts-extended = <
147 &cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
148 &cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
149 - &cpcap 48 1
150 + &cpcap 48 0
151 >;
152 interrupt-names =
153 "id_ground", "id_float", "se0conn", "vbusvld",
154 diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
155 index 0d9b85317529..e142e6c70a59 100644
156 --- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
157 +++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
158 @@ -370,6 +370,19 @@
159 compatible = "ti,omap2-onenand";
160 reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
161
162 + /*
163 + * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
164 + * bootloader set values when booted with v4.19 using both N950
165 + * and N9 devices (OneNAND Manufacturer: Samsung):
166 + *
167 + * gpmc cs0 before gpmc_cs_program_settings:
168 + * cs0 GPMC_CS_CONFIG1: 0xfd001202
169 + * cs0 GPMC_CS_CONFIG2: 0x00181800
170 + * cs0 GPMC_CS_CONFIG3: 0x00030300
171 + * cs0 GPMC_CS_CONFIG4: 0x18001804
172 + * cs0 GPMC_CS_CONFIG5: 0x03171d1d
173 + * cs0 GPMC_CS_CONFIG6: 0x97080000
174 + */
175 gpmc,sync-read;
176 gpmc,sync-write;
177 gpmc,burst-length = <16>;
178 @@ -379,26 +392,27 @@
179 gpmc,device-width = <2>;
180 gpmc,mux-add-data = <2>;
181 gpmc,cs-on-ns = <0>;
182 - gpmc,cs-rd-off-ns = <87>;
183 - gpmc,cs-wr-off-ns = <87>;
184 + gpmc,cs-rd-off-ns = <122>;
185 + gpmc,cs-wr-off-ns = <122>;
186 gpmc,adv-on-ns = <0>;
187 - gpmc,adv-rd-off-ns = <10>;
188 - gpmc,adv-wr-off-ns = <10>;
189 - gpmc,oe-on-ns = <15>;
190 - gpmc,oe-off-ns = <87>;
191 + gpmc,adv-rd-off-ns = <15>;
192 + gpmc,adv-wr-off-ns = <15>;
193 + gpmc,oe-on-ns = <20>;
194 + gpmc,oe-off-ns = <122>;
195 gpmc,we-on-ns = <0>;
196 - gpmc,we-off-ns = <87>;
197 - gpmc,rd-cycle-ns = <112>;
198 - gpmc,wr-cycle-ns = <112>;
199 - gpmc,access-ns = <81>;
200 + gpmc,we-off-ns = <122>;
201 + gpmc,rd-cycle-ns = <148>;
202 + gpmc,wr-cycle-ns = <148>;
203 + gpmc,access-ns = <117>;
204 gpmc,page-burst-access-ns = <15>;
205 gpmc,bus-turnaround-ns = <0>;
206 gpmc,cycle2cycle-delay-ns = <0>;
207 gpmc,wait-monitoring-ns = <0>;
208 - gpmc,clk-activation-ns = <5>;
209 - gpmc,wr-data-mux-bus-ns = <30>;
210 - gpmc,wr-access-ns = <81>;
211 - gpmc,sync-clk-ps = <15000>;
212 + gpmc,clk-activation-ns = <10>;
213 + gpmc,wr-data-mux-bus-ns = <40>;
214 + gpmc,wr-access-ns = <117>;
215 +
216 + gpmc,sync-clk-ps = <15000>; /* TBC; where did this value come from? */
217
218 /*
219 * MTD partition table corresponding to Nokia's MeeGo 1.2
220 diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
221 index 5d23667dc2d2..25540b7694d5 100644
222 --- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
223 +++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
224 @@ -53,7 +53,7 @@
225
226 aliases {
227 serial0 = &uart0;
228 - /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
229 + ethernet0 = &emac;
230 ethernet1 = &sdiowifi;
231 };
232
233 diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
234 index ed36dcab80f1..f51919974183 100644
235 --- a/arch/arm/plat-pxa/ssp.c
236 +++ b/arch/arm/plat-pxa/ssp.c
237 @@ -190,8 +190,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
238 if (ssp == NULL)
239 return -ENODEV;
240
241 - iounmap(ssp->mmio_base);
242 -
243 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
244 release_mem_region(res->start, resource_size(res));
245
246 @@ -201,7 +199,6 @@ static int pxa_ssp_remove(struct platform_device *pdev)
247 list_del(&ssp->node);
248 mutex_unlock(&ssp_lock);
249
250 - kfree(ssp);
251 return 0;
252 }
253
254 diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
255 index f4964bee6a1a..e80a792827ed 100644
256 --- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
257 +++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
258 @@ -118,6 +118,7 @@
259 reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
260 clocks = <&pmic>;
261 clock-names = "ext_clock";
262 + post-power-on-delay-ms = <10>;
263 power-off-delay-us = <10>;
264 };
265
266 @@ -300,7 +301,6 @@
267
268 dwmmc_0: dwmmc0@f723d000 {
269 cap-mmc-highspeed;
270 - mmc-hs200-1_8v;
271 non-removable;
272 bus-width = <0x8>;
273 vmmc-supply = <&ldo19>;
274 diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
275 index cd3865e7a270..8c86c41a0d25 100644
276 --- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
277 +++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
278 @@ -399,7 +399,7 @@
279 };
280
281 intc: interrupt-controller@9bc0000 {
282 - compatible = "arm,gic-v3";
283 + compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
284 #interrupt-cells = <3>;
285 interrupt-controller;
286 #redistributor-regions = <1>;
287 diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
288 index cbd35c00b4af..33cb0281c39c 100644
289 --- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
290 +++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
291 @@ -1161,6 +1161,9 @@
292 <&cpg CPG_CORE R8A7796_CLK_S3D1>,
293 <&scif_clk>;
294 clock-names = "fck", "brg_int", "scif_clk";
295 + dmas = <&dmac1 0x13>, <&dmac1 0x12>,
296 + <&dmac2 0x13>, <&dmac2 0x12>;
297 + dma-names = "tx", "rx", "tx", "rx";
298 power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
299 resets = <&cpg 310>;
300 status = "disabled";
301 diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
302 index 0cd44461a0bd..f60f08ba1a6f 100644
303 --- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
304 +++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
305 @@ -951,6 +951,9 @@
306 <&cpg CPG_CORE R8A77965_CLK_S3D1>,
307 <&scif_clk>;
308 clock-names = "fck", "brg_int", "scif_clk";
309 + dmas = <&dmac1 0x13>, <&dmac1 0x12>,
310 + <&dmac2 0x13>, <&dmac2 0x12>;
311 + dma-names = "tx", "rx", "tx", "rx";
312 power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
313 resets = <&cpg 310>;
314 status = "disabled";
315 diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
316 index eb5e8bddb610..8954c8c6f547 100644
317 --- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
318 +++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
319 @@ -101,6 +101,7 @@
320 sdio_pwrseq: sdio_pwrseq {
321 compatible = "mmc-pwrseq-simple";
322 reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
323 + post-power-on-delay-ms = <10>;
324 };
325 };
326
327 diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
328 index b5a367d4bba6..30bb13797034 100644
329 --- a/arch/arm64/kernel/probes/kprobes.c
330 +++ b/arch/arm64/kernel/probes/kprobes.c
331 @@ -478,13 +478,13 @@ bool arch_within_kprobe_blacklist(unsigned long addr)
332 addr < (unsigned long)__entry_text_end) ||
333 (addr >= (unsigned long)__idmap_text_start &&
334 addr < (unsigned long)__idmap_text_end) ||
335 + (addr >= (unsigned long)__hyp_text_start &&
336 + addr < (unsigned long)__hyp_text_end) ||
337 !!search_exception_tables(addr))
338 return true;
339
340 if (!is_kernel_in_hyp_mode()) {
341 - if ((addr >= (unsigned long)__hyp_text_start &&
342 - addr < (unsigned long)__hyp_text_end) ||
343 - (addr >= (unsigned long)__hyp_idmap_text_start &&
344 + if ((addr >= (unsigned long)__hyp_idmap_text_start &&
345 addr < (unsigned long)__hyp_idmap_text_end))
346 return true;
347 }
348 diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
349 index 50cff3cbcc6d..4f7b1fa31cf5 100644
350 --- a/arch/mips/boot/dts/ingenic/ci20.dts
351 +++ b/arch/mips/boot/dts/ingenic/ci20.dts
352 @@ -76,7 +76,7 @@
353 status = "okay";
354
355 pinctrl-names = "default";
356 - pinctrl-0 = <&pins_uart2>;
357 + pinctrl-0 = <&pins_uart3>;
358 };
359
360 &uart4 {
361 @@ -196,9 +196,9 @@
362 bias-disable;
363 };
364
365 - pins_uart2: uart2 {
366 - function = "uart2";
367 - groups = "uart2-data", "uart2-hwflow";
368 + pins_uart3: uart3 {
369 + function = "uart3";
370 + groups = "uart3-data", "uart3-hwflow";
371 bias-disable;
372 };
373
374 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
375 index d4f7fd4550e1..85522c137f19 100644
376 --- a/arch/mips/kernel/process.c
377 +++ b/arch/mips/kernel/process.c
378 @@ -371,7 +371,7 @@ static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
379 static int get_frame_info(struct mips_frame_info *info)
380 {
381 bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
382 - union mips_instruction insn, *ip, *ip_end;
383 + union mips_instruction insn, *ip;
384 const unsigned int max_insns = 128;
385 unsigned int last_insn_size = 0;
386 unsigned int i;
387 @@ -384,10 +384,9 @@ static int get_frame_info(struct mips_frame_info *info)
388 if (!ip)
389 goto err;
390
391 - ip_end = (void *)ip + info->func_size;
392 -
393 - for (i = 0; i < max_insns && ip < ip_end; i++) {
394 + for (i = 0; i < max_insns; i++) {
395 ip = (void *)ip + last_insn_size;
396 +
397 if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
398 insn.word = ip->halfword[0] << 16;
399 last_insn_size = 2;
400 diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
401 index 3fe4af8147d2..c23578a37b44 100644
402 --- a/arch/riscv/include/asm/processor.h
403 +++ b/arch/riscv/include/asm/processor.h
404 @@ -22,7 +22,7 @@
405 * This decides where the kernel will search for a free chunk of vm
406 * space during mmap's.
407 */
408 -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
409 +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
410
411 #define STACK_TOP TASK_SIZE
412 #define STACK_TOP_MAX STACK_TOP
413 diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
414 index b2d26d9d8489..9713d4e8c22b 100644
415 --- a/arch/riscv/kernel/setup.c
416 +++ b/arch/riscv/kernel/setup.c
417 @@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
418 BUG_ON(mem_size == 0);
419
420 set_max_mapnr(PFN_DOWN(mem_size));
421 - max_low_pfn = memblock_end_of_DRAM();
422 + max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
423
424 #ifdef CONFIG_BLK_DEV_INITRD
425 setup_initrd();
426 diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
427 index 58a522f9bcc3..200a4b315e15 100644
428 --- a/arch/riscv/mm/init.c
429 +++ b/arch/riscv/mm/init.c
430 @@ -29,7 +29,8 @@ static void __init zone_sizes_init(void)
431 unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
432
433 #ifdef CONFIG_ZONE_DMA32
434 - max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
435 + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
436 + (unsigned long) PFN_PHYS(max_low_pfn)));
437 #endif
438 max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
439
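
Both RISC-V hunks above fix one unit mix-up: memblock_end_of_DRAM() returns a physical address, while max_low_pfn holds a page frame number, so setup.c must convert with PFN_DOWN() and mm/init.c must convert back with PFN_PHYS() before comparing against 4 GiB. A standalone sketch of the conversions (userspace analog; 4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((unsigned long)(x) << PAGE_SHIFT)

int main(void)
{
	unsigned long end_of_dram = 0x80000000UL; /* example: DRAM ends at 2 GiB */

	/* Storing the raw address in a PFN variable is off by PAGE_SHIFT. */
	printf("max_low_pfn  = %#lx\n", PFN_DOWN(end_of_dram));
	printf("back to phys = %#lx\n", PFN_PHYS(PFN_DOWN(end_of_dram)));
	return 0;
}
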
440 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
441 index 64037895b085..f105ae8651c9 100644
442 --- a/arch/x86/boot/compressed/head_64.S
443 +++ b/arch/x86/boot/compressed/head_64.S
444 @@ -600,6 +600,14 @@ ENTRY(trampoline_32bit_src)
445 leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
446 movl %eax, %cr3
447 3:
448 + /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
449 + pushl %ecx
450 + movl $MSR_EFER, %ecx
451 + rdmsr
452 + btsl $_EFER_LME, %eax
453 + wrmsr
454 + popl %ecx
455 +
456 /* Enable PAE and LA57 (if required) paging modes */
457 movl $X86_CR4_PAE, %eax
458 cmpl $0, %edx
459 diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
460 index 91f75638f6e6..6ff7e81b5628 100644
461 --- a/arch/x86/boot/compressed/pgtable.h
462 +++ b/arch/x86/boot/compressed/pgtable.h
463 @@ -6,7 +6,7 @@
464 #define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
465
466 #define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
467 -#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
468 +#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
469
470 #define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
471
472 diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
473 index c04a8813cff9..a41554350893 100644
474 --- a/arch/x86/events/core.c
475 +++ b/arch/x86/events/core.c
476 @@ -1970,7 +1970,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
477 */
478 static void free_fake_cpuc(struct cpu_hw_events *cpuc)
479 {
480 - kfree(cpuc->shared_regs);
481 + intel_cpuc_finish(cpuc);
482 kfree(cpuc);
483 }
484
485 @@ -1982,14 +1982,11 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
486 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
487 if (!cpuc)
488 return ERR_PTR(-ENOMEM);
489 -
490 - /* only needed, if we have extra_regs */
491 - if (x86_pmu.extra_regs) {
492 - cpuc->shared_regs = allocate_shared_regs(cpu);
493 - if (!cpuc->shared_regs)
494 - goto error;
495 - }
496 cpuc->is_fake = 1;
497 +
498 + if (intel_cpuc_prepare(cpuc, cpu))
499 + goto error;
500 +
501 return cpuc;
502 error:
503 free_fake_cpuc(cpuc);
504 diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
505 index fbd7551a8d44..220b40b75e6f 100644
506 --- a/arch/x86/events/intel/core.c
507 +++ b/arch/x86/events/intel/core.c
508 @@ -1995,6 +1995,39 @@ static void intel_pmu_nhm_enable_all(int added)
509 intel_pmu_enable_all(added);
510 }
511
512 +static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
513 +{
514 + u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
515 +
516 + if (cpuc->tfa_shadow != val) {
517 + cpuc->tfa_shadow = val;
518 + wrmsrl(MSR_TSX_FORCE_ABORT, val);
519 + }
520 +}
521 +
522 +static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
523 +{
524 + /*
525 + * We're going to use PMC3, make sure TFA is set before we touch it.
526 + */
527 + if (cntr == 3 && !cpuc->is_fake)
528 + intel_set_tfa(cpuc, true);
529 +}
530 +
531 +static void intel_tfa_pmu_enable_all(int added)
532 +{
533 + struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
534 +
535 + /*
536 + * If we find PMC3 is no longer used when we enable the PMU, we can
537 + * clear TFA.
538 + */
539 + if (!test_bit(3, cpuc->active_mask))
540 + intel_set_tfa(cpuc, false);
541 +
542 + intel_pmu_enable_all(added);
543 +}
544 +
545 static inline u64 intel_pmu_get_status(void)
546 {
547 u64 status;
548 @@ -2652,6 +2685,35 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
549 raw_spin_unlock(&excl_cntrs->lock);
550 }
551
552 +static struct event_constraint *
553 +dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
554 +{
555 + WARN_ON_ONCE(!cpuc->constraint_list);
556 +
557 + if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
558 + struct event_constraint *cx;
559 +
560 + /*
561 + * grab pre-allocated constraint entry
562 + */
563 + cx = &cpuc->constraint_list[idx];
564 +
565 + /*
566 + * initialize dynamic constraint
567 + * with static constraint
568 + */
569 + *cx = *c;
570 +
571 + /*
572 + * mark constraint as dynamic
573 + */
574 + cx->flags |= PERF_X86_EVENT_DYNAMIC;
575 + c = cx;
576 + }
577 +
578 + return c;
579 +}
580 +
581 static struct event_constraint *
582 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
583 int idx, struct event_constraint *c)
584 @@ -2682,27 +2744,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
585 * only needed when constraint has not yet
586 * been cloned (marked dynamic)
587 */
588 - if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
589 - struct event_constraint *cx;
590 -
591 - /*
592 - * grab pre-allocated constraint entry
593 - */
594 - cx = &cpuc->constraint_list[idx];
595 -
596 - /*
597 - * initialize dynamic constraint
598 - * with static constraint
599 - */
600 - *cx = *c;
601 -
602 - /*
603 - * mark constraint as dynamic, so we
604 - * can free it later on
605 - */
606 - cx->flags |= PERF_X86_EVENT_DYNAMIC;
607 - c = cx;
608 - }
609 + c = dyn_constraint(cpuc, c, idx);
610
611 /*
612 * From here on, the constraint is dynamic.
613 @@ -3229,6 +3271,26 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
614 return c;
615 }
616
617 +static bool allow_tsx_force_abort = true;
618 +
619 +static struct event_constraint *
620 +tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
621 + struct perf_event *event)
622 +{
623 + struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
624 +
625 + /*
626 + * Without TFA we must not use PMC3.
627 + */
628 + if (!allow_tsx_force_abort && test_bit(3, c->idxmsk)) {
629 + c = dyn_constraint(cpuc, c, idx);
630 + c->idxmsk64 &= ~(1ULL << 3);
631 + c->weight--;
632 + }
633 +
634 + return c;
635 +}
636 +
637 /*
638 * Broadwell:
639 *
640 @@ -3282,7 +3344,7 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
641 return x86_event_sysfs_show(page, config, event);
642 }
643
644 -struct intel_shared_regs *allocate_shared_regs(int cpu)
645 +static struct intel_shared_regs *allocate_shared_regs(int cpu)
646 {
647 struct intel_shared_regs *regs;
648 int i;
649 @@ -3314,23 +3376,24 @@ static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
650 return c;
651 }
652
653 -static int intel_pmu_cpu_prepare(int cpu)
654 -{
655 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
656
657 +int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
658 +{
659 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
660 cpuc->shared_regs = allocate_shared_regs(cpu);
661 if (!cpuc->shared_regs)
662 goto err;
663 }
664
665 - if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
666 + if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
667 size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
668
669 - cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
670 + cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
671 if (!cpuc->constraint_list)
672 goto err_shared_regs;
673 + }
674
675 + if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
676 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
677 if (!cpuc->excl_cntrs)
678 goto err_constraint_list;
679 @@ -3352,6 +3415,11 @@ err:
680 return -ENOMEM;
681 }
682
683 +static int intel_pmu_cpu_prepare(int cpu)
684 +{
685 + return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
686 +}
687 +
688 static void flip_smm_bit(void *data)
689 {
690 unsigned long set = *(unsigned long *)data;
691 @@ -3423,9 +3491,8 @@ static void intel_pmu_cpu_starting(int cpu)
692 }
693 }
694
695 -static void free_excl_cntrs(int cpu)
696 +static void free_excl_cntrs(struct cpu_hw_events *cpuc)
697 {
698 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
699 struct intel_excl_cntrs *c;
700
701 c = cpuc->excl_cntrs;
702 @@ -3433,9 +3500,10 @@ static void free_excl_cntrs(int cpu)
703 if (c->core_id == -1 || --c->refcnt == 0)
704 kfree(c);
705 cpuc->excl_cntrs = NULL;
706 - kfree(cpuc->constraint_list);
707 - cpuc->constraint_list = NULL;
708 }
709 +
710 + kfree(cpuc->constraint_list);
711 + cpuc->constraint_list = NULL;
712 }
713
714 static void intel_pmu_cpu_dying(int cpu)
715 @@ -3443,9 +3511,8 @@ static void intel_pmu_cpu_dying(int cpu)
716 fini_debug_store_on_cpu(cpu);
717 }
718
719 -static void intel_pmu_cpu_dead(int cpu)
720 +void intel_cpuc_finish(struct cpu_hw_events *cpuc)
721 {
722 - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
723 struct intel_shared_regs *pc;
724
725 pc = cpuc->shared_regs;
726 @@ -3455,7 +3522,12 @@ static void intel_pmu_cpu_dead(int cpu)
727 cpuc->shared_regs = NULL;
728 }
729
730 - free_excl_cntrs(cpu);
731 + free_excl_cntrs(cpuc);
732 +}
733 +
734 +static void intel_pmu_cpu_dead(int cpu)
735 +{
736 + intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
737 }
738
739 static void intel_pmu_sched_task(struct perf_event_context *ctx,
740 @@ -3917,8 +3989,11 @@ static struct attribute *intel_pmu_caps_attrs[] = {
741 NULL
742 };
743
744 +DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
745 +
746 static struct attribute *intel_pmu_attrs[] = {
747 &dev_attr_freeze_on_smi.attr,
748 + NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
749 NULL,
750 };
751
752 @@ -4374,6 +4449,15 @@ __init int intel_pmu_init(void)
753 x86_pmu.cpu_events = get_hsw_events_attrs();
754 intel_pmu_pebs_data_source_skl(
755 boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
756 +
757 + if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
758 + x86_pmu.flags |= PMU_FL_TFA;
759 + x86_pmu.get_event_constraints = tfa_get_event_constraints;
760 + x86_pmu.enable_all = intel_tfa_pmu_enable_all;
761 + x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
762 + intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
763 + }
764 +
765 pr_cont("Skylake events, ");
766 name = "skylake";
767 break;
768 @@ -4515,7 +4599,7 @@ static __init int fixup_ht_bug(void)
769 hardlockup_detector_perf_restart();
770
771 for_each_online_cpu(c)
772 - free_excl_cntrs(c);
773 + free_excl_cntrs(&per_cpu(cpu_hw_events, c));
774
775 cpus_read_unlock();
776 pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
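
The core of the TSX force-abort workaround above is intel_set_tfa(): a shadow copy of the MSR value is kept in cpu_hw_events so the expensive wrmsrl() only happens on a real state change. A hedged userspace analog of that shadow-register pattern (wrmsr_sim() stands in for the real MSR write, which cannot run outside the kernel):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t msr_tfa;            /* pretend hardware register */
static unsigned long wrmsr_count;   /* how often we "hit hardware" */

static void wrmsr_sim(uint64_t val) { msr_tfa = val; wrmsr_count++; }

struct cpuc { uint64_t tfa_shadow; };

static void set_tfa(struct cpuc *c, bool on)
{
	uint64_t val = on ? 1 : 0;  /* MSR_TFA_RTM_FORCE_ABORT is bit 0 */

	if (c->tfa_shadow != val) { /* skip redundant writes */
		c->tfa_shadow = val;
		wrmsr_sim(val);
	}
}

int main(void)
{
	struct cpuc c = { 0 };

	set_tfa(&c, true);
	set_tfa(&c, true);          /* no-op: shadow already matches */
	set_tfa(&c, false);
	printf("MSR writes: %lu (3 calls, 2 writes)\n", wrmsr_count);
	return 0;
}
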
777 diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
778 index 0ee3a441ad79..5c424009b71f 100644
779 --- a/arch/x86/events/perf_event.h
780 +++ b/arch/x86/events/perf_event.h
781 @@ -242,6 +242,11 @@ struct cpu_hw_events {
782 struct intel_excl_cntrs *excl_cntrs;
783 int excl_thread_id; /* 0 or 1 */
784
785 + /*
786 + * SKL TSX_FORCE_ABORT shadow
787 + */
788 + u64 tfa_shadow;
789 +
790 /*
791 * AMD specific bits
792 */
793 @@ -679,6 +684,7 @@ do { \
794 #define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
795 #define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
796 #define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
797 +#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
798
799 #define EVENT_VAR(_id) event_attr_##_id
800 #define EVENT_PTR(_id) &event_attr_##_id.attr.attr
801 @@ -887,7 +893,8 @@ struct event_constraint *
802 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
803 struct perf_event *event);
804
805 -struct intel_shared_regs *allocate_shared_regs(int cpu);
806 +extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
807 +extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
808
809 int intel_pmu_init(void);
810
811 @@ -1023,9 +1030,13 @@ static inline int intel_pmu_init(void)
812 return 0;
813 }
814
815 -static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
816 +static inline int intel_cpuc_prepare(struct cpu_hw_event *cpuc, int cpu)
817 +{
818 + return 0;
819 +}
820 +
821 +static inline void intel_cpuc_finish(struct cpu_hw_event *cpuc)
822 {
823 - return NULL;
824 }
825
826 static inline int is_ht_workaround_enabled(void)
827 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
828 index 89a048c2faec..7b31ee5223fc 100644
829 --- a/arch/x86/include/asm/cpufeatures.h
830 +++ b/arch/x86/include/asm/cpufeatures.h
831 @@ -340,6 +340,7 @@
832 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
833 #define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
834 #define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
835 +#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
836 #define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
837 #define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
838 #define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
839 diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
840 index 1f9de7635bcb..f14ca0be1e3f 100644
841 --- a/arch/x86/include/asm/msr-index.h
842 +++ b/arch/x86/include/asm/msr-index.h
843 @@ -629,6 +629,12 @@
844
845 #define MSR_IA32_TSC_DEADLINE 0x000006E0
846
847 +
848 +#define MSR_TSX_FORCE_ABORT 0x0000010F
849 +
850 +#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
851 +#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
852 +
853 /* P4/Xeon+ specific */
854 #define MSR_IA32_MCG_EAX 0x00000180
855 #define MSR_IA32_MCG_EBX 0x00000181
856 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
857 index b99d497e342d..0b6352aabbd3 100644
858 --- a/arch/x86/include/asm/page_64_types.h
859 +++ b/arch/x86/include/asm/page_64_types.h
860 @@ -7,7 +7,11 @@
861 #endif
862
863 #ifdef CONFIG_KASAN
864 +#ifdef CONFIG_KASAN_EXTRA
865 +#define KASAN_STACK_ORDER 2
866 +#else
867 #define KASAN_STACK_ORDER 1
868 +#endif
869 #else
870 #define KASAN_STACK_ORDER 0
871 #endif
872 diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
873 index 07b5fc00b188..a4e7e100ed26 100644
874 --- a/arch/x86/kernel/cpu/microcode/amd.c
875 +++ b/arch/x86/kernel/cpu/microcode/amd.c
876 @@ -707,7 +707,7 @@ load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
877 if (!p) {
878 return ret;
879 } else {
880 - if (boot_cpu_data.microcode == p->patch_id)
881 + if (boot_cpu_data.microcode >= p->patch_id)
882 return ret;
883
884 ret = UCODE_NEW;
885 diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
886 index 278cd07228dd..9490a2845f14 100644
887 --- a/arch/x86/kernel/kexec-bzimage64.c
888 +++ b/arch/x86/kernel/kexec-bzimage64.c
889 @@ -167,6 +167,9 @@ setup_efi_state(struct boot_params *params, unsigned long params_load_addr,
890 struct efi_info *current_ei = &boot_params.efi_info;
891 struct efi_info *ei = &params->efi_info;
892
893 + if (!efi_enabled(EFI_RUNTIME_SERVICES))
894 + return 0;
895 +
896 if (!current_ei->efi_memmap_size)
897 return 0;
898
899 diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
900 index 13f4485ca388..bd372e896557 100644
901 --- a/arch/x86/pci/fixup.c
902 +++ b/arch/x86/pci/fixup.c
903 @@ -641,6 +641,22 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334b, quirk_no_aersid);
904 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
905 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
906
907 +static void quirk_intel_th_dnv(struct pci_dev *dev)
908 +{
909 + struct resource *r = &dev->resource[4];
910 +
911 + /*
912 + * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
913 + * appears to be 4 MB in reality.
914 + */
915 + if (r->end == r->start + 0x7ff) {
916 + r->start = 0;
917 + r->end = 0x3fffff;
918 + r->flags |= IORESOURCE_UNSET;
919 + }
920 +}
921 +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
922 +
923 #ifdef CONFIG_PHYS_ADDR_T_64BIT
924
925 #define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
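
quirk_intel_th_dnv() above keys purely off the resource length: a resource spans [start, end], so its size is end - start + 1, and end == start + 0x7ff is the 2 KiB the Denverton hardware mis-reports for a window that is really 4 MiB. A quick check of that arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long start = 0;

	printf("reported: %lu KiB\n", (0x7fful - start + 1) >> 10);    /* 2 */
	printf("fixed up: %lu MiB\n", (0x3ffffful - start + 1) >> 20); /* 4 */
	return 0;
}
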
926 diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
927 index 11fed6c06a7c..b5938160fb3d 100644
928 --- a/arch/xtensa/configs/smp_lx200_defconfig
929 +++ b/arch/xtensa/configs/smp_lx200_defconfig
930 @@ -33,6 +33,7 @@ CONFIG_SMP=y
931 CONFIG_HOTPLUG_CPU=y
932 # CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
933 # CONFIG_PCI is not set
934 +CONFIG_VECTORS_OFFSET=0x00002000
935 CONFIG_XTENSA_PLATFORM_XTFPGA=y
936 CONFIG_CMDLINE_BOOL=y
937 CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
938 diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
939 index 9053a5622d2c..5bd38ea2da38 100644
940 --- a/arch/xtensa/kernel/head.S
941 +++ b/arch/xtensa/kernel/head.S
942 @@ -280,12 +280,13 @@ should_never_return:
943
944 movi a2, cpu_start_ccount
945 1:
946 + memw
947 l32i a3, a2, 0
948 beqi a3, 0, 1b
949 movi a3, 0
950 s32i a3, a2, 0
951 - memw
952 1:
953 + memw
954 l32i a3, a2, 0
955 beqi a3, 0, 1b
956 wsr a3, ccount
957 @@ -321,11 +322,13 @@ ENTRY(cpu_restart)
958 rsr a0, prid
959 neg a2, a0
960 movi a3, cpu_start_id
961 + memw
962 s32i a2, a3, 0
963 #if XCHAL_DCACHE_IS_WRITEBACK
964 dhwbi a3, 0
965 #endif
966 1:
967 + memw
968 l32i a2, a3, 0
969 dhi a3, 0
970 bne a2, a0, 1b
971 diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
972 index 932d64689bac..be1f280c322c 100644
973 --- a/arch/xtensa/kernel/smp.c
974 +++ b/arch/xtensa/kernel/smp.c
975 @@ -83,7 +83,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
976 {
977 unsigned i;
978
979 - for (i = 0; i < max_cpus; ++i)
980 + for_each_possible_cpu(i)
981 set_cpu_present(i, true);
982 }
983
984 @@ -96,6 +96,11 @@ void __init smp_init_cpus(void)
985 pr_info("%s: Core Count = %d\n", __func__, ncpus);
986 pr_info("%s: Core Id = %d\n", __func__, core_id);
987
988 + if (ncpus > NR_CPUS) {
989 + ncpus = NR_CPUS;
990 pr_info("%s: limiting core count to %d\n", __func__, ncpus);
991 + }
992 +
993 for (i = 0; i < ncpus; ++i)
994 set_cpu_possible(i, true);
995 }
996 @@ -195,9 +200,11 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
997 int i;
998
999 #ifdef CONFIG_HOTPLUG_CPU
1000 - cpu_start_id = cpu;
1001 - system_flush_invalidate_dcache_range(
1002 - (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
1003 + WRITE_ONCE(cpu_start_id, cpu);
1004 + /* Pairs with the third memw in the cpu_restart */
1005 + mb();
1006 + system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
1007 + sizeof(cpu_start_id));
1008 #endif
1009 smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
1010
1011 @@ -206,18 +213,21 @@ static int boot_secondary(unsigned int cpu, struct task_struct *ts)
1012 ccount = get_ccount();
1013 while (!ccount);
1014
1015 - cpu_start_ccount = ccount;
1016 + WRITE_ONCE(cpu_start_ccount, ccount);
1017
1018 - while (time_before(jiffies, timeout)) {
1019 + do {
1020 + /*
1021 + * Pairs with the first two memws in the
1022 + * .Lboot_secondary.
1023 + */
1024 mb();
1025 - if (!cpu_start_ccount)
1026 - break;
1027 - }
1028 + ccount = READ_ONCE(cpu_start_ccount);
1029 + } while (ccount && time_before(jiffies, timeout));
1030
1031 - if (cpu_start_ccount) {
1032 + if (ccount) {
1033 smp_call_function_single(0, mx_cpu_stop,
1034 - (void *)cpu, 1);
1035 - cpu_start_ccount = 0;
1036 + (void *)cpu, 1);
1037 + WRITE_ONCE(cpu_start_ccount, 0);
1038 return -EIO;
1039 }
1040 }
1041 @@ -237,6 +247,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
1042 pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
1043 __func__, cpu, idle, start_info.stack);
1044
1045 + init_completion(&cpu_running);
1046 ret = boot_secondary(cpu, idle);
1047 if (ret == 0) {
1048 wait_for_completion_timeout(&cpu_running,
1049 @@ -298,8 +309,10 @@ void __cpu_die(unsigned int cpu)
1050 unsigned long timeout = jiffies + msecs_to_jiffies(1000);
1051 while (time_before(jiffies, timeout)) {
1052 system_invalidate_dcache_range((unsigned long)&cpu_start_id,
1053 - sizeof(cpu_start_id));
1054 - if (cpu_start_id == -cpu) {
1055 + sizeof(cpu_start_id));
1056 + /* Pairs with the second memw in the cpu_restart */
1057 + mb();
1058 + if (READ_ONCE(cpu_start_id) == -cpu) {
1059 platform_cpu_kill(cpu);
1060 return;
1061 }
1062 diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
1063 index fd524a54d2ab..378186b5eb40 100644
1064 --- a/arch/xtensa/kernel/time.c
1065 +++ b/arch/xtensa/kernel/time.c
1066 @@ -89,7 +89,7 @@ static int ccount_timer_shutdown(struct clock_event_device *evt)
1067 container_of(evt, struct ccount_timer, evt);
1068
1069 if (timer->irq_enabled) {
1070 - disable_irq(evt->irq);
1071 + disable_irq_nosync(evt->irq);
1072 timer->irq_enabled = 0;
1073 }
1074 return 0;
1075 diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
1076 index 19923f8a029d..b154e057ca67 100644
1077 --- a/block/blk-iolatency.c
1078 +++ b/block/blk-iolatency.c
1079 @@ -72,6 +72,7 @@
1080 #include <linux/sched/loadavg.h>
1081 #include <linux/sched/signal.h>
1082 #include <trace/events/block.h>
1083 +#include <linux/blk-mq.h>
1084 #include "blk-rq-qos.h"
1085 #include "blk-stat.h"
1086
1087 @@ -568,6 +569,9 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
1088 return;
1089
1090 enabled = blk_iolatency_enabled(iolat->blkiolat);
1091 + if (!enabled)
1092 + return;
1093 +
1094 while (blkg && blkg->parent) {
1095 iolat = blkg_to_lat(blkg);
1096 if (!iolat) {
1097 @@ -577,7 +581,7 @@ static void blkcg_iolatency_done_bio(struct rq_qos *rqos, struct bio *bio)
1098 rqw = &iolat->rq_wait;
1099
1100 atomic_dec(&rqw->inflight);
1101 - if (!enabled || iolat->min_lat_nsec == 0)
1102 + if (iolat->min_lat_nsec == 0)
1103 goto next;
1104 iolatency_record_time(iolat, &bio->bi_issue, now,
1105 issue_as_root);
1106 @@ -721,10 +725,13 @@ int blk_iolatency_init(struct request_queue *q)
1107 return 0;
1108 }
1109
1110 -static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
1111 +/*
1112 + * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
1113 + * return 0.
1114 + */
1115 +static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
1116 {
1117 struct iolatency_grp *iolat = blkg_to_lat(blkg);
1118 - struct blk_iolatency *blkiolat = iolat->blkiolat;
1119 u64 oldval = iolat->min_lat_nsec;
1120
1121 iolat->min_lat_nsec = val;
1122 @@ -733,9 +740,10 @@ static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
1123 BLKIOLATENCY_MAX_WIN_SIZE);
1124
1125 if (!oldval && val)
1126 - atomic_inc(&blkiolat->enabled);
1127 + return 1;
1128 if (oldval && !val)
1129 - atomic_dec(&blkiolat->enabled);
1130 + return -1;
1131 + return 0;
1132 }
1133
1134 static void iolatency_clear_scaling(struct blkcg_gq *blkg)
1135 @@ -768,6 +776,7 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
1136 u64 lat_val = 0;
1137 u64 oldval;
1138 int ret;
1139 + int enable = 0;
1140
1141 ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
1142 if (ret)
1143 @@ -803,7 +812,12 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
1144 blkg = ctx.blkg;
1145 oldval = iolat->min_lat_nsec;
1146
1147 - iolatency_set_min_lat_nsec(blkg, lat_val);
1148 + enable = iolatency_set_min_lat_nsec(blkg, lat_val);
1149 + if (enable) {
1150 + WARN_ON_ONCE(!blk_get_queue(blkg->q));
1151 + blkg_get(blkg);
1152 + }
1153 +
1154 if (oldval != iolat->min_lat_nsec) {
1155 iolatency_clear_scaling(blkg);
1156 }
1157 @@ -811,6 +825,24 @@ static ssize_t iolatency_set_limit(struct kernfs_open_file *of, char *buf,
1158 ret = 0;
1159 out:
1160 blkg_conf_finish(&ctx);
1161 + if (ret == 0 && enable) {
1162 + struct iolatency_grp *tmp = blkg_to_lat(blkg);
1163 + struct blk_iolatency *blkiolat = tmp->blkiolat;
1164 +
1165 + blk_mq_freeze_queue(blkg->q);
1166 +
1167 + if (enable == 1)
1168 + atomic_inc(&blkiolat->enabled);
1169 + else if (enable == -1)
1170 + atomic_dec(&blkiolat->enabled);
1171 + else
1172 + WARN_ON_ONCE(1);
1173 +
1174 + blk_mq_unfreeze_queue(blkg->q);
1175 +
1176 + blkg_put(blkg);
1177 + blk_put_queue(blkg->q);
1178 + }
1179 return ret ?: nbytes;
1180 }
1181
1182 @@ -910,8 +942,14 @@ static void iolatency_pd_offline(struct blkg_policy_data *pd)
1183 {
1184 struct iolatency_grp *iolat = pd_to_lat(pd);
1185 struct blkcg_gq *blkg = lat_to_blkg(iolat);
1186 + struct blk_iolatency *blkiolat = iolat->blkiolat;
1187 + int ret;
1188
1189 - iolatency_set_min_lat_nsec(blkg, 0);
1190 + ret = iolatency_set_min_lat_nsec(blkg, 0);
1191 + if (ret == 1)
1192 + atomic_inc(&blkiolat->enabled);
1193 + if (ret == -1)
1194 + atomic_dec(&blkiolat->enabled);
1195 iolatency_clear_scaling(blkg);
1196 }
1197
1198 diff --git a/drivers/base/dd.c b/drivers/base/dd.c
1199 index 7caa1adaf62a..f5b74856784a 100644
1200 --- a/drivers/base/dd.c
1201 +++ b/drivers/base/dd.c
1202 @@ -963,9 +963,9 @@ static void __device_release_driver(struct device *dev, struct device *parent)
1203 drv->remove(dev);
1204
1205 device_links_driver_cleanup(dev);
1206 - dma_deconfigure(dev);
1207
1208 devres_release_all(dev);
1209 + dma_deconfigure(dev);
1210 dev->driver = NULL;
1211 dev_set_drvdata(dev, NULL);
1212 if (dev->pm_domain && dev->pm_domain->dismiss)
1213 diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
1214 index fa1a196350f1..3bf11a620094 100644
1215 --- a/drivers/clk/qcom/gcc-sdm845.c
1216 +++ b/drivers/clk/qcom/gcc-sdm845.c
1217 @@ -131,8 +131,8 @@ static const char * const gcc_parent_names_6[] = {
1218 "core_bi_pll_test_se",
1219 };
1220
1221 -static const char * const gcc_parent_names_7[] = {
1222 - "bi_tcxo",
1223 +static const char * const gcc_parent_names_7_ao[] = {
1224 + "bi_tcxo_ao",
1225 "gpll0",
1226 "gpll0_out_even",
1227 "core_bi_pll_test_se",
1228 @@ -144,6 +144,12 @@ static const char * const gcc_parent_names_8[] = {
1229 "core_bi_pll_test_se",
1230 };
1231
1232 +static const char * const gcc_parent_names_8_ao[] = {
1233 + "bi_tcxo_ao",
1234 + "gpll0",
1235 + "core_bi_pll_test_se",
1236 +};
1237 +
1238 static const struct parent_map gcc_parent_map_10[] = {
1239 { P_BI_TCXO, 0 },
1240 { P_GPLL0_OUT_MAIN, 1 },
1241 @@ -226,7 +232,7 @@ static struct clk_rcg2 gcc_cpuss_ahb_clk_src = {
1242 .freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
1243 .clkr.hw.init = &(struct clk_init_data){
1244 .name = "gcc_cpuss_ahb_clk_src",
1245 - .parent_names = gcc_parent_names_7,
1246 + .parent_names = gcc_parent_names_7_ao,
1247 .num_parents = 4,
1248 .ops = &clk_rcg2_ops,
1249 },
1250 @@ -245,7 +251,7 @@ static struct clk_rcg2 gcc_cpuss_rbcpr_clk_src = {
1251 .freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
1252 .clkr.hw.init = &(struct clk_init_data){
1253 .name = "gcc_cpuss_rbcpr_clk_src",
1254 - .parent_names = gcc_parent_names_8,
1255 + .parent_names = gcc_parent_names_8_ao,
1256 .num_parents = 3,
1257 .ops = &clk_rcg2_ops,
1258 },
1259 diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
1260 index ccfb4d9a152a..079f0beda8b6 100644
1261 --- a/drivers/clk/ti/divider.c
1262 +++ b/drivers/clk/ti/divider.c
1263 @@ -367,8 +367,10 @@ int ti_clk_parse_divider_data(int *div_table, int num_dividers, int max_div,
1264 num_dividers = i;
1265
1266 tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
1267 - if (!tmp)
1268 + if (!tmp) {
1269 + *table = ERR_PTR(-ENOMEM);
1270 return -ENOMEM;
1271 + }
1272
1273 valid_div = 0;
1274 *width = 0;
1275 @@ -403,6 +405,7 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
1276 {
1277 struct clk_omap_divider *div;
1278 struct clk_omap_reg *reg;
1279 + int ret;
1280
1281 if (!setup)
1282 return NULL;
1283 @@ -422,6 +425,12 @@ struct clk_hw *ti_clk_build_component_div(struct ti_clk_divider *setup)
1284 div->flags |= CLK_DIVIDER_POWER_OF_TWO;
1285
1286 div->table = _get_div_table_from_setup(setup, &div->width);
1287 + if (IS_ERR(div->table)) {
1288 + ret = PTR_ERR(div->table);
1289 + kfree(div);
1290 + return ERR_PTR(ret);
1291 + }
1292 +
1293
1294 div->shift = setup->bit_shift;
1295 div->latch = -EINVAL;
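
The divider fix depends on the kernel's ERR_PTR convention: _get_div_table_from_setup() can now hand back NULL ("no table"), a valid table, or an encoded -ENOMEM, and the caller tells the last case apart with IS_ERR()/PTR_ERR(). A self-contained userspace rendering of that idiom (macros modeled on include/linux/err.h):

#include <errno.h>
#include <stdio.h>

#define ERR_PTR(err) ((void *)(long)(err))
#define PTR_ERR(ptr) ((long)(ptr))
#define IS_ERR(ptr)  ((unsigned long)(ptr) >= (unsigned long)-4095L)

static int *get_div_table(int fail)
{
	if (fail)
		return ERR_PTR(-ENOMEM); /* allocation failed */
	return NULL;                     /* legitimately no table */
}

int main(void)
{
	int *table = get_div_table(1);

	if (IS_ERR(table))
		printf("table build failed: %ld\n", PTR_ERR(table));
	else if (!table)
		printf("no divider table\n");
	return 0;
}
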
1296 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1297 index 4bf72561667c..a75b95fac3bd 100644
1298 --- a/drivers/dma/at_xdmac.c
1299 +++ b/drivers/dma/at_xdmac.c
1300 @@ -203,6 +203,7 @@ struct at_xdmac_chan {
1301 u32 save_cim;
1302 u32 save_cnda;
1303 u32 save_cndc;
1304 + u32 irq_status;
1305 unsigned long status;
1306 struct tasklet_struct tasklet;
1307 struct dma_slave_config sconfig;
1308 @@ -1580,8 +1581,8 @@ static void at_xdmac_tasklet(unsigned long data)
1309 struct at_xdmac_desc *desc;
1310 u32 error_mask;
1311
1312 - dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
1313 - __func__, atchan->status);
1314 + dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
1315 + __func__, atchan->irq_status);
1316
1317 error_mask = AT_XDMAC_CIS_RBEIS
1318 | AT_XDMAC_CIS_WBEIS
1319 @@ -1589,15 +1590,15 @@ static void at_xdmac_tasklet(unsigned long data)
1320
1321 if (at_xdmac_chan_is_cyclic(atchan)) {
1322 at_xdmac_handle_cyclic(atchan);
1323 - } else if ((atchan->status & AT_XDMAC_CIS_LIS)
1324 - || (atchan->status & error_mask)) {
1325 + } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
1326 + || (atchan->irq_status & error_mask)) {
1327 struct dma_async_tx_descriptor *txd;
1328
1329 - if (atchan->status & AT_XDMAC_CIS_RBEIS)
1330 + if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
1331 dev_err(chan2dev(&atchan->chan), "read bus error!!!");
1332 - if (atchan->status & AT_XDMAC_CIS_WBEIS)
1333 + if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
1334 dev_err(chan2dev(&atchan->chan), "write bus error!!!");
1335 - if (atchan->status & AT_XDMAC_CIS_ROIS)
1336 + if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
1337 dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
1338
1339 spin_lock_bh(&atchan->lock);
1340 @@ -1652,7 +1653,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1341 atchan = &atxdmac->chan[i];
1342 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1343 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1344 - atchan->status = chan_status & chan_imr;
1345 + atchan->irq_status = chan_status & chan_imr;
1346 dev_vdbg(atxdmac->dma.dev,
1347 "%s: chan%d: imr=0x%x, status=0x%x\n",
1348 __func__, i, chan_imr, chan_status);
1349 @@ -1666,7 +1667,7 @@ static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1350 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1351 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1352
1353 - if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1354 + if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1355 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1356
1357 tasklet_schedule(&atchan->tasklet);
1358 diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
1359 index aa1712beb0cc..7b7fba0c9253 100644
1360 --- a/drivers/dma/dmatest.c
1361 +++ b/drivers/dma/dmatest.c
1362 @@ -642,11 +642,9 @@ static int dmatest_func(void *data)
1363 srcs[i] = um->addr[i] + src_off;
1364 ret = dma_mapping_error(dev->dev, um->addr[i]);
1365 if (ret) {
1366 - dmaengine_unmap_put(um);
1367 result("src mapping error", total_tests,
1368 src_off, dst_off, len, ret);
1369 - failed_tests++;
1370 - continue;
1371 + goto error_unmap_continue;
1372 }
1373 um->to_cnt++;
1374 }
1375 @@ -661,11 +659,9 @@ static int dmatest_func(void *data)
1376 DMA_BIDIRECTIONAL);
1377 ret = dma_mapping_error(dev->dev, dsts[i]);
1378 if (ret) {
1379 - dmaengine_unmap_put(um);
1380 result("dst mapping error", total_tests,
1381 src_off, dst_off, len, ret);
1382 - failed_tests++;
1383 - continue;
1384 + goto error_unmap_continue;
1385 }
1386 um->bidi_cnt++;
1387 }
1388 @@ -693,12 +689,10 @@ static int dmatest_func(void *data)
1389 }
1390
1391 if (!tx) {
1392 - dmaengine_unmap_put(um);
1393 result("prep error", total_tests, src_off,
1394 dst_off, len, ret);
1395 msleep(100);
1396 - failed_tests++;
1397 - continue;
1398 + goto error_unmap_continue;
1399 }
1400
1401 done->done = false;
1402 @@ -707,12 +701,10 @@ static int dmatest_func(void *data)
1403 cookie = tx->tx_submit(tx);
1404
1405 if (dma_submit_error(cookie)) {
1406 - dmaengine_unmap_put(um);
1407 result("submit error", total_tests, src_off,
1408 dst_off, len, ret);
1409 msleep(100);
1410 - failed_tests++;
1411 - continue;
1412 + goto error_unmap_continue;
1413 }
1414 dma_async_issue_pending(chan);
1415
1416 @@ -725,16 +717,14 @@ static int dmatest_func(void *data)
1417 dmaengine_unmap_put(um);
1418 result("test timed out", total_tests, src_off, dst_off,
1419 len, 0);
1420 - failed_tests++;
1421 - continue;
1422 + goto error_unmap_continue;
1423 } else if (status != DMA_COMPLETE) {
1424 dmaengine_unmap_put(um);
1425 result(status == DMA_ERROR ?
1426 "completion error status" :
1427 "completion busy status", total_tests, src_off,
1428 dst_off, len, ret);
1429 - failed_tests++;
1430 - continue;
1431 + goto error_unmap_continue;
1432 }
1433
1434 dmaengine_unmap_put(um);
1435 @@ -779,6 +769,12 @@ static int dmatest_func(void *data)
1436 verbose_result("test passed", total_tests, src_off,
1437 dst_off, len, 0);
1438 }
1439 +
1440 + continue;
1441 +
1442 +error_unmap_continue:
1443 + dmaengine_unmap_put(um);
1444 + failed_tests++;
1445 }
1446 ktime = ktime_sub(ktime_get(), ktime);
1447 ktime = ktime_sub(ktime, comparetime);
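
The dmatest rework above replaces several copies of "dmaengine_unmap_put(um); failed_tests++; continue;" with one error_unmap_continue label: the standard C idiom of funneling every failure path through a single cleanup point so the bookkeeping cannot drift out of sync. A minimal standalone sketch of the same shape (illustrative only):

#include <stdlib.h>

static int failed_tests;

static void run_one(size_t len)
{
	char *src = malloc(len);
	char *dst = malloc(len);

	if (!src || !dst)
		goto error_free_continue;
	/* ... map buffers, submit the transfer, verify the result ... */
	free(src);
	free(dst);
	return;

error_free_continue:
	free(src); /* free(NULL) is a no-op, so partial setup is fine */
	free(dst);
	failed_tests++;
}

int main(void)
{
	run_one(64);
	return failed_tests ? 1 : 0;
}
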
1448 diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
1449 index 6bc8e6640d71..c51462f5aa1e 100644
1450 --- a/drivers/firmware/iscsi_ibft.c
1451 +++ b/drivers/firmware/iscsi_ibft.c
1452 @@ -542,6 +542,7 @@ static umode_t __init ibft_check_tgt_for(void *data, int type)
1453 case ISCSI_BOOT_TGT_NIC_ASSOC:
1454 case ISCSI_BOOT_TGT_CHAP_TYPE:
1455 rc = S_IRUGO;
1456 + break;
1457 case ISCSI_BOOT_TGT_NAME:
1458 if (tgt->tgt_name_len)
1459 rc = S_IRUGO;
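
The ibft change above is a one-line missing-break fix: without it, the ISCSI_BOOT_TGT_CHAP_TYPE case fell through into the ISCSI_BOOT_TGT_NAME checks. A tiny demo of the bug class, with hypothetical case values, showing how a missing break silently runs the next case's body:

#include <stdio.h>

static int classify(int type)
{
	int rc = 0;

	switch (type) {
	case 1:
		rc += 1;
		/* missing break here: execution falls through */
	case 2:
		rc += 2;
		break;
	}
	return rc;
}

int main(void)
{
	/* classify(1) returns 3, not 1, because of the fallthrough. */
	printf("type 1 -> %d\n", classify(1));
	return 0;
}
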
1460 diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
1461 index d4ad6d0e02a2..7e09ce75ffb2 100644
1462 --- a/drivers/gpio/gpio-vf610.c
1463 +++ b/drivers/gpio/gpio-vf610.c
1464 @@ -259,6 +259,7 @@ static int vf610_gpio_probe(struct platform_device *pdev)
1465 struct vf610_gpio_port *port;
1466 struct resource *iores;
1467 struct gpio_chip *gc;
1468 + int i;
1469 int ret;
1470
1471 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
1472 @@ -298,6 +299,10 @@ static int vf610_gpio_probe(struct platform_device *pdev)
1473 if (ret < 0)
1474 return ret;
1475
1476 + /* Mask all GPIO interrupts */
1477 + for (i = 0; i < gc->ngpio; i++)
1478 + vf610_gpio_writel(0, port->base + PORT_PCR(i));
1479 +
1480 /* Clear the interrupt status register for all GPIO's */
1481 vf610_gpio_writel(~0, port->base + PORT_ISFR);
1482
1483 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1484 index 7b4e657a95c7..c3df75a9f65d 100644
1485 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1486 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
1487 @@ -1443,7 +1443,8 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
1488 effective_mode &= ~S_IWUSR;
1489
1490 if ((adev->flags & AMD_IS_APU) &&
1491 - (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1492 + (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
1493 + attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
1494 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
1495 attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
1496 return 0;
1497 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1498 index 1c5d97f4b4dd..8dcf6227ab99 100644
1499 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1500 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
1501 @@ -37,6 +37,7 @@
1502 #include "amdgpu_display.h"
1503 #include <drm/amdgpu_drm.h>
1504 #include <linux/dma-buf.h>
1505 +#include <linux/dma-fence-array.h>
1506
1507 static const struct dma_buf_ops amdgpu_dmabuf_ops;
1508
1509 @@ -188,6 +189,48 @@ error:
1510 return ERR_PTR(ret);
1511 }
1512
1513 +static int
1514 +__reservation_object_make_exclusive(struct reservation_object *obj)
1515 +{
1516 + struct dma_fence **fences;
1517 + unsigned int count;
1518 + int r;
1519 +
1520 + if (!reservation_object_get_list(obj)) /* no shared fences to convert */
1521 + return 0;
1522 +
1523 + r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
1524 + if (r)
1525 + return r;
1526 +
1527 + if (count == 0) {
1528 + /* Now that was unexpected. */
1529 + } else if (count == 1) {
1530 + reservation_object_add_excl_fence(obj, fences[0]);
1531 + dma_fence_put(fences[0]);
1532 + kfree(fences);
1533 + } else {
1534 + struct dma_fence_array *array;
1535 +
1536 + array = dma_fence_array_create(count, fences,
1537 + dma_fence_context_alloc(1), 0,
1538 + false);
1539 + if (!array)
1540 + goto err_fences_put;
1541 +
1542 + reservation_object_add_excl_fence(obj, &array->base);
1543 + dma_fence_put(&array->base);
1544 + }
1545 +
1546 + return 0;
1547 +
1548 +err_fences_put:
1549 + while (count--)
1550 + dma_fence_put(fences[count]);
1551 + kfree(fences);
1552 + return -ENOMEM;
1553 +}
1554 +
1555 /**
1556 * amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
1557 * @dma_buf: shared DMA buffer
1558 @@ -219,16 +262,16 @@ static int amdgpu_gem_map_attach(struct dma_buf *dma_buf,
1559
1560 if (attach->dev->driver != adev->dev->driver) {
1561 /*
1562 - * Wait for all shared fences to complete before we switch to future
1563 - * use of exclusive fence on this prime shared bo.
1564 + * We only create shared fences for internal use, but importers
1565 + * of the dmabuf rely on exclusive fences for implicitly
1566 + * tracking write hazards. As any of the current fences may
1567 + * correspond to a write, we need to convert all existing
1568 + * fences on the reservation object into a single exclusive
1569 + * fence.
1570 */
1571 - r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
1572 - true, false,
1573 - MAX_SCHEDULE_TIMEOUT);
1574 - if (unlikely(r < 0)) {
1575 - DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
1576 + r = __reservation_object_make_exclusive(bo->tbo.resv);
1577 + if (r)
1578 goto error_unreserve;
1579 - }
1580 }
1581
1582 /* pin buffer into GTT */
1583 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1584 index 6a84526e20e0..49fe5084c53d 100644
1585 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1586 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
1587 @@ -3011,14 +3011,15 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
1588 struct amdgpu_task_info *task_info)
1589 {
1590 struct amdgpu_vm *vm;
1591 + unsigned long flags;
1592
1593 - spin_lock(&adev->vm_manager.pasid_lock);
1594 + spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
1595
1596 vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
1597 if (vm)
1598 *task_info = vm->task_info;
1599
1600 - spin_unlock(&adev->vm_manager.pasid_lock);
1601 + spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
1602 }
1603
1604 /**
1605 diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
1606 index d587779a80b4..a97294ac96d5 100644
1607 --- a/drivers/gpu/drm/radeon/ci_dpm.c
1608 +++ b/drivers/gpu/drm/radeon/ci_dpm.c
1609 @@ -5676,7 +5676,7 @@ int ci_dpm_init(struct radeon_device *rdev)
1610 u16 data_offset, size;
1611 u8 frev, crev;
1612 struct ci_power_info *pi;
1613 - enum pci_bus_speed speed_cap;
1614 + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
1615 struct pci_dev *root = rdev->pdev->bus->self;
1616 int ret;
1617
1618 @@ -5685,7 +5685,8 @@ int ci_dpm_init(struct radeon_device *rdev)
1619 return -ENOMEM;
1620 rdev->pm.dpm.priv = pi;
1621
1622 - speed_cap = pcie_get_speed_cap(root);
1623 + if (!pci_is_root_bus(rdev->pdev->bus))
1624 + speed_cap = pcie_get_speed_cap(root);
1625 if (speed_cap == PCI_SPEED_UNKNOWN) {
1626 pi->sys_pcie_mask = 0;
1627 } else {
1628 diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1629 index 8fb60b3af015..0a785ef0ab66 100644
1630 --- a/drivers/gpu/drm/radeon/si_dpm.c
1631 +++ b/drivers/gpu/drm/radeon/si_dpm.c
1632 @@ -6899,7 +6899,7 @@ int si_dpm_init(struct radeon_device *rdev)
1633 struct ni_power_info *ni_pi;
1634 struct si_power_info *si_pi;
1635 struct atom_clock_dividers dividers;
1636 - enum pci_bus_speed speed_cap;
1637 + enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
1638 struct pci_dev *root = rdev->pdev->bus->self;
1639 int ret;
1640
1641 @@ -6911,7 +6911,8 @@ int si_dpm_init(struct radeon_device *rdev)
1642 eg_pi = &ni_pi->eg;
1643 pi = &eg_pi->rv7xx;
1644
1645 - speed_cap = pcie_get_speed_cap(root);
1646 + if (!pci_is_root_bus(rdev->pdev->bus))
1647 + speed_cap = pcie_get_speed_cap(root);
1648 if (speed_cap == PCI_SPEED_UNKNOWN) {
1649 si_pi->sys_pcie_mask = 0;
1650 } else {
1651 diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1652 index 3fb084f802e2..8c31c9ab06f8 100644
1653 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
1654 +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
1655 @@ -672,6 +672,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
1656 return PTR_ERR(tcon->sclk0);
1657 }
1658 }
1659 + clk_prepare_enable(tcon->sclk0);
1660
1661 if (tcon->quirks->has_channel_1) {
1662 tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
1663 @@ -686,6 +687,7 @@ static int sun4i_tcon_init_clocks(struct device *dev,
1664
1665 static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
1666 {
1667 + clk_disable_unprepare(tcon->sclk0);
1668 clk_disable_unprepare(tcon->clk);
1669 }
1670
1671 diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
1672 index 65d06a819307..2ac86096ddd9 100644
1673 --- a/drivers/i2c/busses/i2c-omap.c
1674 +++ b/drivers/i2c/busses/i2c-omap.c
1675 @@ -1498,8 +1498,7 @@ static int omap_i2c_remove(struct platform_device *pdev)
1676 return 0;
1677 }
1678
1679 -#ifdef CONFIG_PM
1680 -static int omap_i2c_runtime_suspend(struct device *dev)
1681 +static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
1682 {
1683 struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1684
1685 @@ -1525,7 +1524,7 @@ static int omap_i2c_runtime_suspend(struct device *dev)
1686 return 0;
1687 }
1688
1689 -static int omap_i2c_runtime_resume(struct device *dev)
1690 +static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
1691 {
1692 struct omap_i2c_dev *omap = dev_get_drvdata(dev);
1693
1694 @@ -1540,20 +1539,18 @@ static int omap_i2c_runtime_resume(struct device *dev)
1695 }
1696
1697 static const struct dev_pm_ops omap_i2c_pm_ops = {
1698 + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
1699 + pm_runtime_force_resume)
1700 SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
1701 omap_i2c_runtime_resume, NULL)
1702 };
1703 -#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
1704 -#else
1705 -#define OMAP_I2C_PM_OPS NULL
1706 -#endif /* CONFIG_PM */
1707
1708 static struct platform_driver omap_i2c_driver = {
1709 .probe = omap_i2c_probe,
1710 .remove = omap_i2c_remove,
1711 .driver = {
1712 .name = "omap_i2c",
1713 - .pm = OMAP_I2C_PM_OPS,
1714 + .pm = &omap_i2c_pm_ops,
1715 .of_match_table = of_match_ptr(omap_i2c_of_match),
1716 },
1717 };
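
The i2c-omap change replaces the #ifdef CONFIG_PM dance with __maybe_unused callbacks: the SET_*_PM_OPS() macros expand to nothing when PM support is disabled, and the attribute lets the compiler silently discard the then-unreferenced functions. A skeletal sketch with hypothetical foo_* names:

    #include <linux/pm.h>
    #include <linux/pm_runtime.h>

    static int __maybe_unused foo_runtime_suspend(struct device *dev)
    {
    	/* quiesce the hardware */
    	return 0;
    }

    static int __maybe_unused foo_runtime_resume(struct device *dev)
    {
    	/* bring the hardware back */
    	return 0;
    }

    static const struct dev_pm_ops foo_pm_ops = {
    	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
    				      pm_runtime_force_resume)
    	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
    };

The driver can then reference &foo_pm_ops unconditionally, exactly as the patch does for omap_i2c.
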
1718 diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
1719 index 70d39fc450a1..54eb69564264 100644
1720 --- a/drivers/infiniband/hw/hfi1/ud.c
1721 +++ b/drivers/infiniband/hw/hfi1/ud.c
1722 @@ -980,7 +980,6 @@ void hfi1_ud_rcv(struct hfi1_packet *packet)
1723 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
1724 wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
1725 wc.wc_flags = IB_WC_WITH_IMM;
1726 - tlen -= sizeof(u32);
1727 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
1728 wc.ex.imm_data = 0;
1729 wc.wc_flags = 0;
1730 diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
1731 index f8d029a2390f..bce2b5cd3c7b 100644
1732 --- a/drivers/infiniband/hw/qib/qib_ud.c
1733 +++ b/drivers/infiniband/hw/qib/qib_ud.c
1734 @@ -513,7 +513,6 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct ib_header *hdr,
1735 opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
1736 wc.ex.imm_data = ohdr->u.ud.imm_data;
1737 wc.wc_flags = IB_WC_WITH_IMM;
1738 - tlen -= sizeof(u32);
1739 } else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
1740 wc.ex.imm_data = 0;
1741 wc.wc_flags = 0;
1742 diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
1743 index 1abe3c62f106..b22d02c9de90 100644
1744 --- a/drivers/infiniband/ulp/ipoib/ipoib.h
1745 +++ b/drivers/infiniband/ulp/ipoib/ipoib.h
1746 @@ -248,7 +248,6 @@ struct ipoib_cm_tx {
1747 struct list_head list;
1748 struct net_device *dev;
1749 struct ipoib_neigh *neigh;
1750 - struct ipoib_path *path;
1751 struct ipoib_tx_buf *tx_ring;
1752 unsigned int tx_head;
1753 unsigned int tx_tail;
1754 diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1755 index 0428e01e8f69..aa9dcfc36cd3 100644
1756 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1757 +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
1758 @@ -1312,7 +1312,6 @@ struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path
1759
1760 neigh->cm = tx;
1761 tx->neigh = neigh;
1762 - tx->path = path;
1763 tx->dev = dev;
1764 list_add(&tx->list, &priv->cm.start_list);
1765 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
1766 @@ -1371,7 +1370,7 @@ static void ipoib_cm_tx_start(struct work_struct *work)
1767 neigh->daddr + QPN_AND_OPTIONS_OFFSET);
1768 goto free_neigh;
1769 }
1770 - memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
1771 + memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
1772
1773 spin_unlock_irqrestore(&priv->lock, flags);
1774 netif_tx_unlock_bh(dev);
1775 diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
1776 index 225ae6980182..628ef617bb2f 100644
1777 --- a/drivers/input/mouse/elan_i2c_core.c
1778 +++ b/drivers/input/mouse/elan_i2c_core.c
1779 @@ -1337,6 +1337,7 @@ static const struct acpi_device_id elan_acpi_id[] = {
1780 { "ELAN0000", 0 },
1781 { "ELAN0100", 0 },
1782 { "ELAN0600", 0 },
1783 + { "ELAN0601", 0 },
1784 { "ELAN0602", 0 },
1785 { "ELAN0605", 0 },
1786 { "ELAN0608", 0 },
1787 diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
1788 index 38bfaca48eab..150f9eecaca7 100644
1789 --- a/drivers/input/tablet/wacom_serial4.c
1790 +++ b/drivers/input/tablet/wacom_serial4.c
1791 @@ -187,6 +187,7 @@ enum {
1792 MODEL_DIGITIZER_II = 0x5544, /* UD */
1793 MODEL_GRAPHIRE = 0x4554, /* ET */
1794 MODEL_PENPARTNER = 0x4354, /* CT */
1795 + MODEL_ARTPAD_II = 0x4B54, /* KT */
1796 };
1797
1798 static void wacom_handle_model_response(struct wacom *wacom)
1799 @@ -245,6 +246,7 @@ static void wacom_handle_model_response(struct wacom *wacom)
1800 wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
1801 break;
1802
1803 + case MODEL_ARTPAD_II:
1804 case MODEL_DIGITIZER_II:
1805 wacom->dev->name = "Wacom Digitizer II";
1806 wacom->dev->id.version = MODEL_DIGITIZER_II;
1807 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1808 index 34c9aa76a7bd..27500abe8ca7 100644
1809 --- a/drivers/iommu/amd_iommu.c
1810 +++ b/drivers/iommu/amd_iommu.c
1811 @@ -1929,16 +1929,13 @@ static void do_attach(struct iommu_dev_data *dev_data,
1812
1813 static void do_detach(struct iommu_dev_data *dev_data)
1814 {
1815 + struct protection_domain *domain = dev_data->domain;
1816 struct amd_iommu *iommu;
1817 u16 alias;
1818
1819 iommu = amd_iommu_rlookup_table[dev_data->devid];
1820 alias = dev_data->alias;
1821
1822 - /* decrease reference counters */
1823 - dev_data->domain->dev_iommu[iommu->index] -= 1;
1824 - dev_data->domain->dev_cnt -= 1;
1825 -
1826 /* Update data structures */
1827 dev_data->domain = NULL;
1828 list_del(&dev_data->list);
1829 @@ -1948,6 +1945,16 @@ static void do_detach(struct iommu_dev_data *dev_data)
1830
1831 /* Flush the DTE entry */
1832 device_flush_dte(dev_data);
1833 +
1834 + /* Flush IOTLB */
1835 + domain_flush_tlb_pde(domain);
1836 +
1837 + /* Wait for the flushes to finish */
1838 + domain_flush_complete(domain);
1839 +
1840 + /* decrease reference counters - needs to happen after the flushes */
1841 + domain->dev_iommu[iommu->index] -= 1;
1842 + domain->dev_cnt -= 1;
1843 }
1844
1845 /*
1846 @@ -2555,13 +2562,13 @@ out_unmap:
1847 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
1848 iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
1849
1850 - if (--mapped_pages)
1851 + if (--mapped_pages == 0)
1852 goto out_free_iova;
1853 }
1854 }
1855
1856 out_free_iova:
1857 - free_iova_fast(&dma_dom->iovad, address, npages);
1858 + free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
1859
1860 out_err:
1861 return 0;
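
The second amd_iommu hunk is a classic inverted-decrement bug: "if (--mapped_pages)" fires while pages remain, aborting the unwind after a single iteration, whereas the intent was to stop once everything has been undone (the last hunk similarly converts a DMA address into a page frame number before handing it to free_iova_fast()). A standalone C demo of the corrected loop logic:

    #include <assert.h>

    int main(void)
    {
    	int mapped_pages = 3;	/* pages still to unwind */
    	int unmapped = 0;

    	for (;;) {
    		unmapped++;			/* undo one mapping */
    		if (--mapped_pages == 0)	/* buggy form: if (--mapped_pages) */
    			break;
    	}
    	assert(unmapped == 3);	/* the buggy form stops after 1 */
    	return 0;
    }
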
1862 diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
1863 index 4c2246fe5dbe..15579cba1a88 100644
1864 --- a/drivers/irqchip/irq-gic-v3-its.c
1865 +++ b/drivers/irqchip/irq-gic-v3-its.c
1866 @@ -1581,6 +1581,9 @@ static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1867 nr_irqs /= 2;
1868 } while (nr_irqs > 0);
1869
1870 + if (!nr_irqs)
1871 + err = -ENOSPC;
1872 +
1873 if (err)
1874 goto out;
1875
1876 @@ -1951,6 +1954,29 @@ static void its_free_pending_table(struct page *pt)
1877 get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
1878 }
1879
1880 +static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
1881 +{
1882 + u32 count = 1000000; /* 1s! */
1883 + bool clean;
1884 + u64 val;
1885 +
1886 + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1887 + val &= ~GICR_VPENDBASER_Valid;
1888 + gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
1889 +
1890 + do {
1891 + val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1892 + clean = !(val & GICR_VPENDBASER_Dirty);
1893 + if (!clean) {
1894 + count--;
1895 + cpu_relax();
1896 + udelay(1);
1897 + }
1898 + } while (!clean && count);
1899 +
1900 + return val;
1901 +}
1902 +
1903 static void its_cpu_init_lpis(void)
1904 {
1905 void __iomem *rbase = gic_data_rdist_rd_base();
1906 @@ -2024,6 +2050,30 @@ static void its_cpu_init_lpis(void)
1907 val |= GICR_CTLR_ENABLE_LPIS;
1908 writel_relaxed(val, rbase + GICR_CTLR);
1909
1910 + if (gic_rdists->has_vlpis) {
1911 + void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
1912 +
1913 + /*
1914 + * It's possible for the CPU to receive VLPIs before it is
1915 + * scheduled as a vPE, especially for the first CPU, and a
1916 + * VLPI with an INTID larger than 2^(IDbits+1) will be
1917 + * considered out of range and dropped by the GIC.
1918 + * So we initialize IDbits to a known value to avoid VLPI drops.
1919 + */
1920 + val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
1921 + pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
1922 + smp_processor_id(), val);
1923 + gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
1924 +
1925 + /*
1926 + * Also clear the Valid bit of GICR_VPENDBASER, in case some
1927 + * ancient programming was left in place and could end up
1928 + * corrupting memory.
1929 + */
1930 + val = its_clear_vpend_valid(vlpi_base);
1931 + WARN_ON(val & GICR_VPENDBASER_Dirty);
1932 + }
1933 +
1934 /* Make sure the GIC has seen the above */
1935 dsb(sy);
1936 }
1937 @@ -2644,26 +2694,11 @@ static void its_vpe_schedule(struct its_vpe *vpe)
1938 static void its_vpe_deschedule(struct its_vpe *vpe)
1939 {
1940 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
1941 - u32 count = 1000000; /* 1s! */
1942 - bool clean;
1943 u64 val;
1944
1945 - /* We're being scheduled out */
1946 - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1947 - val &= ~GICR_VPENDBASER_Valid;
1948 - gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
1949 -
1950 - do {
1951 - val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
1952 - clean = !(val & GICR_VPENDBASER_Dirty);
1953 - if (!clean) {
1954 - count--;
1955 - cpu_relax();
1956 - udelay(1);
1957 - }
1958 - } while (!clean && count);
1959 + val = its_clear_vpend_valid(vlpi_base);
1960
1961 - if (unlikely(!clean && !count)) {
1962 + if (unlikely(val & GICR_VPENDBASER_Dirty)) {
1963 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
1964 vpe->idai = false;
1965 vpe->pending_last = true;
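
The its_clear_vpend_valid() helper factored out above is a bounded-polling pattern: clear a control bit, spin until the hardware drops its Dirty flag or a fixed budget expires, and return the last observed value so the caller decides how loudly to complain. A standalone sketch with a faked register read (read_vpendbaser() here is a stand-in, not a kernel API):

    #include <stdint.h>
    #include <stdio.h>

    #define DIRTY (1ull << 60)	/* stand-in for GICR_VPENDBASER.Dirty */

    static uint64_t read_vpendbaser(void)
    {
    	static int busy = 3;		/* fake HW: stays dirty for 3 polls */
    	return busy-- > 0 ? DIRTY : 0;
    }

    static uint64_t clear_and_wait(void)
    {
    	uint32_t budget = 1000000;	/* ~1s at one poll per usec */
    	uint64_t val;

    	do {
    		val = read_vpendbaser();
    		if (!(val & DIRTY))
    			break;
    		/* kernel version: cpu_relax(); udelay(1); */
    	} while (--budget);

    	return val;			/* caller checks the Dirty bit */
    }

    int main(void)
    {
    	printf("still dirty: %d\n", !!(clear_and_wait() & DIRTY));
    	return 0;
    }
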
1966 diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
1967 index 25f32e1d7764..3496b61a312a 100644
1968 --- a/drivers/irqchip/irq-mmp.c
1969 +++ b/drivers/irqchip/irq-mmp.c
1970 @@ -34,6 +34,9 @@
1971 #define SEL_INT_PENDING (1 << 6)
1972 #define SEL_INT_NUM_MASK 0x3f
1973
1974 +#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
1975 +#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
1976 +
1977 struct icu_chip_data {
1978 int nr_irqs;
1979 unsigned int virq_base;
1980 @@ -190,7 +193,8 @@ static const struct mmp_intc_conf mmp_conf = {
1981 static const struct mmp_intc_conf mmp2_conf = {
1982 .conf_enable = 0x20,
1983 .conf_disable = 0x0,
1984 - .conf_mask = 0x7f,
1985 + .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
1986 + MMP2_ICU_INT_ROUTE_PJ4_FIQ,
1987 };
1988
1989 static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
1990 diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
1991 index 361abbc00486..6f1fd40fce10 100644
1992 --- a/drivers/media/usb/uvc/uvc_driver.c
1993 +++ b/drivers/media/usb/uvc/uvc_driver.c
1994 @@ -1065,11 +1065,19 @@ static int uvc_parse_standard_control(struct uvc_device *dev,
1995 return -EINVAL;
1996 }
1997
1998 - /* Make sure the terminal type MSB is not null, otherwise it
1999 - * could be confused with a unit.
2000 + /*
2001 + * Reject invalid terminal types that would cause issues:
2002 + *
2003 + * - The high byte must be non-zero, otherwise it would be
2004 + * confused with a unit.
2005 + *
2006 + * - Bit 15 must be 0, as we use it internally as a terminal
2007 + * direction flag.
2008 + *
2009 + * Other unknown types are accepted.
2010 */
2011 type = get_unaligned_le16(&buffer[4]);
2012 - if ((type & 0xff00) == 0) {
2013 + if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
2014 uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
2015 "interface %d INPUT_TERMINAL %d has invalid "
2016 "type 0x%04x, skipping\n", udev->devnum,
2017 diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
2018 index 0fb986ba3290..0ae723f75341 100644
2019 --- a/drivers/net/ethernet/altera/altera_msgdma.c
2020 +++ b/drivers/net/ethernet/altera/altera_msgdma.c
2021 @@ -145,7 +145,8 @@ u32 msgdma_tx_completions(struct altera_tse_private *priv)
2022 & 0xffff;
2023
2024 if (inuse) { /* Tx FIFO is not empty */
2025 - ready = priv->tx_prod - priv->tx_cons - inuse - 1;
2026 + ready = max_t(int,
2027 + priv->tx_prod - priv->tx_cons - inuse - 1, 0);
2028 } else {
2029 /* Check for buffered last packet */
2030 status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
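
The altera_msgdma fix clamps the ring-buffer arithmetic: with the TX FIFO full, prod - cons - inuse - 1 can transiently go negative, and returning that as an unsigned "ready descriptors" count would be wildly wrong. A standalone rendering of the clamped math:

    static int tx_ready(unsigned int prod, unsigned int cons, unsigned int inuse)
    {
    	int ready = (int)(prod - cons - inuse - 1);

    	return ready > 0 ? ready : 0;	/* the driver's max_t(int, ..., 0) */
    }
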
2031 diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
2032 index 3d45f4c92cf6..9bbaad9f3d63 100644
2033 --- a/drivers/net/ethernet/cadence/macb.h
2034 +++ b/drivers/net/ethernet/cadence/macb.h
2035 @@ -643,6 +643,7 @@
2036 #define MACB_CAPS_JUMBO 0x00000020
2037 #define MACB_CAPS_GEM_HAS_PTP 0x00000040
2038 #define MACB_CAPS_BD_RD_PREFETCH 0x00000080
2039 +#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
2040 #define MACB_CAPS_FIFO_MODE 0x10000000
2041 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
2042 #define MACB_CAPS_SG_DISABLED 0x40000000
2043 @@ -1214,6 +1215,8 @@ struct macb {
2044
2045 int rx_bd_rd_prefetch;
2046 int tx_bd_rd_prefetch;
2047 +
2048 + u32 rx_intr_mask;
2049 };
2050
2051 #ifdef CONFIG_MACB_USE_HWSTAMP
2052 diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
2053 index 8f4b2f9a8e07..8abea1c3844f 100644
2054 --- a/drivers/net/ethernet/cadence/macb_main.c
2055 +++ b/drivers/net/ethernet/cadence/macb_main.c
2056 @@ -56,8 +56,7 @@
2057 /* level of occupied TX descriptors under which we wake up TX process */
2058 #define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
2059
2060 -#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
2061 - | MACB_BIT(ISR_ROVR))
2062 +#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
2063 #define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
2064 | MACB_BIT(ISR_RLE) \
2065 | MACB_BIT(TXERR))
2066 @@ -1271,7 +1270,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
2067 queue_writel(queue, ISR, MACB_BIT(RCOMP));
2068 napi_reschedule(napi);
2069 } else {
2070 - queue_writel(queue, IER, MACB_RX_INT_FLAGS);
2071 + queue_writel(queue, IER, bp->rx_intr_mask);
2072 }
2073 }
2074
2075 @@ -1289,7 +1288,7 @@ static void macb_hresp_error_task(unsigned long data)
2076 u32 ctrl;
2077
2078 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2079 - queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
2080 + queue_writel(queue, IDR, bp->rx_intr_mask |
2081 MACB_TX_INT_FLAGS |
2082 MACB_BIT(HRESP));
2083 }
2084 @@ -1319,7 +1318,7 @@ static void macb_hresp_error_task(unsigned long data)
2085
2086 /* Enable interrupts */
2087 queue_writel(queue, IER,
2088 - MACB_RX_INT_FLAGS |
2089 + bp->rx_intr_mask |
2090 MACB_TX_INT_FLAGS |
2091 MACB_BIT(HRESP));
2092 }
2093 @@ -1373,14 +1372,14 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
2094 (unsigned int)(queue - bp->queues),
2095 (unsigned long)status);
2096
2097 - if (status & MACB_RX_INT_FLAGS) {
2098 + if (status & bp->rx_intr_mask) {
2099 /* There's no point taking any more interrupts
2100 * until we have processed the buffers. The
2101 * scheduling call may fail if the poll routine
2102 * is already scheduled, so disable interrupts
2103 * now.
2104 */
2105 - queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
2106 + queue_writel(queue, IDR, bp->rx_intr_mask);
2107 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2108 queue_writel(queue, ISR, MACB_BIT(RCOMP));
2109
2110 @@ -1413,8 +1412,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
2111 /* There is a hardware issue under heavy load where DMA can
2112 * stop, this causes endless "used buffer descriptor read"
2113 * interrupts but it can be cleared by re-enabling RX. See
2114 - * the at91 manual, section 41.3.1 or the Zynq manual
2115 - * section 16.7.4 for details.
2116 + * the at91rm9200 manual, section 41.3.1 or the Zynq manual
2117 + * section 16.7.4 for details. RXUBR is only enabled for
2118 + * these two versions.
2119 */
2120 if (status & MACB_BIT(RXUBR)) {
2121 ctrl = macb_readl(bp, NCR);
2122 @@ -2264,7 +2264,7 @@ static void macb_init_hw(struct macb *bp)
2123
2124 /* Enable interrupts */
2125 queue_writel(queue, IER,
2126 - MACB_RX_INT_FLAGS |
2127 + bp->rx_intr_mask |
2128 MACB_TX_INT_FLAGS |
2129 MACB_BIT(HRESP));
2130 }
2131 @@ -3912,6 +3912,7 @@ static const struct macb_config sama5d4_config = {
2132 };
2133
2134 static const struct macb_config emac_config = {
2135 + .caps = MACB_CAPS_NEEDS_RSTONUBR,
2136 .clk_init = at91ether_clk_init,
2137 .init = at91ether_init,
2138 };
2139 @@ -3933,7 +3934,8 @@ static const struct macb_config zynqmp_config = {
2140 };
2141
2142 static const struct macb_config zynq_config = {
2143 - .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
2144 + .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
2145 + MACB_CAPS_NEEDS_RSTONUBR,
2146 .dma_burst_length = 16,
2147 .clk_init = macb_clk_init,
2148 .init = macb_init,
2149 @@ -4088,6 +4090,10 @@ static int macb_probe(struct platform_device *pdev)
2150 macb_dma_desc_get_size(bp);
2151 }
2152
2153 + bp->rx_intr_mask = MACB_RX_INT_FLAGS;
2154 + if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
2155 + bp->rx_intr_mask |= MACB_BIT(RXUBR);
2156 +
2157 mac = of_get_mac_address(np);
2158 if (mac) {
2159 ether_addr_copy(bp->dev->dev_addr, mac);
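
The macb series converts a compile-time interrupt mask into a per-device one: each SoC config declares whether it needs the RXUBR workaround, and probe derives the runtime mask once. A sketch of the pattern with illustrative bit values (the real MACB_BIT() positions differ):

    #define CAP_NEEDS_RSTONUBR	0x00000100		/* per-SoC capability flag */
    #define BIT_RXUBR		(1u << 2)		/* illustrative position */
    #define RX_INT_FLAGS_BASE	((1u << 1) | (1u << 10))	/* RCOMP | ISR_ROVR, illustrative */

    static unsigned int build_rx_mask(unsigned int caps)
    {
    	unsigned int mask = RX_INT_FLAGS_BASE;

    	if (caps & CAP_NEEDS_RSTONUBR)	/* only at91rm9200 (emac) and Zynq */
    		mask |= BIT_RXUBR;
    	return mask;
    }
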
2160 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2161 index 6242249c9f4c..b043370c2685 100644
2162 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2163 +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
2164 @@ -2419,6 +2419,8 @@ static int hns_nic_dev_probe(struct platform_device *pdev)
2165 out_notify_fail:
2166 (void)cancel_work_sync(&priv->service_task);
2167 out_read_prop_fail:
2168 + /* safe for ACPI FW */
2169 + of_node_put(to_of_node(priv->fwnode));
2170 free_netdev(ndev);
2171 return ret;
2172 }
2173 @@ -2448,6 +2450,9 @@ static int hns_nic_dev_remove(struct platform_device *pdev)
2174 set_bit(NIC_STATE_REMOVING, &priv->state);
2175 (void)cancel_work_sync(&priv->service_task);
2176
2177 + /* safe for ACPI FW */
2178 + of_node_put(to_of_node(priv->fwnode));
2179 +
2180 free_netdev(ndev);
2181 return 0;
2182 }
2183 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2184 index 774beda040a1..e2710ff48fb0 100644
2185 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2186 +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
2187 @@ -1157,16 +1157,18 @@ static int hns_get_regs_len(struct net_device *net_dev)
2188 */
2189 static int hns_nic_nway_reset(struct net_device *netdev)
2190 {
2191 - int ret = 0;
2192 struct phy_device *phy = netdev->phydev;
2193
2194 - if (netif_running(netdev)) {
2195 - /* if autoneg is disabled, don't restart auto-negotiation */
2196 - if (phy && phy->autoneg == AUTONEG_ENABLE)
2197 - ret = genphy_restart_aneg(phy);
2198 - }
2199 + if (!netif_running(netdev))
2200 + return 0;
2201
2202 - return ret;
2203 + if (!phy)
2204 + return -EOPNOTSUPP;
2205 +
2206 + if (phy->autoneg != AUTONEG_ENABLE)
2207 + return -EINVAL;
2208 +
2209 + return genphy_restart_aneg(phy);
2210 }
2211
2212 static u32
2213 diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
2214 index 017e08452d8c..baf5cc251f32 100644
2215 --- a/drivers/net/ethernet/hisilicon/hns_mdio.c
2216 +++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
2217 @@ -321,7 +321,7 @@ static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
2218 }
2219
2220 hns_mdio_cmd_write(mdio_dev, is_c45,
2221 - MDIO_C45_WRITE_ADDR, phy_id, devad);
2222 + MDIO_C45_READ, phy_id, devad);
2223 }
2224
2225 /* Step 5: waiting for MDIO_COMMAND_REG's mdio_start == 0 */
2226 diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2227 index 2f69ee9221c6..4dd82a1612aa 100644
2228 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
2229 +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
2230 @@ -473,19 +473,19 @@ static void qed_init_qm_pq(struct qed_hwfn *p_hwfn,
2231
2232 /* get pq index according to PQ_FLAGS */
2233 static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
2234 - u32 pq_flags)
2235 + unsigned long pq_flags)
2236 {
2237 struct qed_qm_info *qm_info = &p_hwfn->qm_info;
2238
2239 /* Can't have multiple flags set here */
2240 - if (bitmap_weight((unsigned long *)&pq_flags,
2241 + if (bitmap_weight(&pq_flags,
2242 sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
2243 - DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
2244 + DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
2245 goto err;
2246 }
2247
2248 if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
2249 - DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
2250 + DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
2251 goto err;
2252 }
2253
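
The qed fix is about operand width: casting a u32 * to unsigned long * and asking bitmap_weight() for sizeof(unsigned long) * 8 bits reads past the variable on 64-bit builds. Widening the parameter itself makes the type and the bit count agree. A standalone illustration using the compiler's popcount:

    #include <stdio.h>

    static int weight(unsigned long v)
    {
    	return __builtin_popcountl(v);	/* bit count matches the type */
    }

    int main(void)
    {
    	unsigned long pq_flags = 0x5;	/* two flags set */

    	if (weight(pq_flags) > 1)
    		printf("requested multiple pq flags 0x%lx\n", pq_flags);
    	return 0;
    }
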
2254 diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2255 index 67c02ea93906..64ac95ca4df2 100644
2256 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
2257 +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
2258 @@ -609,6 +609,10 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
2259 (!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
2260 !!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
2261
2262 + SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
2263 + (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
2264 + !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
2265 +
2266 SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
2267 !!(accept_filter & QED_ACCEPT_BCAST));
2268
2269 @@ -744,6 +748,11 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
2270 return rc;
2271 }
2272
2273 + if (p_params->update_ctl_frame_check) {
2274 + p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
2275 + p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
2276 + }
2277 +
2278 /* Update mcast bins for VFs, PF doesn't use this functionality */
2279 qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
2280
2281 @@ -2207,7 +2216,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2282 u16 num_queues = 0;
2283
2284 /* Since the feature controls only queue-zones,
2285 - * make sure we have the contexts [rx, tx, xdp] to
2286 + * make sure we have the contexts [rx, xdp, tcs] to
2287 * match.
2288 */
2289 for_each_hwfn(cdev, i) {
2290 @@ -2217,7 +2226,8 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
2291 u16 cids;
2292
2293 cids = hwfn->pf_params.eth_pf_params.num_cons;
2294 - num_queues += min_t(u16, l2_queues, cids / 3);
2295 + cids /= (2 + info->num_tc);
2296 + num_queues += min_t(u16, l2_queues, cids);
2297 }
2298
2299 /* queues might theoretically be >256, but interrupts'
2300 @@ -2688,7 +2698,8 @@ static int qed_configure_filter_rx_mode(struct qed_dev *cdev,
2301 if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
2302 accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2303 QED_ACCEPT_MCAST_UNMATCHED;
2304 - accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2305 + accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
2306 + QED_ACCEPT_MCAST_UNMATCHED;
2307 } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
2308 accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2309 accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
2310 diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2311 index 8d80f1095d17..7127d5aaac42 100644
2312 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
2313 +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
2314 @@ -219,6 +219,9 @@ struct qed_sp_vport_update_params {
2315 struct qed_rss_params *rss_params;
2316 struct qed_filter_accept_flags accept_flags;
2317 struct qed_sge_tpa_params *sge_tpa_params;
2318 + u8 update_ctl_frame_check;
2319 + u8 mac_chk_en;
2320 + u8 ethtype_chk_en;
2321 };
2322
2323 int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
2324 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2325 index 92cd8abeb41d..015de1e0addd 100644
2326 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2327 +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
2328 @@ -2430,19 +2430,24 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2329 {
2330 struct qed_ll2_tx_pkt_info pkt;
2331 const skb_frag_t *frag;
2332 + u8 flags = 0, nr_frags;
2333 int rc = -EINVAL, i;
2334 dma_addr_t mapping;
2335 u16 vlan = 0;
2336 - u8 flags = 0;
2337
2338 if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
2339 DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
2340 return -EINVAL;
2341 }
2342
2343 - if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2344 + /* Cache the number of fragments from the SKB, since the SKB may be
2345 + * freed by the completion routine after qed_ll2_prepare_tx_packet()
2346 + */
2347 + nr_frags = skb_shinfo(skb)->nr_frags;
2348 +
2349 + if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
2350 DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
2351 - 1 + skb_shinfo(skb)->nr_frags);
2352 + 1 + nr_frags);
2353 return -EINVAL;
2354 }
2355
2356 @@ -2464,7 +2469,7 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2357 }
2358
2359 memset(&pkt, 0, sizeof(pkt));
2360 - pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
2361 + pkt.num_of_bds = 1 + nr_frags;
2362 pkt.vlan = vlan;
2363 pkt.bd_flags = flags;
2364 pkt.tx_dest = QED_LL2_TX_DEST_NW;
2365 @@ -2475,12 +2480,17 @@ static int qed_ll2_start_xmit(struct qed_dev *cdev, struct sk_buff *skb,
2366 test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
2367 pkt.remove_stag = true;
2368
2369 + /* qed_ll2_prepare_tx_packet() may actually send the packet if
2370 + * there are no fragments in the skb; the completion routine may
2371 + * then run and free the SKB, so do not dereference the SKB
2372 + * beyond this point unless it has fragments.
2373 + */
2374 rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
2375 &pkt, 1);
2376 if (rc)
2377 goto err;
2378
2379 - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2380 + for (i = 0; i < nr_frags; i++) {
2381 frag = &skb_shinfo(skb)->frags[i];
2382
2383 mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
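
The qed_ll2 change enforces a lifetime rule: once qed_ll2_prepare_tx_packet() is called, the completion path may already have freed the skb, so every field needed afterwards must be copied out first. A standalone sketch of that ownership handoff (struct packet and submit_and_maybe_free() are stand-ins):

    #include <stdlib.h>

    struct packet { unsigned char nr_frags; };

    /* Stand-in for the prepare call: consumes and may free the packet. */
    static int submit_and_maybe_free(struct packet *pkt)
    {
    	free(pkt);
    	return 0;
    }

    static int send_packet(struct packet *pkt)
    {
    	unsigned char nr_frags = pkt->nr_frags;	/* cache before the handoff */

    	if (submit_and_maybe_free(pkt))
    		return -1;

    	return nr_frags;	/* pkt is gone; only the cached copy is safe */
    }
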
2384 diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2385 index 3157c0d99441..dae2896e1d8e 100644
2386 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
2387 +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
2388 @@ -380,6 +380,7 @@ void qed_consq_setup(struct qed_hwfn *p_hwfn);
2389 * @param p_hwfn
2390 */
2391 void qed_consq_free(struct qed_hwfn *p_hwfn);
2392 +int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
2393
2394 /**
2395 * @file
2396 diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2397 index 7106ad17afe2..a0ee847f379b 100644
2398 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
2399 +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
2400 @@ -402,6 +402,11 @@ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
2401
2402 qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
2403
2404 + /* Attempt to post pending requests */
2405 + spin_lock_bh(&p_hwfn->p_spq->lock);
2406 + rc = qed_spq_pend_post(p_hwfn);
2407 + spin_unlock_bh(&p_hwfn->p_spq->lock);
2408 +
2409 return rc;
2410 }
2411
2412 @@ -745,7 +750,7 @@ static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
2413 return 0;
2414 }
2415
2416 -static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2417 +int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
2418 {
2419 struct qed_spq *p_spq = p_hwfn->p_spq;
2420 struct qed_spq_entry *p_ent = NULL;
2421 @@ -883,7 +888,6 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2422 struct qed_spq_entry *p_ent = NULL;
2423 struct qed_spq_entry *tmp;
2424 struct qed_spq_entry *found = NULL;
2425 - int rc;
2426
2427 if (!p_hwfn)
2428 return -EINVAL;
2429 @@ -941,12 +945,7 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn,
2430 */
2431 qed_spq_return_entry(p_hwfn, found);
2432
2433 - /* Attempt to post pending requests */
2434 - spin_lock_bh(&p_spq->lock);
2435 - rc = qed_spq_pend_post(p_hwfn);
2436 - spin_unlock_bh(&p_spq->lock);
2437 -
2438 - return rc;
2439 + return 0;
2440 }
2441
2442 int qed_consq_alloc(struct qed_hwfn *p_hwfn)
2443 diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2444 index ca6290fa0f30..71a7af134dd8 100644
2445 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2446 +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
2447 @@ -1969,7 +1969,9 @@ static void qed_iov_vf_mbx_start_vport(struct qed_hwfn *p_hwfn,
2448 params.vport_id = vf->vport_id;
2449 params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2450 params.mtu = vf->mtu;
2451 - params.check_mac = true;
2452 +
2453 + /* Non-trusted VFs should enable control frame filtering */
2454 + params.check_mac = !vf->p_vf_info.is_trusted_configured;
2455
2456 rc = qed_sp_eth_vport_start(p_hwfn, &params);
2457 if (rc) {
2458 @@ -5130,6 +5132,9 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
2459 params.opaque_fid = vf->opaque_fid;
2460 params.vport_id = vf->vport_id;
2461
2462 + params.update_ctl_frame_check = 1;
2463 + params.mac_chk_en = !vf_info->is_trusted_configured;
2464 +
2465 if (vf_info->rx_accept_mode & mask) {
2466 flags->update_rx_mode_config = 1;
2467 flags->rx_accept_filter = vf_info->rx_accept_mode;
2468 @@ -5147,7 +5152,8 @@ static void qed_iov_handle_trust_change(struct qed_hwfn *hwfn)
2469 }
2470
2471 if (flags->update_rx_mode_config ||
2472 - flags->update_tx_mode_config)
2473 + flags->update_tx_mode_config ||
2474 + params.update_ctl_frame_check)
2475 qed_sp_vport_update(hwfn, &params,
2476 QED_SPQ_MODE_EBLOCK, NULL);
2477 }
2478 diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2479 index be118d057b92..6ab3fb008139 100644
2480 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
2481 +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
2482 @@ -261,6 +261,7 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2483 struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
2484 struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
2485 struct vf_pf_resc_request *p_resc;
2486 + u8 retry_cnt = VF_ACQUIRE_THRESH;
2487 bool resources_acquired = false;
2488 struct vfpf_acquire_tlv *req;
2489 int rc = 0, attempts = 0;
2490 @@ -314,6 +315,15 @@ static int qed_vf_pf_acquire(struct qed_hwfn *p_hwfn)
2491
2492 /* send acquire request */
2493 rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
2494 +
2495 + /* Retry the acquire in case of a VF-PF HW channel timeout */
2496 + if (retry_cnt && rc == -EBUSY) {
2497 + DP_VERBOSE(p_hwfn, QED_MSG_IOV,
2498 + "VF retrying to acquire due to VPC timeout\n");
2499 + retry_cnt--;
2500 + continue;
2501 + }
2502 +
2503 if (rc)
2504 goto exit;
2505
2506 diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
2507 index 6a4d266fb8e2..d242a5724069 100644
2508 --- a/drivers/net/ethernet/qlogic/qede/qede.h
2509 +++ b/drivers/net/ethernet/qlogic/qede/qede.h
2510 @@ -489,6 +489,9 @@ struct qede_reload_args {
2511
2512 /* Datapath functions definition */
2513 netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
2514 +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
2515 + struct net_device *sb_dev,
2516 + select_queue_fallback_t fallback);
2517 netdev_features_t qede_features_check(struct sk_buff *skb,
2518 struct net_device *dev,
2519 netdev_features_t features);
2520 diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
2521 index 1a78027de071..a96da16f3404 100644
2522 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
2523 +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
2524 @@ -1695,6 +1695,19 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2525 return NETDEV_TX_OK;
2526 }
2527
2528 +u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
2529 + struct net_device *sb_dev,
2530 + select_queue_fallback_t fallback)
2531 +{
2532 + struct qede_dev *edev = netdev_priv(dev);
2533 + int total_txq;
2534 +
2535 + total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
2536 +
2537 + return QEDE_TSS_COUNT(edev) ?
2538 + fallback(dev, skb, NULL) % total_txq : 0;
2539 +}
2540 +
2541 /* 8B udp header + 8B base tunnel header + 32B option length */
2542 #define QEDE_MAX_TUN_HDR_LEN 48
2543
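
qede_select_queue() folds the core's fallback pick into the range actually backed by hardware: with num_tc traffic classes stacked on QEDE_TSS_COUNT() queue sets, valid indices run from 0 to tss_count * num_tc - 1. The arithmetic in isolation:

    static unsigned short pick_queue(unsigned short fallback_idx,
    				 unsigned short tss_count,
    				 unsigned short num_tc)
    {
    	unsigned short total_txq = tss_count * num_tc;

    	return tss_count ? fallback_idx % total_txq : 0;
    }
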
2544 diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
2545 index 46d0f2eaa0c0..f3d9c40c4115 100644
2546 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c
2547 +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
2548 @@ -631,6 +631,7 @@ static const struct net_device_ops qede_netdev_ops = {
2549 .ndo_open = qede_open,
2550 .ndo_stop = qede_close,
2551 .ndo_start_xmit = qede_start_xmit,
2552 + .ndo_select_queue = qede_select_queue,
2553 .ndo_set_rx_mode = qede_set_rx_mode,
2554 .ndo_set_mac_address = qede_set_mac_addr,
2555 .ndo_validate_addr = eth_validate_addr,
2556 @@ -666,6 +667,7 @@ static const struct net_device_ops qede_netdev_vf_ops = {
2557 .ndo_open = qede_open,
2558 .ndo_stop = qede_close,
2559 .ndo_start_xmit = qede_start_xmit,
2560 + .ndo_select_queue = qede_select_queue,
2561 .ndo_set_rx_mode = qede_set_rx_mode,
2562 .ndo_set_mac_address = qede_set_mac_addr,
2563 .ndo_validate_addr = eth_validate_addr,
2564 @@ -684,6 +686,7 @@ static const struct net_device_ops qede_netdev_vf_xdp_ops = {
2565 .ndo_open = qede_open,
2566 .ndo_stop = qede_close,
2567 .ndo_start_xmit = qede_start_xmit,
2568 + .ndo_select_queue = qede_select_queue,
2569 .ndo_set_rx_mode = qede_set_rx_mode,
2570 .ndo_set_mac_address = qede_set_mac_addr,
2571 .ndo_validate_addr = eth_validate_addr,
2572 diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2573 index 7b923362ee55..3b174eae77c1 100644
2574 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2575 +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
2576 @@ -1342,8 +1342,10 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
2577 }
2578
2579 ret = phy_power_on(bsp_priv, true);
2580 - if (ret)
2581 + if (ret) {
2582 + gmac_clk_enable(bsp_priv, false);
2583 return ret;
2584 + }
2585
2586 pm_runtime_enable(dev);
2587 pm_runtime_get_sync(dev);
2588 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2589 index 9caf79ba5ef1..4d5fb4b51cc4 100644
2590 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2591 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
2592 @@ -719,8 +719,11 @@ static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
2593 {
2594 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
2595
2596 - if (!clk)
2597 - return 0;
2598 + if (!clk) {
2599 + clk = priv->plat->clk_ref_rate;
2600 + if (!clk)
2601 + return 0;
2602 + }
2603
2604 return (usec * (clk / 1000000)) / 256;
2605 }
2606 @@ -729,8 +732,11 @@ static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
2607 {
2608 unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
2609
2610 - if (!clk)
2611 - return 0;
2612 + if (!clk) {
2613 + clk = priv->plat->clk_ref_rate;
2614 + if (!clk)
2615 + return 0;
2616 + }
2617
2618 return (riwt * 256) / (clk / 1000000);
2619 }
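
The stmmac RX watchdog (RIWT) counts in units of 256 clock cycles, which is what both converters encode; the fix only adds clk_ref_rate as a fallback when the measured clock rate is zero. The two conversions in standalone form:

    static unsigned int usec2riwt(unsigned int usec, unsigned long clk_hz)
    {
    	return (usec * (clk_hz / 1000000)) / 256;	/* usec -> RIWT units */
    }

    static unsigned int riwt2usec(unsigned int riwt, unsigned long clk_hz)
    {
    	return (riwt * 256) / (clk_hz / 1000000);	/* RIWT units -> usec */
    }
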
2620 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2621 index 123b74e25ed8..43ab9e905bed 100644
2622 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2623 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2624 @@ -3028,10 +3028,22 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2625
2626 tx_q = &priv->tx_queue[queue];
2627
2628 + if (priv->tx_path_in_lpi_mode)
2629 + stmmac_disable_eee_mode(priv);
2630 +
2631 /* Manage oversized TCP frames for GMAC4 device */
2632 if (skb_is_gso(skb) && priv->tso) {
2633 - if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
2634 + if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
2635 + /*
2636 + * There is no way to determine the number of
2637 + * TSO-capable queues, so always use queue 0:
2638 + * if TSO is supported, then at least this one
2639 + * will be capable.
2640 + */
2641 + skb_set_queue_mapping(skb, 0);
2642 +
2643 return stmmac_tso_xmit(skb, dev);
2644 + }
2645 }
2646
2647 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
2648 @@ -3046,9 +3058,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2649 return NETDEV_TX_BUSY;
2650 }
2651
2652 - if (priv->tx_path_in_lpi_mode)
2653 - stmmac_disable_eee_mode(priv);
2654 -
2655 entry = tx_q->cur_tx;
2656 first_entry = entry;
2657 WARN_ON(tx_q->tx_skbuff[first_entry]);
2658 diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
2659 index c070a9e51ebf..fae572b38416 100644
2660 --- a/drivers/net/wireless/ath/ath9k/init.c
2661 +++ b/drivers/net/wireless/ath/ath9k/init.c
2662 @@ -636,15 +636,15 @@ static int ath9k_of_init(struct ath_softc *sc)
2663 ret = ath9k_eeprom_request(sc, eeprom_name);
2664 if (ret)
2665 return ret;
2666 +
2667 + ah->ah_flags &= ~AH_USE_EEPROM;
2668 + ah->ah_flags |= AH_NO_EEP_SWAP;
2669 }
2670
2671 mac = of_get_mac_address(np);
2672 if (mac)
2673 ether_addr_copy(common->macaddr, mac);
2674
2675 - ah->ah_flags &= ~AH_USE_EEPROM;
2676 - ah->ah_flags |= AH_NO_EEP_SWAP;
2677 -
2678 return 0;
2679 }
2680
2681 diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
2682 index 750bea3574ee..627df164b7b6 100644
2683 --- a/drivers/net/wireless/ti/wlcore/sdio.c
2684 +++ b/drivers/net/wireless/ti/wlcore/sdio.c
2685 @@ -164,6 +164,12 @@ static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
2686 }
2687
2688 sdio_claim_host(func);
2689 + /*
2690 + * To guarantee that the SDIO card is power cycled, as required for
2691 + * FW programming to succeed, do a brute-force HW reset.
2692 + */
2693 + mmc_hw_reset(card->host);
2694 +
2695 sdio_enable_func(func);
2696 sdio_release_host(func);
2697
2698 @@ -174,20 +180,13 @@ static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
2699 {
2700 struct sdio_func *func = dev_to_sdio_func(glue->dev);
2701 struct mmc_card *card = func->card;
2702 - int error;
2703
2704 sdio_claim_host(func);
2705 sdio_disable_func(func);
2706 sdio_release_host(func);
2707
2708 /* Let runtime PM know the card is powered off */
2709 - error = pm_runtime_put(&card->dev);
2710 - if (error < 0 && error != -EBUSY) {
2711 - dev_err(&card->dev, "%s failed: %i\n", __func__, error);
2712 -
2713 - return error;
2714 - }
2715 -
2716 + pm_runtime_put(&card->dev);
2717 return 0;
2718 }
2719
2720 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
2721 index e0d2b7473901..2cdb3032ca0f 100644
2722 --- a/drivers/nvme/host/core.c
2723 +++ b/drivers/nvme/host/core.c
2724 @@ -1182,6 +1182,7 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
2725 * effects say only one namespace is affected.
2726 */
2727 if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
2728 + mutex_lock(&ctrl->scan_lock);
2729 nvme_start_freeze(ctrl);
2730 nvme_wait_freeze(ctrl);
2731 }
2732 @@ -1210,8 +1211,10 @@ static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
2733 */
2734 if (effects & NVME_CMD_EFFECTS_LBCC)
2735 nvme_update_formats(ctrl);
2736 - if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
2737 + if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
2738 nvme_unfreeze(ctrl);
2739 + mutex_unlock(&ctrl->scan_lock);
2740 + }
2741 if (effects & NVME_CMD_EFFECTS_CCC)
2742 nvme_init_identify(ctrl);
2743 if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
2744 @@ -3292,6 +3295,7 @@ static void nvme_scan_work(struct work_struct *work)
2745 if (nvme_identify_ctrl(ctrl, &id))
2746 return;
2747
2748 + mutex_lock(&ctrl->scan_lock);
2749 nn = le32_to_cpu(id->nn);
2750 if (ctrl->vs >= NVME_VS(1, 1, 0) &&
2751 !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
2752 @@ -3300,6 +3304,7 @@ static void nvme_scan_work(struct work_struct *work)
2753 }
2754 nvme_scan_ns_sequential(ctrl, nn);
2755 out_free_id:
2756 + mutex_unlock(&ctrl->scan_lock);
2757 kfree(id);
2758 down_write(&ctrl->namespaces_rwsem);
2759 list_sort(NULL, &ctrl->namespaces, ns_cmp);
2760 @@ -3535,6 +3540,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
2761
2762 ctrl->state = NVME_CTRL_NEW;
2763 spin_lock_init(&ctrl->lock);
2764 + mutex_init(&ctrl->scan_lock);
2765 INIT_LIST_HEAD(&ctrl->namespaces);
2766 init_rwsem(&ctrl->namespaces_rwsem);
2767 ctrl->dev = dev;
2768 diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
2769 index 60220de2db52..e82cdaec81c9 100644
2770 --- a/drivers/nvme/host/nvme.h
2771 +++ b/drivers/nvme/host/nvme.h
2772 @@ -148,6 +148,7 @@ struct nvme_ctrl {
2773 enum nvme_ctrl_state state;
2774 bool identified;
2775 spinlock_t lock;
2776 + struct mutex scan_lock;
2777 const struct nvme_ctrl_ops *ops;
2778 struct request_queue *admin_q;
2779 struct request_queue *connect_q;
2780 diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2781 index f46313f441ec..7b9ef8e734e7 100644
2782 --- a/drivers/nvme/host/pci.c
2783 +++ b/drivers/nvme/host/pci.c
2784 @@ -2260,27 +2260,18 @@ static void nvme_reset_work(struct work_struct *work)
2785 if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
2786 nvme_dev_disable(dev, false);
2787
2788 - /*
2789 - * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2790 - * initializing procedure here.
2791 - */
2792 - if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2793 - dev_warn(dev->ctrl.device,
2794 - "failed to mark controller CONNECTING\n");
2795 - goto out;
2796 - }
2797 -
2798 + mutex_lock(&dev->shutdown_lock);
2799 result = nvme_pci_enable(dev);
2800 if (result)
2801 - goto out;
2802 + goto out_unlock;
2803
2804 result = nvme_pci_configure_admin_queue(dev);
2805 if (result)
2806 - goto out;
2807 + goto out_unlock;
2808
2809 result = nvme_alloc_admin_tags(dev);
2810 if (result)
2811 - goto out;
2812 + goto out_unlock;
2813
2814 /*
2815 * Limit the max command size to prevent iod->sg allocations going
2816 @@ -2288,6 +2279,17 @@ static void nvme_reset_work(struct work_struct *work)
2817 */
2818 dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
2819 dev->ctrl.max_segments = NVME_MAX_SEGS;
2820 + mutex_unlock(&dev->shutdown_lock);
2821 +
2822 + /*
2823 + * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
2824 + * initializing procedure here.
2825 + */
2826 + if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
2827 + dev_warn(dev->ctrl.device,
2828 + "failed to mark controller CONNECTING\n");
2829 + goto out;
2830 + }
2831
2832 result = nvme_init_identify(&dev->ctrl);
2833 if (result)
2834 @@ -2352,6 +2354,8 @@ static void nvme_reset_work(struct work_struct *work)
2835 nvme_start_ctrl(&dev->ctrl);
2836 return;
2837
2838 + out_unlock:
2839 + mutex_unlock(&dev->shutdown_lock);
2840 out:
2841 nvme_remove_dead_ctrl(dev, result);
2842 }
2843 diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
2844 index cf73a403d22d..cecbce21d01f 100644
2845 --- a/drivers/pinctrl/pinctrl-mcp23s08.c
2846 +++ b/drivers/pinctrl/pinctrl-mcp23s08.c
2847 @@ -832,8 +832,13 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
2848 break;
2849
2850 case MCP_TYPE_S18:
2851 + one_regmap_config =
2852 + devm_kmemdup(dev, &mcp23x17_regmap,
2853 + sizeof(struct regmap_config), GFP_KERNEL);
2854 + if (!one_regmap_config)
2855 + return -ENOMEM;
2856 mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
2857 - &mcp23x17_regmap);
2858 + one_regmap_config);
2859 mcp->reg_shift = 1;
2860 mcp->chip.ngpio = 16;
2861 mcp->chip.label = "mcp23s18";
2862 diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
2863 index 0c1aa6c314f5..7563c07e14e4 100644
2864 --- a/drivers/platform/x86/Kconfig
2865 +++ b/drivers/platform/x86/Kconfig
2866 @@ -856,6 +856,7 @@ config TOSHIBA_WMI
2867 config ACPI_CMPC
2868 tristate "CMPC Laptop Extras"
2869 depends on ACPI && INPUT
2870 + depends on BACKLIGHT_LCD_SUPPORT
2871 depends on RFKILL || RFKILL=n
2872 select BACKLIGHT_CLASS_DEVICE
2873 help
2874 @@ -1077,6 +1078,7 @@ config INTEL_OAKTRAIL
2875 config SAMSUNG_Q10
2876 tristate "Samsung Q10 Extras"
2877 depends on ACPI
2878 + depends on BACKLIGHT_LCD_SUPPORT
2879 select BACKLIGHT_CLASS_DEVICE
2880 ---help---
2881 This driver provides support for backlight control on Samsung Q10
2882 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
2883 index 970654fcc48d..2d1f6a583641 100644
2884 --- a/drivers/s390/net/qeth_core.h
2885 +++ b/drivers/s390/net/qeth_core.h
2886 @@ -22,6 +22,7 @@
2887 #include <linux/hashtable.h>
2888 #include <linux/ip.h>
2889 #include <linux/refcount.h>
2890 +#include <linux/workqueue.h>
2891
2892 #include <net/ipv6.h>
2893 #include <net/if_inet6.h>
2894 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
2895 index b03515d43745..56aacf32f71b 100644
2896 --- a/drivers/s390/net/qeth_core_main.c
2897 +++ b/drivers/s390/net/qeth_core_main.c
2898 @@ -565,6 +565,7 @@ static int __qeth_issue_next_read(struct qeth_card *card)
2899 QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
2900 "rc=%i\n", dev_name(&card->gdev->dev), rc);
2901 atomic_set(&channel->irq_pending, 0);
2902 + qeth_release_buffer(channel, iob);
2903 card->read_or_write_problem = 1;
2904 qeth_schedule_recovery(card);
2905 wake_up(&card->wait_q);
2906 @@ -1187,6 +1188,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
2907 rc = qeth_get_problem(cdev, irb);
2908 if (rc) {
2909 card->read_or_write_problem = 1;
2910 + if (iob)
2911 + qeth_release_buffer(iob->channel, iob);
2912 qeth_clear_ipacmd_list(card);
2913 qeth_schedule_recovery(card);
2914 goto out;
2915 @@ -1852,6 +1855,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
2916 QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
2917 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
2918 atomic_set(&channel->irq_pending, 0);
2919 + qeth_release_buffer(channel, iob);
2920 wake_up(&card->wait_q);
2921 return rc;
2922 }
2923 @@ -1923,6 +1927,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
2924 rc);
2925 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
2926 atomic_set(&channel->irq_pending, 0);
2927 + qeth_release_buffer(channel, iob);
2928 wake_up(&card->wait_q);
2929 return rc;
2930 }
2931 @@ -2110,6 +2115,7 @@ int qeth_send_control_data(struct qeth_card *card, int len,
2932 }
2933 reply = qeth_alloc_reply(card);
2934 if (!reply) {
2935 + qeth_release_buffer(channel, iob);
2936 return -ENOMEM;
2937 }
2938 reply->callback = reply_cb;
2939 @@ -2448,11 +2454,12 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2940 return 0;
2941 }
2942
2943 -static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
2944 +static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2945 {
2946 if (!q)
2947 return;
2948
2949 + qeth_clear_outq_buffers(q, 1);
2950 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2951 kfree(q);
2952 }
2953 @@ -2526,10 +2533,8 @@ out_freeoutqbufs:
2954 card->qdio.out_qs[i]->bufs[j] = NULL;
2955 }
2956 out_freeoutq:
2957 - while (i > 0) {
2958 - qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
2959 - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2960 - }
2961 + while (i > 0)
2962 + qeth_free_output_queue(card->qdio.out_qs[--i]);
2963 kfree(card->qdio.out_qs);
2964 card->qdio.out_qs = NULL;
2965 out_freepool:
2966 @@ -2562,10 +2567,8 @@ static void qeth_free_qdio_buffers(struct qeth_card *card)
2967 qeth_free_buffer_pool(card);
2968 /* free outbound qdio_qs */
2969 if (card->qdio.out_qs) {
2970 - for (i = 0; i < card->qdio.no_out_queues; ++i) {
2971 - qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2972 - qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
2973 - }
2974 + for (i = 0; i < card->qdio.no_out_queues; i++)
2975 + qeth_free_output_queue(card->qdio.out_qs[i]);
2976 kfree(card->qdio.out_qs);
2977 card->qdio.out_qs = NULL;
2978 }
2979 diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
2980 index 76b2fba5fba2..b7513c5848cf 100644
2981 --- a/drivers/s390/net/qeth_l2_main.c
2982 +++ b/drivers/s390/net/qeth_l2_main.c
2983 @@ -854,6 +854,8 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev)
2984
2985 if (cgdev->state == CCWGROUP_ONLINE)
2986 qeth_l2_set_offline(cgdev);
2987 +
2988 + cancel_work_sync(&card->close_dev_work);
2989 if (qeth_netdev_is_registered(card->dev))
2990 unregister_netdev(card->dev);
2991 }
2992 diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
2993 index b7f6a8384543..7f71ca0d08e7 100644
2994 --- a/drivers/s390/net/qeth_l3_main.c
2995 +++ b/drivers/s390/net/qeth_l3_main.c
2996 @@ -2611,6 +2611,7 @@ static void qeth_l3_remove_device(struct ccwgroup_device *cgdev)
2997 if (cgdev->state == CCWGROUP_ONLINE)
2998 qeth_l3_set_offline(cgdev);
2999
3000 + cancel_work_sync(&card->close_dev_work);
3001 if (qeth_netdev_is_registered(card->dev))
3002 unregister_netdev(card->dev);
3003 qeth_l3_clear_ip_htable(card, 0);
3004 diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
3005 index 6be77b3aa8a5..ac79f2088b31 100644
3006 --- a/drivers/scsi/53c700.c
3007 +++ b/drivers/scsi/53c700.c
3008 @@ -295,7 +295,7 @@ NCR_700_detect(struct scsi_host_template *tpnt,
3009 if(tpnt->sdev_attrs == NULL)
3010 tpnt->sdev_attrs = NCR_700_dev_attrs;
3011
3012 - memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
3013 + memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
3014 GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
3015 if(memory == NULL) {
3016 printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
3017 diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
3018 index 6e1b022a823d..3236240a4edd 100644
3019 --- a/drivers/scsi/aacraid/commsup.c
3020 +++ b/drivers/scsi/aacraid/commsup.c
3021 @@ -1304,8 +1304,9 @@ static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
3022 ADD : DELETE;
3023 break;
3024 }
3025 - case AifBuManagerEvent:
3026 - aac_handle_aif_bu(dev, aifcmd);
3027 + break;
3028 + case AifBuManagerEvent:
3029 + aac_handle_aif_bu(dev, aifcmd);
3030 break;
3031 }
3032
3033 diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
3034 index 350257c13a5b..bc9f2a2365f4 100644
3035 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c
3036 +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
3037 @@ -240,6 +240,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3038 return NULL;
3039 }
3040
3041 + cmgr->hba = hba;
3042 cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
3043 GFP_KERNEL);
3044 if (!cmgr->free_list) {
3045 @@ -256,7 +257,6 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3046 goto mem_err;
3047 }
3048
3049 - cmgr->hba = hba;
3050 cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
3051
3052 for (i = 0; i < arr_sz; i++) {
3053 @@ -295,7 +295,7 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
3054
3055 /* Allocate pool of io_bdts - one for each bnx2fc_cmd */
3056 mem_size = num_ios * sizeof(struct io_bdt *);
3057 - cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
3058 + cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
3059 if (!cmgr->io_bdt_pool) {
3060 printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
3061 goto mem_err;
3062 diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
3063 index be83590ed955..ff943f477d6f 100644
3064 --- a/drivers/scsi/libfc/fc_lport.c
3065 +++ b/drivers/scsi/libfc/fc_lport.c
3066 @@ -1726,14 +1726,14 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
3067 fc_frame_payload_op(fp) != ELS_LS_ACC) {
3068 FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
3069 fc_lport_error(lport, fp);
3070 - goto err;
3071 + goto out;
3072 }
3073
3074 flp = fc_frame_payload_get(fp, sizeof(*flp));
3075 if (!flp) {
3076 FC_LPORT_DBG(lport, "FLOGI bad response\n");
3077 fc_lport_error(lport, fp);
3078 - goto err;
3079 + goto out;
3080 }
3081
3082 mfs = ntohs(flp->fl_csp.sp_bb_data) &
3083 @@ -1743,7 +1743,7 @@ void fc_lport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
3084 FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
3085 "lport->mfs:%hu\n", mfs, lport->mfs);
3086 fc_lport_error(lport, fp);
3087 - goto err;
3088 + goto out;
3089 }
3090
3091 if (mfs <= lport->mfs) {
3092 diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
3093 index 372387a450df..1797e47fab38 100644
3094 --- a/drivers/scsi/libfc/fc_rport.c
3095 +++ b/drivers/scsi/libfc/fc_rport.c
3096 @@ -184,7 +184,6 @@ void fc_rport_destroy(struct kref *kref)
3097 struct fc_rport_priv *rdata;
3098
3099 rdata = container_of(kref, struct fc_rport_priv, kref);
3100 - WARN_ON(!list_empty(&rdata->peers));
3101 kfree_rcu(rdata, rcu);
3102 }
3103 EXPORT_SYMBOL(fc_rport_destroy);
3104 diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
3105 index 60bcc6df97a9..65305b3848bc 100644
3106 --- a/drivers/scsi/scsi_debug.c
3107 +++ b/drivers/scsi/scsi_debug.c
3108 @@ -62,7 +62,7 @@
3109
3110 /* make sure inq_product_rev string corresponds to this version */
3111 #define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
3112 -static const char *sdebug_version_date = "20180128";
3113 +static const char *sdebug_version_date = "20190125";
3114
3115 #define MY_NAME "scsi_debug"
3116
3117 @@ -735,7 +735,7 @@ static inline bool scsi_debug_lbp(void)
3118 (sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
3119 }
3120
3121 -static void *fake_store(unsigned long long lba)
3122 +static void *lba2fake_store(unsigned long long lba)
3123 {
3124 lba = do_div(lba, sdebug_store_sectors);
3125
3126 @@ -2514,8 +2514,8 @@ static int do_device_access(struct scsi_cmnd *scmd, u32 sg_skip, u64 lba,
3127 return ret;
3128 }
3129
3130 -/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
3131 - * arr into fake_store(lba,num) and return true. If comparison fails then
3132 +/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
3133 + * arr into lba2fake_store(lba,num) and return true. If comparison fails then
3134 * return false. */
3135 static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
3136 {
3137 @@ -2643,7 +2643,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
3138 if (sdt->app_tag == cpu_to_be16(0xffff))
3139 continue;
3140
3141 - ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
3142 + ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
3143 if (ret) {
3144 dif_errors++;
3145 return ret;
3146 @@ -3261,10 +3261,12 @@ err_out:
3147 static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3148 u32 ei_lba, bool unmap, bool ndob)
3149 {
3150 + int ret;
3151 unsigned long iflags;
3152 unsigned long long i;
3153 - int ret;
3154 - u64 lba_off;
3155 + u32 lb_size = sdebug_sector_size;
3156 + u64 block, lbaa;
3157 + u8 *fs1p;
3158
3159 ret = check_device_access_params(scp, lba, num);
3160 if (ret)
3161 @@ -3276,31 +3278,30 @@ static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
3162 unmap_region(lba, num);
3163 goto out;
3164 }
3165 -
3166 - lba_off = lba * sdebug_sector_size;
3167 + lbaa = lba;
3168 + block = do_div(lbaa, sdebug_store_sectors);
3169 /* if ndob then zero 1 logical block, else fetch 1 logical block */
3170 + fs1p = fake_storep + (block * lb_size);
3171 if (ndob) {
3172 - memset(fake_storep + lba_off, 0, sdebug_sector_size);
3173 + memset(fs1p, 0, lb_size);
3174 ret = 0;
3175 } else
3176 - ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
3177 - sdebug_sector_size);
3178 + ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
3179
3180 if (-1 == ret) {
3181 write_unlock_irqrestore(&atomic_rw, iflags);
3182 return DID_ERROR << 16;
3183 - } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
3184 + } else if (sdebug_verbose && !ndob && (ret < lb_size))
3185 sdev_printk(KERN_INFO, scp->device,
3186 "%s: %s: lb size=%u, IO sent=%d bytes\n",
3187 - my_name, "write same",
3188 - sdebug_sector_size, ret);
3189 + my_name, "write same", lb_size, ret);
3190
3191 /* Copy first sector to remaining blocks */
3192 - for (i = 1 ; i < num ; i++)
3193 - memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
3194 - fake_storep + lba_off,
3195 - sdebug_sector_size);
3196 -
3197 + for (i = 1 ; i < num ; i++) {
3198 + lbaa = lba + i;
3199 + block = do_div(lbaa, sdebug_store_sectors);
3200 + memmove(fake_storep + (block * lb_size), fs1p, lb_size);
3201 + }
3202 if (scsi_debug_lbp())
3203 map_region(lba, num);
3204 out:
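
The rewritten resp_write_same() maps every LBA through do_div() so writes wrap around inside the fake store instead of running past its end, and copies the remaining blocks with memmove() because source and destination can overlap once a wrap occurs. A minimal userspace model of that mapping (store geometry and names are illustrative, not the driver's actual parameters):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define STORE_SECTORS 8ULL   /* stands in for sdebug_store_sectors */
#define LB_SIZE       512U   /* stands in for sdebug_sector_size */

static unsigned char fake_store[STORE_SECTORS * LB_SIZE];

static uint64_t lba2block(uint64_t lba)   /* models the do_div() call */
{
	return lba % STORE_SECTORS;
}

static void write_same(uint64_t lba, uint32_t num, const unsigned char *src)
{
	unsigned char *fs1p = fake_store + lba2block(lba) * LB_SIZE;
	uint64_t i;

	memcpy(fs1p, src, LB_SIZE);                 /* first logical block */
	for (i = 1; i < num; i++)
		/* memmove, not memcpy: after a wrap the ranges may overlap */
		memmove(fake_store + lba2block(lba + i) * LB_SIZE,
			fs1p, LB_SIZE);
}

int main(void)
{
	unsigned char sector[LB_SIZE];

	memset(sector, 0xab, sizeof(sector));
	write_same(STORE_SECTORS - 2, 4, sector);   /* wraps into blocks 0 and 1 */
	printf("block 0 first byte: 0x%02x\n", fake_store[0]);
	return 0;
}
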
3205 diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
3206 index 8cc015183043..a4ac6073c555 100644
3207 --- a/drivers/soc/fsl/qbman/qman.c
3208 +++ b/drivers/soc/fsl/qbman/qman.c
3209 @@ -1081,18 +1081,19 @@ static void qm_mr_process_task(struct work_struct *work);
3210 static irqreturn_t portal_isr(int irq, void *ptr)
3211 {
3212 struct qman_portal *p = ptr;
3213 -
3214 - u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
3215 u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
3216 + u32 clear = 0;
3217
3218 if (unlikely(!is))
3219 return IRQ_NONE;
3220
3221 /* DQRR-handling if it's interrupt-driven */
3222 - if (is & QM_PIRQ_DQRI)
3223 + if (is & QM_PIRQ_DQRI) {
3224 __poll_portal_fast(p, QMAN_POLL_LIMIT);
3225 + clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
3226 + }
3227 /* Handling of anything else that's interrupt-driven */
3228 - clear |= __poll_portal_slow(p, is);
3229 + clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
3230 qm_out(&p->p, QM_REG_ISR, clear);
3231 return IRQ_HANDLED;
3232 }
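
The reworked portal_isr() acknowledges only the status bits it actually serviced instead of unconditionally folding QM_DQAVAIL_MASK into the clear mask, so events raised between the ISR read and the write-back are no longer lost. A sketch of that ack-what-you-handled pattern (register and bit names here are illustrative, not the QMan register map):

#include <stdint.h>

#define IRQ_DQRI 0x01u
#define IRQ_SLOW 0x02u

static volatile uint32_t isr_reg;                /* models QM_REG_ISR */
static uint32_t irq_sources = IRQ_DQRI | IRQ_SLOW;

static void poll_fast(void) { }                  /* stub handlers */
static uint32_t poll_slow(uint32_t is) { return is & IRQ_SLOW; }

static int portal_isr_model(void)
{
	uint32_t is = isr_reg & irq_sources;
	uint32_t clear = 0;

	if (!is)
		return 0;                        /* IRQ_NONE */
	if (is & IRQ_DQRI) {
		poll_fast();
		clear |= IRQ_DQRI;               /* ack only what ran */
	}
	clear |= poll_slow(is);                  /* masked to the slow bits */
	isr_reg &= ~clear;                       /* bits set meanwhile survive */
	return 1;                                /* IRQ_HANDLED */
}
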
3233 diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
3234 index 9e7815f55a17..7448744cc515 100644
3235 --- a/drivers/staging/erofs/inode.c
3236 +++ b/drivers/staging/erofs/inode.c
3237 @@ -184,16 +184,16 @@ static int fill_inode(struct inode *inode, int isdir)
3238 /* setup the new inode */
3239 if (S_ISREG(inode->i_mode)) {
3240 #ifdef CONFIG_EROFS_FS_XATTR
3241 - if (vi->xattr_isize)
3242 - inode->i_op = &erofs_generic_xattr_iops;
3243 + inode->i_op = &erofs_generic_xattr_iops;
3244 #endif
3245 inode->i_fop = &generic_ro_fops;
3246 } else if (S_ISDIR(inode->i_mode)) {
3247 inode->i_op =
3248 #ifdef CONFIG_EROFS_FS_XATTR
3249 - vi->xattr_isize ? &erofs_dir_xattr_iops :
3250 -#endif
3251 + &erofs_dir_xattr_iops;
3252 +#else
3253 &erofs_dir_iops;
3254 +#endif
3255 inode->i_fop = &erofs_dir_fops;
3256 } else if (S_ISLNK(inode->i_mode)) {
3257 /* by default, page_get_link is used for symlink */
3258 diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
3259 index 9f44ed8f0023..c70f0c5237ea 100644
3260 --- a/drivers/staging/erofs/internal.h
3261 +++ b/drivers/staging/erofs/internal.h
3262 @@ -327,12 +327,17 @@ static inline erofs_off_t iloc(struct erofs_sb_info *sbi, erofs_nid_t nid)
3263 return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
3264 }
3265
3266 -#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1)
3267 -#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1)
3268 +/* atomic flag definitions */
3269 +#define EROFS_V_EA_INITED_BIT 0
3270 +
3271 +/* bitlock definitions (arranged in reverse order) */
3272 +#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
3273
3274 struct erofs_vnode {
3275 erofs_nid_t nid;
3276 - unsigned int flags;
3277 +
3278 + /* atomic flags (including bitlocks) */
3279 + unsigned long flags;
3280
3281 unsigned char data_mapping_mode;
3282 /* inline size in bytes */
3283 @@ -485,8 +490,9 @@ struct erofs_map_blocks_iter {
3284 };
3285
3286
3287 -static inline struct page *erofs_get_inline_page(struct inode *inode,
3288 - erofs_blk_t blkaddr)
3289 +static inline struct page *
3290 +erofs_get_inline_page(struct inode *inode,
3291 + erofs_blk_t blkaddr)
3292 {
3293 return erofs_get_meta_page(inode->i_sb,
3294 blkaddr, S_ISDIR(inode->i_mode));
3295 diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
3296 index 546a47156101..023f64fa2c87 100644
3297 --- a/drivers/staging/erofs/namei.c
3298 +++ b/drivers/staging/erofs/namei.c
3299 @@ -15,74 +15,77 @@
3300
3301 #include <trace/events/erofs.h>
3302
3303 -/* based on the value of qn->len is accurate */
3304 -static inline int dirnamecmp(struct qstr *qn,
3305 - struct qstr *qd, unsigned *matched)
3306 +struct erofs_qstr {
3307 + const unsigned char *name;
3308 + const unsigned char *end;
3309 +};
3310 +
3311 +/* assumes the end of qn is accurate and that qn has a trailing '\0' */
3312 +static inline int dirnamecmp(const struct erofs_qstr *qn,
3313 + const struct erofs_qstr *qd,
3314 + unsigned int *matched)
3315 {
3316 - unsigned i = *matched, len = min(qn->len, qd->len);
3317 -loop:
3318 - if (unlikely(i >= len)) {
3319 - *matched = i;
3320 - if (qn->len < qd->len) {
3321 - /*
3322 - * actually (qn->len == qd->len)
3323 - * when qd->name[i] == '\0'
3324 - */
3325 - return qd->name[i] == '\0' ? 0 : -1;
3326 + unsigned int i = *matched;
3327 +
3328 + /*
3329 + * on-disk error; BUG_ON only in debugging mode.
3330 + * otherwise, return 1 to simply skip the invalid name
3331 + * and carry on (for the sake of lookup performance).
3332 + */
3333 + DBG_BUGON(qd->name > qd->end);
3334 +
3335 + /* qd may not have a trailing '\0' */
3336 + /* however, any access below qd->end is safe */
3337 + while (qd->name + i < qd->end && qd->name[i] != '\0') {
3338 + if (qn->name[i] != qd->name[i]) {
3339 + *matched = i;
3340 + return qn->name[i] > qd->name[i] ? 1 : -1;
3341 }
3342 - return (qn->len > qd->len);
3343 + ++i;
3344 }
3345 -
3346 - if (qn->name[i] != qd->name[i]) {
3347 - *matched = i;
3348 - return qn->name[i] > qd->name[i] ? 1 : -1;
3349 - }
3350 -
3351 - ++i;
3352 - goto loop;
3353 + *matched = i;
3354 + /* See comments in __d_alloc on the terminating NUL character */
3355 + return qn->name[i] == '\0' ? 0 : 1;
3356 }
3357
3358 -static struct erofs_dirent *find_target_dirent(
3359 - struct qstr *name,
3360 - u8 *data, int maxsize)
3361 +#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
3362 +
3363 +static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
3364 + u8 *data,
3365 + unsigned int dirblksize,
3366 + const int ndirents)
3367 {
3368 - unsigned ndirents, head, back;
3369 - unsigned startprfx, endprfx;
3370 + int head, back;
3371 + unsigned int startprfx, endprfx;
3372 struct erofs_dirent *const de = (struct erofs_dirent *)data;
3373
3374 - /* make sure that maxsize is valid */
3375 - BUG_ON(maxsize < sizeof(struct erofs_dirent));
3376 -
3377 - ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
3378 -
3379 - /* corrupted dir (may be unnecessary...) */
3380 - BUG_ON(!ndirents);
3381 -
3382 - head = 0;
3383 + /* since the 1st dirent has been evaluated previously */
3384 + head = 1;
3385 back = ndirents - 1;
3386 startprfx = endprfx = 0;
3387
3388 while (head <= back) {
3389 - unsigned mid = head + (back - head) / 2;
3390 - unsigned nameoff = le16_to_cpu(de[mid].nameoff);
3391 - unsigned matched = min(startprfx, endprfx);
3392 -
3393 - struct qstr dname = QSTR_INIT(data + nameoff,
3394 - unlikely(mid >= ndirents - 1) ?
3395 - maxsize - nameoff :
3396 - le16_to_cpu(de[mid + 1].nameoff) - nameoff);
3397 + const int mid = head + (back - head) / 2;
3398 + const int nameoff = nameoff_from_disk(de[mid].nameoff,
3399 + dirblksize);
3400 + unsigned int matched = min(startprfx, endprfx);
3401 + struct erofs_qstr dname = {
3402 + .name = data + nameoff,
3403 + .end = unlikely(mid >= ndirents - 1) ?
3404 + data + dirblksize :
3405 + data + nameoff_from_disk(de[mid + 1].nameoff,
3406 + dirblksize)
3407 + };
3408
3409 /* string comparison without already matched prefix */
3410 int ret = dirnamecmp(name, &dname, &matched);
3411
3412 - if (unlikely(!ret))
3413 + if (unlikely(!ret)) {
3414 return de + mid;
3415 - else if (ret > 0) {
3416 + } else if (ret > 0) {
3417 head = mid + 1;
3418 startprfx = matched;
3419 - } else if (unlikely(mid < 1)) /* fix "mid" overflow */
3420 - break;
3421 - else {
3422 + } else {
3423 back = mid - 1;
3424 endprfx = matched;
3425 }
3426 @@ -91,12 +94,12 @@ static struct erofs_dirent *find_target_dirent(
3427 return ERR_PTR(-ENOENT);
3428 }
3429
3430 -static struct page *find_target_block_classic(
3431 - struct inode *dir,
3432 - struct qstr *name, int *_diff)
3433 +static struct page *find_target_block_classic(struct inode *dir,
3434 + struct erofs_qstr *name,
3435 + int *_ndirents)
3436 {
3437 - unsigned startprfx, endprfx;
3438 - unsigned head, back;
3439 + unsigned int startprfx, endprfx;
3440 + int head, back;
3441 struct address_space *const mapping = dir->i_mapping;
3442 struct page *candidate = ERR_PTR(-ENOENT);
3443
3444 @@ -105,41 +108,43 @@ static struct page *find_target_block_classic(
3445 back = inode_datablocks(dir) - 1;
3446
3447 while (head <= back) {
3448 - unsigned mid = head + (back - head) / 2;
3449 + const int mid = head + (back - head) / 2;
3450 struct page *page = read_mapping_page(mapping, mid, NULL);
3451
3452 - if (IS_ERR(page)) {
3453 -exact_out:
3454 - if (!IS_ERR(candidate)) /* valid candidate */
3455 - put_page(candidate);
3456 - return page;
3457 - } else {
3458 - int diff;
3459 - unsigned ndirents, matched;
3460 - struct qstr dname;
3461 + if (!IS_ERR(page)) {
3462 struct erofs_dirent *de = kmap_atomic(page);
3463 - unsigned nameoff = le16_to_cpu(de->nameoff);
3464 -
3465 - ndirents = nameoff / sizeof(*de);
3466 + const int nameoff = nameoff_from_disk(de->nameoff,
3467 + EROFS_BLKSIZ);
3468 + const int ndirents = nameoff / sizeof(*de);
3469 + int diff;
3470 + unsigned int matched;
3471 + struct erofs_qstr dname;
3472
3473 - /* corrupted dir (should have one entry at least) */
3474 - BUG_ON(!ndirents || nameoff > PAGE_SIZE);
3475 + if (unlikely(!ndirents)) {
3476 + DBG_BUGON(1);
3477 + kunmap_atomic(de);
3478 + put_page(page);
3479 + page = ERR_PTR(-EIO);
3480 + goto out;
3481 + }
3482
3483 matched = min(startprfx, endprfx);
3484
3485 dname.name = (u8 *)de + nameoff;
3486 - dname.len = ndirents == 1 ?
3487 - /* since the rest of the last page is 0 */
3488 - EROFS_BLKSIZ - nameoff
3489 - : le16_to_cpu(de[1].nameoff) - nameoff;
3490 + if (ndirents == 1)
3491 + dname.end = (u8 *)de + EROFS_BLKSIZ;
3492 + else
3493 + dname.end = (u8 *)de +
3494 + nameoff_from_disk(de[1].nameoff,
3495 + EROFS_BLKSIZ);
3496
3497 /* string comparison without already matched prefix */
3498 diff = dirnamecmp(name, &dname, &matched);
3499 kunmap_atomic(de);
3500
3501 if (unlikely(!diff)) {
3502 - *_diff = 0;
3503 - goto exact_out;
3504 + *_ndirents = 0;
3505 + goto out;
3506 } else if (diff > 0) {
3507 head = mid + 1;
3508 startprfx = matched;
3509 @@ -147,45 +152,51 @@ exact_out:
3510 if (likely(!IS_ERR(candidate)))
3511 put_page(candidate);
3512 candidate = page;
3513 + *_ndirents = ndirents;
3514 } else {
3515 put_page(page);
3516
3517 - if (unlikely(mid < 1)) /* fix "mid" overflow */
3518 - break;
3519 -
3520 back = mid - 1;
3521 endprfx = matched;
3522 }
3523 + continue;
3524 }
3525 +out: /* free if the candidate is valid */
3526 + if (!IS_ERR(candidate))
3527 + put_page(candidate);
3528 + return page;
3529 }
3530 - *_diff = 1;
3531 return candidate;
3532 }
3533
3534 int erofs_namei(struct inode *dir,
3535 - struct qstr *name,
3536 - erofs_nid_t *nid, unsigned *d_type)
3537 + struct qstr *name,
3538 + erofs_nid_t *nid, unsigned int *d_type)
3539 {
3540 - int diff;
3541 + int ndirents;
3542 struct page *page;
3543 - u8 *data;
3544 + void *data;
3545 struct erofs_dirent *de;
3546 + struct erofs_qstr qn;
3547
3548 if (unlikely(!dir->i_size))
3549 return -ENOENT;
3550
3551 - diff = 1;
3552 - page = find_target_block_classic(dir, name, &diff);
3553 + qn.name = name->name;
3554 + qn.end = name->name + name->len;
3555 +
3556 + ndirents = 0;
3557 + page = find_target_block_classic(dir, &qn, &ndirents);
3558
3559 if (unlikely(IS_ERR(page)))
3560 return PTR_ERR(page);
3561
3562 data = kmap_atomic(page);
3563 /* the target page has been mapped */
3564 - de = likely(diff) ?
3565 - /* since the rest of the last page is 0 */
3566 - find_target_dirent(name, data, EROFS_BLKSIZ) :
3567 - (struct erofs_dirent *)data;
3568 + if (ndirents)
3569 + de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
3570 + else
3571 + de = (struct erofs_dirent *)data;
3572
3573 if (likely(!IS_ERR(de))) {
3574 *nid = le64_to_cpu(de->nid);
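
Making head, back and mid signed ints is what lets the new loop drop the old `mid < 1` underflow workaround: `back = mid - 1` can legitimately reach -1, and `head <= back` then terminates the search naturally. A userspace sketch of the loop shape (plain strcmp stands in for the prefix-aware dirnamecmp(), and the sorted name table is illustrative):

#include <stdio.h>
#include <string.h>

static const char *names[] = { "bar", "baz", "foo", "qux" };

static int find_entry(const char *want, int ndirents)
{
	int head = 0, back = ndirents - 1;       /* signed on purpose */

	while (head <= back) {
		int mid = head + (back - head) / 2;
		int ret = strcmp(want, names[mid]);

		if (!ret)
			return mid;
		if (ret > 0)
			head = mid + 1;
		else
			back = mid - 1;          /* may become -1; loop exits */
	}
	return -1;                               /* models -ENOENT */
}

int main(void)
{
	printf("foo  -> %d\n", find_entry("foo", 4));   /* 2 */
	printf("nope -> %d\n", find_entry("nope", 4));  /* -1 */
	return 0;
}
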
3575 diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
3576 index 0e9cfeccdf99..2db99cff3c99 100644
3577 --- a/drivers/staging/erofs/xattr.c
3578 +++ b/drivers/staging/erofs/xattr.c
3579 @@ -24,36 +24,77 @@ struct xattr_iter {
3580
3581 static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
3582 {
3583 - /* only init_inode_xattrs use non-atomic once */
3584 + /* the only user of kunmap() is 'init_inode_xattrs' */
3585 if (unlikely(!atomic))
3586 kunmap(it->page);
3587 else
3588 kunmap_atomic(it->kaddr);
3589 +
3590 unlock_page(it->page);
3591 put_page(it->page);
3592 }
3593
3594 -static void init_inode_xattrs(struct inode *inode)
3595 +static inline void xattr_iter_end_final(struct xattr_iter *it)
3596 +{
3597 + if (!it->page)
3598 + return;
3599 +
3600 + xattr_iter_end(it, true);
3601 +}
3602 +
3603 +static int init_inode_xattrs(struct inode *inode)
3604 {
3605 + struct erofs_vnode *const vi = EROFS_V(inode);
3606 struct xattr_iter it;
3607 unsigned i;
3608 struct erofs_xattr_ibody_header *ih;
3609 struct erofs_sb_info *sbi;
3610 - struct erofs_vnode *vi;
3611 bool atomic_map;
3612 + int ret = 0;
3613
3614 - if (likely(inode_has_inited_xattr(inode)))
3615 - return;
3616 + /* in most cases, the xattrs of this inode have already been initialized. */
3617 + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
3618 + return 0;
3619 +
3620 + if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
3621 + return -ERESTARTSYS;
3622
3623 - vi = EROFS_V(inode);
3624 - BUG_ON(!vi->xattr_isize);
3625 + /* someone has initialized xattrs for us? */
3626 + if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
3627 + goto out_unlock;
3628 +
3629 + /*
3630 + * bypass all xattr operations if ->xattr_isize is not greater than
3631 + * sizeof(struct erofs_xattr_ibody_header), in detail:
3632 + * 1) if it is too small to contain erofs_xattr_ibody_header, then
3633 + * ->xattr_isize must be 0 (meaning the inode has no xattrs);
3634 + * 2) if it exactly fits erofs_xattr_ibody_header, the on-disk layout
3635 + * is currently undefined (may be used later with a new sb feature).
3636 + */
3637 + if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
3638 + errln("xattr_isize %d of nid %llu is not supported yet",
3639 + vi->xattr_isize, vi->nid);
3640 + ret = -ENOTSUPP;
3641 + goto out_unlock;
3642 + } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
3643 + if (unlikely(vi->xattr_isize)) {
3644 + DBG_BUGON(1);
3645 + ret = -EIO;
3646 + goto out_unlock; /* xattr ondisk layout error */
3647 + }
3648 + ret = -ENOATTR;
3649 + goto out_unlock;
3650 + }
3651
3652 sbi = EROFS_I_SB(inode);
3653 it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
3654 it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
3655
3656 it.page = erofs_get_inline_page(inode, it.blkaddr);
3657 - BUG_ON(IS_ERR(it.page));
3658 + if (IS_ERR(it.page)) {
3659 + ret = PTR_ERR(it.page);
3660 + goto out_unlock;
3661 + }
3662
3663 /* read in shared xattr array (non-atomic, see kmalloc below) */
3664 it.kaddr = kmap(it.page);
3665 @@ -62,9 +103,13 @@ static void init_inode_xattrs(struct inode *inode)
3666 ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
3667
3668 vi->xattr_shared_count = ih->h_shared_count;
3669 - vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
3670 - vi->xattr_shared_count, sizeof(unsigned),
3671 - GFP_KERNEL | __GFP_NOFAIL);
3672 + vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
3673 + sizeof(uint), GFP_KERNEL);
3674 + if (!vi->xattr_shared_xattrs) {
3675 + xattr_iter_end(&it, atomic_map);
3676 + ret = -ENOMEM;
3677 + goto out_unlock;
3678 + }
3679
3680 /* let's skip ibody header */
3681 it.ofs += sizeof(struct erofs_xattr_ibody_header);
3682 @@ -77,7 +122,12 @@ static void init_inode_xattrs(struct inode *inode)
3683
3684 it.page = erofs_get_meta_page(inode->i_sb,
3685 ++it.blkaddr, S_ISDIR(inode->i_mode));
3686 - BUG_ON(IS_ERR(it.page));
3687 + if (IS_ERR(it.page)) {
3688 + kfree(vi->xattr_shared_xattrs);
3689 + vi->xattr_shared_xattrs = NULL;
3690 + ret = PTR_ERR(it.page);
3691 + goto out_unlock;
3692 + }
3693
3694 it.kaddr = kmap_atomic(it.page);
3695 atomic_map = true;
3696 @@ -89,7 +139,11 @@ static void init_inode_xattrs(struct inode *inode)
3697 }
3698 xattr_iter_end(&it, atomic_map);
3699
3700 - inode_set_inited_xattr(inode);
3701 + set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
3702 +
3703 +out_unlock:
3704 + clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
3705 + return ret;
3706 }
3707
3708 struct xattr_iter_handlers {
3709 @@ -99,18 +153,25 @@ struct xattr_iter_handlers {
3710 void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
3711 };
3712
3713 -static void xattr_iter_fixup(struct xattr_iter *it)
3714 +static inline int xattr_iter_fixup(struct xattr_iter *it)
3715 {
3716 - if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
3717 - xattr_iter_end(it, true);
3718 + if (it->ofs < EROFS_BLKSIZ)
3719 + return 0;
3720
3721 - it->blkaddr += erofs_blknr(it->ofs);
3722 - it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
3723 - BUG_ON(IS_ERR(it->page));
3724 + xattr_iter_end(it, true);
3725
3726 - it->kaddr = kmap_atomic(it->page);
3727 - it->ofs = erofs_blkoff(it->ofs);
3728 + it->blkaddr += erofs_blknr(it->ofs);
3729 + it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
3730 + if (IS_ERR(it->page)) {
3731 + int err = PTR_ERR(it->page);
3732 +
3733 + it->page = NULL;
3734 + return err;
3735 }
3736 +
3737 + it->kaddr = kmap_atomic(it->page);
3738 + it->ofs = erofs_blkoff(it->ofs);
3739 + return 0;
3740 }
3741
3742 static int inline_xattr_iter_begin(struct xattr_iter *it,
3743 @@ -132,21 +193,24 @@ static int inline_xattr_iter_begin(struct xattr_iter *it,
3744 it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
3745
3746 it->page = erofs_get_inline_page(inode, it->blkaddr);
3747 - BUG_ON(IS_ERR(it->page));
3748 - it->kaddr = kmap_atomic(it->page);
3749 + if (IS_ERR(it->page))
3750 + return PTR_ERR(it->page);
3751
3752 + it->kaddr = kmap_atomic(it->page);
3753 return vi->xattr_isize - xattr_header_sz;
3754 }
3755
3756 static int xattr_foreach(struct xattr_iter *it,
3757 - struct xattr_iter_handlers *op, unsigned *tlimit)
3758 + const struct xattr_iter_handlers *op, unsigned int *tlimit)
3759 {
3760 struct erofs_xattr_entry entry;
3761 unsigned value_sz, processed, slice;
3762 int err;
3763
3764 /* 0. fixup blkaddr, ofs, ipage */
3765 - xattr_iter_fixup(it);
3766 + err = xattr_iter_fixup(it);
3767 + if (err)
3768 + return err;
3769
3770 /*
3771 * 1. read xattr entry to the memory,
3772 @@ -178,7 +242,9 @@ static int xattr_foreach(struct xattr_iter *it,
3773 if (it->ofs >= EROFS_BLKSIZ) {
3774 BUG_ON(it->ofs > EROFS_BLKSIZ);
3775
3776 - xattr_iter_fixup(it);
3777 + err = xattr_iter_fixup(it);
3778 + if (err)
3779 + goto out;
3780 it->ofs = 0;
3781 }
3782
3783 @@ -210,7 +276,10 @@ static int xattr_foreach(struct xattr_iter *it,
3784 while (processed < value_sz) {
3785 if (it->ofs >= EROFS_BLKSIZ) {
3786 BUG_ON(it->ofs > EROFS_BLKSIZ);
3787 - xattr_iter_fixup(it);
3788 +
3789 + err = xattr_iter_fixup(it);
3790 + if (err)
3791 + goto out;
3792 it->ofs = 0;
3793 }
3794
3795 @@ -270,7 +339,7 @@ static void xattr_copyvalue(struct xattr_iter *_it,
3796 memcpy(it->buffer + processed, buf, len);
3797 }
3798
3799 -static struct xattr_iter_handlers find_xattr_handlers = {
3800 +static const struct xattr_iter_handlers find_xattr_handlers = {
3801 .entry = xattr_entrymatch,
3802 .name = xattr_namematch,
3803 .alloc_buffer = xattr_checkbuffer,
3804 @@ -291,8 +360,11 @@ static int inline_getxattr(struct inode *inode, struct getxattr_iter *it)
3805 ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
3806 if (ret >= 0)
3807 break;
3808 +
3809 + if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
3810 + break;
3811 }
3812 - xattr_iter_end(&it->it, true);
3813 + xattr_iter_end_final(&it->it);
3814
3815 return ret < 0 ? ret : it->buffer_size;
3816 }
3817 @@ -315,8 +387,10 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
3818 xattr_iter_end(&it->it, true);
3819
3820 it->it.page = erofs_get_meta_page(inode->i_sb,
3821 - blkaddr, false);
3822 - BUG_ON(IS_ERR(it->it.page));
3823 + blkaddr, false);
3824 + if (IS_ERR(it->it.page))
3825 + return PTR_ERR(it->it.page);
3826 +
3827 it->it.kaddr = kmap_atomic(it->it.page);
3828 it->it.blkaddr = blkaddr;
3829 }
3830 @@ -324,9 +398,12 @@ static int shared_getxattr(struct inode *inode, struct getxattr_iter *it)
3831 ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
3832 if (ret >= 0)
3833 break;
3834 +
3835 + if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
3836 + break;
3837 }
3838 if (vi->xattr_shared_count)
3839 - xattr_iter_end(&it->it, true);
3840 + xattr_iter_end_final(&it->it);
3841
3842 return ret < 0 ? ret : it->buffer_size;
3843 }
3844 @@ -351,7 +428,9 @@ int erofs_getxattr(struct inode *inode, int index,
3845 if (unlikely(name == NULL))
3846 return -EINVAL;
3847
3848 - init_inode_xattrs(inode);
3849 + ret = init_inode_xattrs(inode);
3850 + if (ret)
3851 + return ret;
3852
3853 it.index = index;
3854
3855 @@ -374,7 +453,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
3856 struct dentry *unused, struct inode *inode,
3857 const char *name, void *buffer, size_t size)
3858 {
3859 - struct erofs_vnode *const vi = EROFS_V(inode);
3860 struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
3861
3862 switch (handler->flags) {
3863 @@ -392,9 +470,6 @@ static int erofs_xattr_generic_get(const struct xattr_handler *handler,
3864 return -EINVAL;
3865 }
3866
3867 - if (!vi->xattr_isize)
3868 - return -ENOATTR;
3869 -
3870 return erofs_getxattr(inode, handler->flags, name, buffer, size);
3871 }
3872
3873 @@ -494,7 +569,7 @@ static int xattr_skipvalue(struct xattr_iter *_it,
3874 return 1;
3875 }
3876
3877 -static struct xattr_iter_handlers list_xattr_handlers = {
3878 +static const struct xattr_iter_handlers list_xattr_handlers = {
3879 .entry = xattr_entrylist,
3880 .name = xattr_namelist,
3881 .alloc_buffer = xattr_skipvalue,
3882 @@ -516,7 +591,7 @@ static int inline_listxattr(struct listxattr_iter *it)
3883 if (ret < 0)
3884 break;
3885 }
3886 - xattr_iter_end(&it->it, true);
3887 + xattr_iter_end_final(&it->it);
3888 return ret < 0 ? ret : it->buffer_ofs;
3889 }
3890
3891 @@ -538,8 +613,10 @@ static int shared_listxattr(struct listxattr_iter *it)
3892 xattr_iter_end(&it->it, true);
3893
3894 it->it.page = erofs_get_meta_page(inode->i_sb,
3895 - blkaddr, false);
3896 - BUG_ON(IS_ERR(it->it.page));
3897 + blkaddr, false);
3898 + if (IS_ERR(it->it.page))
3899 + return PTR_ERR(it->it.page);
3900 +
3901 it->it.kaddr = kmap_atomic(it->it.page);
3902 it->it.blkaddr = blkaddr;
3903 }
3904 @@ -549,7 +626,7 @@ static int shared_listxattr(struct listxattr_iter *it)
3905 break;
3906 }
3907 if (vi->xattr_shared_count)
3908 - xattr_iter_end(&it->it, true);
3909 + xattr_iter_end_final(&it->it);
3910
3911 return ret < 0 ? ret : it->buffer_ofs;
3912 }
3913 @@ -560,7 +637,9 @@ ssize_t erofs_listxattr(struct dentry *dentry,
3914 int ret;
3915 struct listxattr_iter it;
3916
3917 - init_inode_xattrs(d_inode(dentry));
3918 + ret = init_inode_xattrs(d_inode(dentry));
3919 + if (ret)
3920 + return ret;
3921
3922 it.dentry = dentry;
3923 it.buffer = buffer;
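
init_inode_xattrs() now follows the classic check / lock / re-check shape: a lockless test_bit() fast path, a killable bit lock, a second test under the lock, and clear_and_wake_up_bit() on every exit, so failures propagate as error codes instead of BUG_ON()s. A userspace model of the same shape, with a mutex standing in for the bit lock (names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool inited;

static int do_expensive_init(void) { return 0; }   /* stub; may fail */

static int init_once(void)
{
	int ret = 0;

	if (atomic_load(&inited))            /* fast path: test_bit() */
		return 0;

	pthread_mutex_lock(&init_lock);      /* wait_on_bit_lock() */
	if (atomic_load(&inited))            /* raced: someone else won */
		goto out_unlock;

	ret = do_expensive_init();
	if (!ret)
		atomic_store(&inited, true); /* set_bit() */
out_unlock:
	pthread_mutex_unlock(&init_lock);    /* clear_and_wake_up_bit() */
	return ret;                          /* callers see the real error */
}
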
3924 diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
3925 index d7312eed6088..91ea3083e7ad 100644
3926 --- a/drivers/usb/phy/Kconfig
3927 +++ b/drivers/usb/phy/Kconfig
3928 @@ -21,7 +21,7 @@ config AB8500_USB
3929
3930 config FSL_USB2_OTG
3931 bool "Freescale USB OTG Transceiver Driver"
3932 - depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
3933 + depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
3934 depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
3935 select USB_PHY
3936 help
3937 diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
3938 index d441244b79df..28d9c2b1b3bb 100644
3939 --- a/fs/autofs/expire.c
3940 +++ b/fs/autofs/expire.c
3941 @@ -596,7 +596,6 @@ int autofs_expire_run(struct super_block *sb,
3942 pkt.len = dentry->d_name.len;
3943 memcpy(pkt.name, dentry->d_name.name, pkt.len);
3944 pkt.name[pkt.len] = '\0';
3945 - dput(dentry);
3946
3947 if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
3948 ret = -EFAULT;
3949 @@ -609,6 +608,8 @@ int autofs_expire_run(struct super_block *sb,
3950 complete_all(&ino->expire_complete);
3951 spin_unlock(&sbi->fs_lock);
3952
3953 + dput(dentry);
3954 +
3955 return ret;
3956 }
3957
3958 diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
3959 index 846c052569dd..3c14a8e45ffb 100644
3960 --- a/fs/autofs/inode.c
3961 +++ b/fs/autofs/inode.c
3962 @@ -255,8 +255,10 @@ int autofs_fill_super(struct super_block *s, void *data, int silent)
3963 }
3964 root_inode = autofs_get_inode(s, S_IFDIR | 0755);
3965 root = d_make_root(root_inode);
3966 - if (!root)
3967 + if (!root) {
3968 + ret = -ENOMEM;
3969 goto fail_ino;
3970 + }
3971 pipe = NULL;
3972
3973 root->d_fsdata = ino;
3974 diff --git a/fs/buffer.c b/fs/buffer.c
3975 index 6f1ae3ac9789..c083c4b3c1e7 100644
3976 --- a/fs/buffer.c
3977 +++ b/fs/buffer.c
3978 @@ -200,6 +200,7 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
3979 struct buffer_head *head;
3980 struct page *page;
3981 int all_mapped = 1;
3982 + static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
3983
3984 index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
3985 page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
3986 @@ -227,15 +228,15 @@ __find_get_block_slow(struct block_device *bdev, sector_t block)
3987 * file io on the block device and getblk. It gets dealt with
3988 * elsewhere, don't buffer_error if we had some unmapped buffers
3989 */
3990 - if (all_mapped) {
3991 - printk("__find_get_block_slow() failed. "
3992 - "block=%llu, b_blocknr=%llu\n",
3993 - (unsigned long long)block,
3994 - (unsigned long long)bh->b_blocknr);
3995 - printk("b_state=0x%08lx, b_size=%zu\n",
3996 - bh->b_state, bh->b_size);
3997 - printk("device %pg blocksize: %d\n", bdev,
3998 - 1 << bd_inode->i_blkbits);
3999 + ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
4000 + if (all_mapped && __ratelimit(&last_warned)) {
4001 + printk("__find_get_block_slow() failed. block=%llu, "
4002 + "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
4003 + "device %pg blocksize: %d\n",
4004 + (unsigned long long)block,
4005 + (unsigned long long)bh->b_blocknr,
4006 + bh->b_state, bh->b_size, bdev,
4007 + 1 << bd_inode->i_blkbits);
4008 }
4009 out_unlock:
4010 spin_unlock(&bd_mapping->private_lock);
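
The consolidated warning is throttled by a static DEFINE_RATELIMIT_STATE(HZ, 1), i.e. at most one message per second, and RATELIMIT_MSG_ON_RELEASE makes the suppressed count get reported when the window closes rather than inline. A userspace model of that ratelimit (the one-per-second policy mirrors the patch; the implementation is a simplification):

#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t begin;
	int interval, burst, printed, missed;
};

static int ratelimit_ok(struct ratelimit *rs)
{
	time_t now = time(NULL);

	if (rs->begin == 0 || now - rs->begin >= rs->interval) {
		if (rs->missed)                  /* report on window close */
			fprintf(stderr, "%d messages suppressed\n", rs->missed);
		rs->begin = now;
		rs->printed = 0;
		rs->missed = 0;
	}
	if (rs->printed < rs->burst) {
		rs->printed++;
		return 1;
	}
	rs->missed++;
	return 0;
}

int main(void)
{
	struct ratelimit rs = { .interval = 1, .burst = 1 };

	for (int i = 0; i < 5; i++)
		if (ratelimit_ok(&rs))
			printf("warning %d\n", i);   /* only the first prints */
	return 0;
}
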
4011 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4012 index 1e5a1171212f..a2d701775c49 100644
4013 --- a/fs/cifs/smb2pdu.c
4014 +++ b/fs/cifs/smb2pdu.c
4015 @@ -2243,10 +2243,12 @@ SMB2_open_free(struct smb_rqst *rqst)
4016 {
4017 int i;
4018
4019 - cifs_small_buf_release(rqst->rq_iov[0].iov_base);
4020 - for (i = 1; i < rqst->rq_nvec; i++)
4021 - if (rqst->rq_iov[i].iov_base != smb2_padding)
4022 - kfree(rqst->rq_iov[i].iov_base);
4023 + if (rqst && rqst->rq_iov) {
4024 + cifs_small_buf_release(rqst->rq_iov[0].iov_base);
4025 + for (i = 1; i < rqst->rq_nvec; i++)
4026 + if (rqst->rq_iov[i].iov_base != smb2_padding)
4027 + kfree(rqst->rq_iov[i].iov_base);
4028 + }
4029 }
4030
4031 int
4032 @@ -2535,7 +2537,8 @@ SMB2_close_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
4033 void
4034 SMB2_close_free(struct smb_rqst *rqst)
4035 {
4036 - cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4037 + if (rqst && rqst->rq_iov)
4038 + cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4039 }
4040
4041 int
4042 @@ -2685,7 +2688,8 @@ SMB2_query_info_init(struct cifs_tcon *tcon, struct smb_rqst *rqst,
4043 void
4044 SMB2_query_info_free(struct smb_rqst *rqst)
4045 {
4046 - cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4047 + if (rqst && rqst->rq_iov)
4048 + cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
4049 }
4050
4051 static int
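
All three SMB2 *_free() helpers gain the same guard: tolerate a NULL rqst or rq_iov so teardown can be called unconditionally from error paths, in the same spirit as kfree(NULL) being a no-op. The shape in plain C (struct layout and names are illustrative):

#include <stdlib.h>

struct rqst {
	void **iov;                  /* models rq_iov */
	int nvec;                    /* models rq_nvec */
};

static void rqst_free(struct rqst *r)
{
	int i;

	if (!r || !r->iov)           /* request never built: safe no-op */
		return;
	for (i = 0; i < r->nvec; i++)
		free(r->iov[i]);
	free(r->iov);
	r->iov = NULL;
}
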
4052 diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
4053 index 8fb7887f2b3d..437257d1116f 100644
4054 --- a/fs/cifs/smb2pdu.h
4055 +++ b/fs/cifs/smb2pdu.h
4056 @@ -84,8 +84,8 @@
4057
4058 #define NUMBER_OF_SMB2_COMMANDS 0x0013
4059
4060 -/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
4061 -#define MAX_SMB2_HDR_SIZE 0x00b0
4062 +/* 52 transform hdr + 64 hdr + 88 create rsp */
4063 +#define MAX_SMB2_HDR_SIZE 204
4064
4065 #define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
4066 #define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
4067 diff --git a/fs/drop_caches.c b/fs/drop_caches.c
4068 index 82377017130f..d31b6c72b476 100644
4069 --- a/fs/drop_caches.c
4070 +++ b/fs/drop_caches.c
4071 @@ -21,8 +21,13 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
4072 spin_lock(&sb->s_inode_list_lock);
4073 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
4074 spin_lock(&inode->i_lock);
4075 + /*
4076 + * We must skip inodes in an unusual state. We may also skip
4077 + * inodes without pages, but we deliberately don't when we need
4078 + * to reschedule, so that the cond_resched() below avoids softlockups.
4079 + */
4080 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
4081 - (inode->i_mapping->nrpages == 0)) {
4082 + (inode->i_mapping->nrpages == 0 && !need_resched())) {
4083 spin_unlock(&inode->i_lock);
4084 continue;
4085 }
4086 @@ -30,6 +35,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
4087 spin_unlock(&inode->i_lock);
4088 spin_unlock(&sb->s_inode_list_lock);
4089
4090 + cond_resched();
4091 invalidate_mapping_pages(inode->i_mapping, 0, -1);
4092 iput(toput_inode);
4093 toput_inode = inode;
4094 diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
4095 index 4614ee25f621..9d566e62684c 100644
4096 --- a/fs/gfs2/glock.c
4097 +++ b/fs/gfs2/glock.c
4098 @@ -107,7 +107,7 @@ static int glock_wake_function(wait_queue_entry_t *wait, unsigned int mode,
4099
4100 static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
4101 {
4102 - u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
4103 + u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
4104
4105 return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
4106 }
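
Hashing sizeof(*name) bytes covered the structure's trailing padding, which need not contain equal bytes for equal keys; hashing only ht_parms.key_len keeps this hash consistent for lookups. A small demonstration of how padding bytes poison a whole-struct hash or compare (the struct here is illustrative, assuming a typical LP64 ABI with 4 tail-padding bytes):

#include <stdio.h>
#include <string.h>

struct lockname {                    /* 12 key bytes, 4 padding bytes */
	unsigned long long number;
	unsigned int type;
};

int main(void)
{
	struct lockname a, b;

	memset(&a, 0x00, sizeof(a));
	memset(&b, 0xff, sizeof(b)); /* different garbage in the padding */
	a.number = b.number = 7;
	a.type = b.type = 2;

	printf("key bytes equal:   %d\n", memcmp(&a, &b, 12) == 0);
	printf("full struct equal: %d\n", memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}
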
4107 diff --git a/fs/iomap.c b/fs/iomap.c
4108 index e57fb1e534c5..fac45206418a 100644
4109 --- a/fs/iomap.c
4110 +++ b/fs/iomap.c
4111 @@ -117,6 +117,12 @@ iomap_page_create(struct inode *inode, struct page *page)
4112 atomic_set(&iop->read_count, 0);
4113 atomic_set(&iop->write_count, 0);
4114 bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
4115 +
4116 + /*
4117 + * migrate_page_move_mapping() assumes that pages with private data have
4118 + * their count elevated by 1.
4119 + */
4120 + get_page(page);
4121 set_page_private(page, (unsigned long)iop);
4122 SetPagePrivate(page);
4123 return iop;
4124 @@ -133,6 +139,7 @@ iomap_page_release(struct page *page)
4125 WARN_ON_ONCE(atomic_read(&iop->write_count));
4126 ClearPagePrivate(page);
4127 set_page_private(page, 0);
4128 + put_page(page);
4129 kfree(iop);
4130 }
4131
4132 @@ -565,8 +572,10 @@ iomap_migrate_page(struct address_space *mapping, struct page *newpage,
4133
4134 if (page_has_private(page)) {
4135 ClearPagePrivate(page);
4136 + get_page(newpage);
4137 set_page_private(newpage, page_private(page));
4138 set_page_private(page, 0);
4139 + put_page(page);
4140 SetPagePrivate(newpage);
4141 }
4142
4143 @@ -1778,6 +1787,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4144 loff_t pos = iocb->ki_pos, start = pos;
4145 loff_t end = iocb->ki_pos + count - 1, ret = 0;
4146 unsigned int flags = IOMAP_DIRECT;
4147 + bool wait_for_completion = is_sync_kiocb(iocb);
4148 struct blk_plug plug;
4149 struct iomap_dio *dio;
4150
4151 @@ -1797,7 +1807,6 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4152 dio->end_io = end_io;
4153 dio->error = 0;
4154 dio->flags = 0;
4155 - dio->wait_for_completion = is_sync_kiocb(iocb);
4156
4157 dio->submit.iter = iter;
4158 dio->submit.waiter = current;
4159 @@ -1852,7 +1861,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4160 dio_warn_stale_pagecache(iocb->ki_filp);
4161 ret = 0;
4162
4163 - if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
4164 + if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
4165 !inode->i_sb->s_dio_done_wq) {
4166 ret = sb_init_dio_done_wq(inode->i_sb);
4167 if (ret < 0)
4168 @@ -1868,7 +1877,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4169 if (ret <= 0) {
4170 /* magic error code to fall back to buffered I/O */
4171 if (ret == -ENOTBLK) {
4172 - dio->wait_for_completion = true;
4173 + wait_for_completion = true;
4174 ret = 0;
4175 }
4176 break;
4177 @@ -1890,8 +1899,24 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4178 if (dio->flags & IOMAP_DIO_WRITE_FUA)
4179 dio->flags &= ~IOMAP_DIO_NEED_SYNC;
4180
4181 + /*
4182 + * We are about to drop our additional submission reference, which
4183 + * might be the last reference to the dio. There are three
4184 + * different ways we can progress here:
4185 + *
4186 + * (a) If this is the last reference we will always complete and free
4187 + * the dio ourselves.
4188 + * (b) If this is not the last reference, and we serve an asynchronous
4189 + * iocb, we must never touch the dio after the decrement; the
4190 + * I/O completion handler will complete and free it.
4191 + * (c) If this is not the last reference, but we serve a synchronous
4192 + * iocb, the I/O completion handler will wake us up on the drop
4193 + * of the final reference, and we will complete and free it here
4194 + * after being woken by the I/O completion handler.
4195 + */
4196 + dio->wait_for_completion = wait_for_completion;
4197 if (!atomic_dec_and_test(&dio->ref)) {
4198 - if (!dio->wait_for_completion)
4199 + if (!wait_for_completion)
4200 return -EIOCBQUEUED;
4201
4202 for (;;) {
4203 @@ -1908,9 +1933,7 @@ iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
4204 __set_current_state(TASK_RUNNING);
4205 }
4206
4207 - ret = iomap_dio_complete(dio);
4208 -
4209 - return ret;
4210 + return iomap_dio_complete(dio);
4211
4212 out_free_dio:
4213 kfree(dio);
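
The iomap_dio_rw() change is a use-after-free fix: dio->wait_for_completion used to be read after atomic_dec_and_test(), but for an async iocb the completion handler may free the dio the moment the reference drops, so the flag is now cached in a local taken beforehand. The shape reduced to a userspace sketch (the synchronous wake-up handshake is simplified away):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct dio {
	atomic_int ref;
	bool sync;                   /* models dio->wait_for_completion */
};

/* Returns 0 when we completed the dio, -1 for "queued, hands off". */
static int drop_submission_ref(struct dio *dio)
{
	bool wait = dio->sync;       /* copy out BEFORE the decrement... */

	if (atomic_fetch_sub(&dio->ref, 1) != 1) {
		/* ...because from here another thread may free dio */
		if (!wait)
			return -1;               /* models -EIOCBQUEUED */
		/* sync case: completer wakes us and hands dio back */
	}
	free(dio);                   /* last owner completes and frees */
	return 0;
}
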
4214 diff --git a/fs/nfs/super.c b/fs/nfs/super.c
4215 index 5ef2c71348bd..6b666d187907 100644
4216 --- a/fs/nfs/super.c
4217 +++ b/fs/nfs/super.c
4218 @@ -1906,6 +1906,11 @@ static int nfs_parse_devname(const char *dev_name,
4219 size_t len;
4220 char *end;
4221
4222 + if (unlikely(!dev_name || !*dev_name)) {
4223 + dfprintk(MOUNT, "NFS: device name not specified\n");
4224 + return -EINVAL;
4225 + }
4226 +
4227 /* Is the host name protected with square brackets? */
4228 if (*dev_name == '[') {
4229 end = strchr(++dev_name, ']');
4230 diff --git a/fs/proc/generic.c b/fs/proc/generic.c
4231 index 8ae109429a88..e39bac94dead 100644
4232 --- a/fs/proc/generic.c
4233 +++ b/fs/proc/generic.c
4234 @@ -256,7 +256,7 @@ struct dentry *proc_lookup_de(struct inode *dir, struct dentry *dentry,
4235 inode = proc_get_inode(dir->i_sb, de);
4236 if (!inode)
4237 return ERR_PTR(-ENOMEM);
4238 - d_set_d_op(dentry, &proc_misc_dentry_ops);
4239 + d_set_d_op(dentry, de->proc_dops);
4240 return d_splice_alias(inode, dentry);
4241 }
4242 read_unlock(&proc_subdir_lock);
4243 @@ -429,6 +429,8 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
4244 INIT_LIST_HEAD(&ent->pde_openers);
4245 proc_set_user(ent, (*parent)->uid, (*parent)->gid);
4246
4247 + ent->proc_dops = &proc_misc_dentry_ops;
4248 +
4249 out:
4250 return ent;
4251 }
4252 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
4253 index 5185d7f6a51e..95b14196f284 100644
4254 --- a/fs/proc/internal.h
4255 +++ b/fs/proc/internal.h
4256 @@ -44,6 +44,7 @@ struct proc_dir_entry {
4257 struct completion *pde_unload_completion;
4258 const struct inode_operations *proc_iops;
4259 const struct file_operations *proc_fops;
4260 + const struct dentry_operations *proc_dops;
4261 union {
4262 const struct seq_operations *seq_ops;
4263 int (*single_show)(struct seq_file *, void *);
4264 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
4265 index d5e0fcb3439e..a7b12435519e 100644
4266 --- a/fs/proc/proc_net.c
4267 +++ b/fs/proc/proc_net.c
4268 @@ -38,6 +38,22 @@ static struct net *get_proc_net(const struct inode *inode)
4269 return maybe_get_net(PDE_NET(PDE(inode)));
4270 }
4271
4272 +static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
4273 +{
4274 + return 0;
4275 +}
4276 +
4277 +static const struct dentry_operations proc_net_dentry_ops = {
4278 + .d_revalidate = proc_net_d_revalidate,
4279 + .d_delete = always_delete_dentry,
4280 +};
4281 +
4282 +static void pde_force_lookup(struct proc_dir_entry *pde)
4283 +{
4284 + /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
4285 + pde->proc_dops = &proc_net_dentry_ops;
4286 +}
4287 +
4288 static int seq_open_net(struct inode *inode, struct file *file)
4289 {
4290 unsigned int state_size = PDE(inode)->state_size;
4291 @@ -90,6 +106,7 @@ struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
4292 p = proc_create_reg(name, mode, &parent, data);
4293 if (!p)
4294 return NULL;
4295 + pde_force_lookup(p);
4296 p->proc_fops = &proc_net_seq_fops;
4297 p->seq_ops = ops;
4298 p->state_size = state_size;
4299 @@ -133,6 +150,7 @@ struct proc_dir_entry *proc_create_net_data_write(const char *name, umode_t mode
4300 p = proc_create_reg(name, mode, &parent, data);
4301 if (!p)
4302 return NULL;
4303 + pde_force_lookup(p);
4304 p->proc_fops = &proc_net_seq_fops;
4305 p->seq_ops = ops;
4306 p->state_size = state_size;
4307 @@ -181,6 +199,7 @@ struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
4308 p = proc_create_reg(name, mode, &parent, data);
4309 if (!p)
4310 return NULL;
4311 + pde_force_lookup(p);
4312 p->proc_fops = &proc_net_single_fops;
4313 p->single_show = show;
4314 return proc_register(parent, p);
4315 @@ -223,6 +242,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo
4316 p = proc_create_reg(name, mode, &parent, data);
4317 if (!p)
4318 return NULL;
4319 + pde_force_lookup(p);
4320 p->proc_fops = &proc_net_single_fops;
4321 p->single_show = show;
4322 p->write = write;
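
pde_force_lookup() installs a d_revalidate that always returns 0, so each path walk under /proc/net re-resolves the dentry; a cached dentry would otherwise keep pointing at the old namespace's entry across setns(CLONE_NEWNET). A toy cache showing what an always-failing validator buys (names and structure are illustrative):

#include <stdio.h>

struct cache { const char *value; };

static int revalidate(const struct cache *c)
{
	(void)c;
	return 0;                    /* never valid, like proc_net_d_revalidate */
}

static const char *fresh_lookup(const char *name)
{
	return name;                 /* stands in for a per-namespace lookup */
}

static const char *get(struct cache *c, const char *name)
{
	if (c->value && revalidate(c))
		return c->value;             /* dead branch here */
	c->value = fresh_lookup(name);       /* always taken instead */
	return c->value;
}

int main(void)
{
	struct cache c = { 0 };

	printf("%s\n", get(&c, "tcp"));      /* re-resolved every call */
	printf("%s\n", get(&c, "udp"));
	return 0;
}
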
4323 diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
4324 index bfe1639df02d..97fc498dc767 100644
4325 --- a/include/drm/drm_cache.h
4326 +++ b/include/drm/drm_cache.h
4327 @@ -47,6 +47,24 @@ static inline bool drm_arch_can_wc_memory(void)
4328 return false;
4329 #elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
4330 return false;
4331 +#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
4332 + /*
4333 + * The DRM driver stack is designed to work with cache coherent devices
4334 + * only, but permits an optimization to be enabled in some cases, where
4335 + * for some buffers, both the CPU and the GPU use uncached mappings,
4336 + * removing the need for DMA snooping and allocation in the CPU caches.
4337 + *
4338 + * The use of uncached GPU mappings relies on the correct implementation
4339 + * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
4340 + * will use cached mappings nonetheless. On x86 platforms, this does not
4341 + * seem to matter, as uncached CPU mappings will snoop the caches in any
4342 + * case. However, on ARM and arm64, enabling this optimization on a
4343 + * platform where NoSnoop is ignored results in loss of coherency, which
4344 + * breaks correct operation of the device. Since we have no way of
4345 + * detecting whether NoSnoop works or not, just disable this
4346 + * optimization entirely for ARM and arm64.
4347 + */
4348 + return false;
4349 #else
4350 return true;
4351 #endif
4352 diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
4353 index 8bdbb5f29494..3188c0bef3e7 100644
4354 --- a/include/linux/irqchip/arm-gic-v3.h
4355 +++ b/include/linux/irqchip/arm-gic-v3.h
4356 @@ -319,7 +319,7 @@
4357 #define GITS_TYPER_PLPIS (1UL << 0)
4358 #define GITS_TYPER_VLPIS (1UL << 1)
4359 #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
4360 -#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
4361 +#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
4362 #define GITS_TYPER_IDBITS_SHIFT 8
4363 #define GITS_TYPER_DEVBITS_SHIFT 13
4364 #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
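
ITT_entry_size is a 4-bit field at bits [7:4] of GITS_TYPER; masking with 0x1f pulled in bit 8, the first bit of the adjacent IDbits field. A two-line demonstration of the stray bit:

#include <stdint.h>
#include <stdio.h>

#define SHIFT 4

int main(void)
{
	uint64_t typer = 0x1f0;      /* bits 4..8 set: the field plus one stray bit */

	printf("0xf  mask: %llu\n",  /* 16: correct, the field is 4 bits wide */
	       (unsigned long long)(((typer >> SHIFT) & 0xf) + 1));
	printf("0x1f mask: %llu\n",  /* 32: bit 8 actually belongs to IDbits */
	       (unsigned long long)(((typer >> SHIFT) & 0x1f) + 1));
	return 0;
}
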
4365 diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
4366 index 7ddfc65586b0..4335bd771ce5 100644
4367 --- a/include/linux/stmmac.h
4368 +++ b/include/linux/stmmac.h
4369 @@ -184,6 +184,7 @@ struct plat_stmmacenet_data {
4370 struct clk *pclk;
4371 struct clk *clk_ptp_ref;
4372 unsigned int clk_ptp_rate;
4373 + unsigned int clk_ref_rate;
4374 struct reset_control *stmmac_rst;
4375 struct stmmac_axi *axi;
4376 int has_gmac4;
4377 diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
4378 index 03cc59ee9c95..cebadd6af4d9 100644
4379 --- a/kernel/bpf/hashtab.c
4380 +++ b/kernel/bpf/hashtab.c
4381 @@ -677,7 +677,7 @@ static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
4382 }
4383
4384 if (htab_is_prealloc(htab)) {
4385 - pcpu_freelist_push(&htab->freelist, &l->fnode);
4386 + __pcpu_freelist_push(&htab->freelist, &l->fnode);
4387 } else {
4388 atomic_dec(&htab->count);
4389 l->htab = htab;
4390 @@ -739,7 +739,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
4391 } else {
4392 struct pcpu_freelist_node *l;
4393
4394 - l = pcpu_freelist_pop(&htab->freelist);
4395 + l = __pcpu_freelist_pop(&htab->freelist);
4396 if (!l)
4397 return ERR_PTR(-E2BIG);
4398 l_new = container_of(l, struct htab_elem, fnode);
4399 diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
4400 index 673fa6fe2d73..0c1b4ba9e90e 100644
4401 --- a/kernel/bpf/percpu_freelist.c
4402 +++ b/kernel/bpf/percpu_freelist.c
4403 @@ -28,8 +28,8 @@ void pcpu_freelist_destroy(struct pcpu_freelist *s)
4404 free_percpu(s->freelist);
4405 }
4406
4407 -static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
4408 - struct pcpu_freelist_node *node)
4409 +static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
4410 + struct pcpu_freelist_node *node)
4411 {
4412 raw_spin_lock(&head->lock);
4413 node->next = head->first;
4414 @@ -37,12 +37,22 @@ static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
4415 raw_spin_unlock(&head->lock);
4416 }
4417
4418 -void pcpu_freelist_push(struct pcpu_freelist *s,
4419 +void __pcpu_freelist_push(struct pcpu_freelist *s,
4420 struct pcpu_freelist_node *node)
4421 {
4422 struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
4423
4424 - __pcpu_freelist_push(head, node);
4425 + ___pcpu_freelist_push(head, node);
4426 +}
4427 +
4428 +void pcpu_freelist_push(struct pcpu_freelist *s,
4429 + struct pcpu_freelist_node *node)
4430 +{
4431 + unsigned long flags;
4432 +
4433 + local_irq_save(flags);
4434 + __pcpu_freelist_push(s, node);
4435 + local_irq_restore(flags);
4436 }
4437
4438 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4439 @@ -63,7 +73,7 @@ void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4440 for_each_possible_cpu(cpu) {
4441 again:
4442 head = per_cpu_ptr(s->freelist, cpu);
4443 - __pcpu_freelist_push(head, buf);
4444 + ___pcpu_freelist_push(head, buf);
4445 i++;
4446 buf += elem_size;
4447 if (i == nr_elems)
4448 @@ -74,14 +84,12 @@ again:
4449 local_irq_restore(flags);
4450 }
4451
4452 -struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4453 +struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
4454 {
4455 struct pcpu_freelist_head *head;
4456 struct pcpu_freelist_node *node;
4457 - unsigned long flags;
4458 int orig_cpu, cpu;
4459
4460 - local_irq_save(flags);
4461 orig_cpu = cpu = raw_smp_processor_id();
4462 while (1) {
4463 head = per_cpu_ptr(s->freelist, cpu);
4464 @@ -89,16 +97,25 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4465 node = head->first;
4466 if (node) {
4467 head->first = node->next;
4468 - raw_spin_unlock_irqrestore(&head->lock, flags);
4469 + raw_spin_unlock(&head->lock);
4470 return node;
4471 }
4472 raw_spin_unlock(&head->lock);
4473 cpu = cpumask_next(cpu, cpu_possible_mask);
4474 if (cpu >= nr_cpu_ids)
4475 cpu = 0;
4476 - if (cpu == orig_cpu) {
4477 - local_irq_restore(flags);
4478 + if (cpu == orig_cpu)
4479 return NULL;
4480 - }
4481 }
4482 }
4483 +
4484 +struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
4485 +{
4486 + struct pcpu_freelist_node *ret;
4487 + unsigned long flags;
4488 +
4489 + local_irq_save(flags);
4490 + ret = __pcpu_freelist_pop(s);
4491 + local_irq_restore(flags);
4492 + return ret;
4493 +}
4494 diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
4495 index 3049aae8ea1e..c3960118e617 100644
4496 --- a/kernel/bpf/percpu_freelist.h
4497 +++ b/kernel/bpf/percpu_freelist.h
4498 @@ -22,8 +22,12 @@ struct pcpu_freelist_node {
4499 struct pcpu_freelist_node *next;
4500 };
4501
4502 +/* pcpu_freelist_* do spin_lock_irqsave. */
4503 void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
4504 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
4505 +/* __pcpu_freelist_* do spin_lock only. caller must disable irqs. */
4506 +void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
4507 +struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
4508 void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
4509 u32 nr_elems);
4510 int pcpu_freelist_init(struct pcpu_freelist *);
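
The split follows the usual locked/__unlocked kernel convention: the __pcpu_freelist_* variants assume the caller already disabled interrupts (as the BPF map paths do), while the plain variants wrap them in local_irq_save/restore. A userspace model of the layering, with a mutex standing in for interrupt disabling:

#include <pthread.h>

static pthread_mutex_t irq_off = PTHREAD_MUTEX_INITIALIZER;

struct node { struct node *next; };
static struct node *head;

/* Bare variant: caller must already hold irq_off. */
static void __push(struct node *n)
{
	n->next = head;
	head = n;
}

/* Convenience variant: takes the protection itself. */
static void push(struct node *n)
{
	pthread_mutex_lock(&irq_off);
	__push(n);
	pthread_mutex_unlock(&irq_off);
}
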
4511 diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
4512 index 382c09dddf93..cc40b8be1171 100644
4513 --- a/kernel/bpf/syscall.c
4514 +++ b/kernel/bpf/syscall.c
4515 @@ -701,8 +701,13 @@ static int map_lookup_elem(union bpf_attr *attr)
4516
4517 if (bpf_map_is_dev_bound(map)) {
4518 err = bpf_map_offload_lookup_elem(map, key, value);
4519 - } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4520 - map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
4521 + goto done;
4522 + }
4523 +
4524 + preempt_disable();
4525 + this_cpu_inc(bpf_prog_active);
4526 + if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
4527 + map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
4528 err = bpf_percpu_hash_copy(map, key, value);
4529 } else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
4530 err = bpf_percpu_array_copy(map, key, value);
4531 @@ -722,7 +727,10 @@ static int map_lookup_elem(union bpf_attr *attr)
4532 rcu_read_unlock();
4533 err = ptr ? 0 : -ENOENT;
4534 }
4535 + this_cpu_dec(bpf_prog_active);
4536 + preempt_enable();
4537
4538 +done:
4539 if (err)
4540 goto free_value;
4541
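
map_lookup_elem() now brackets the per-cpu copies with preempt_disable() and an incremented bpf_prog_active, the counter that tracing entry points consult, so a kprobe-attached BPF program cannot re-enter the map code mid-copy; device-bound maps bypass the guard through the new done: label. A sketch of the general re-entry-guard idea with a thread-local counter (in the kernel, the check itself happens on the tracing side):

static __thread int prog_active;     /* models the per-cpu bpf_prog_active */

static int guarded_op(void)
{
	if (prog_active)             /* nested caller: refuse to recurse */
		return -1;

	prog_active++;
	/* ...the critical copy that must not be re-entered runs here... */
	prog_active--;
	return 0;
}
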
4542 diff --git a/kernel/events/core.c b/kernel/events/core.c
4543 index 4fb9d5054618..aa996a0854b9 100644
4544 --- a/kernel/events/core.c
4545 +++ b/kernel/events/core.c
4546 @@ -436,18 +436,18 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
4547 void __user *buffer, size_t *lenp,
4548 loff_t *ppos)
4549 {
4550 - int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4551 -
4552 - if (ret || !write)
4553 - return ret;
4554 -
4555 + int ret;
4556 + int perf_cpu = sysctl_perf_cpu_time_max_percent;
4557 /*
4558 * If throttling is disabled don't allow the write:
4559 */
4560 - if (sysctl_perf_cpu_time_max_percent == 100 ||
4561 - sysctl_perf_cpu_time_max_percent == 0)
4562 + if (write && (perf_cpu == 100 || perf_cpu == 0))
4563 return -EINVAL;
4564
4565 + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
4566 + if (ret || !write)
4567 + return ret;
4568 +
4569 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
4570 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
4571 update_perf_cpu_limits();
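
The handler is reordered so validation runs before proc_dointvec_minmax() commits the new value; previously a rejected write had already updated the sysctl. The validate-then-commit shape in isolation (knob names and values are illustrative):

#include <stdio.h>

static int max_percent = 25;
static int sample_rate = 4000;

static int write_sample_rate(int new_rate)
{
	/* reject first: with throttling disabled the write is meaningless */
	if (max_percent == 100 || max_percent == 0)
		return -22;                  /* -EINVAL, nothing changed */

	sample_rate = new_rate;              /* commit only after validation */
	return 0;
}

int main(void)
{
	printf("%d\n", write_sample_rate(8000));    /* 0: accepted */
	max_percent = 0;
	printf("%d\n", write_sample_rate(16000));   /* -22: rejected */
	printf("rate: %d\n", sample_rate);          /* still 8000 */
	return 0;
}
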
4572 diff --git a/kernel/relay.c b/kernel/relay.c
4573 index 04f248644e06..9e0f52375487 100644
4574 --- a/kernel/relay.c
4575 +++ b/kernel/relay.c
4576 @@ -428,6 +428,8 @@ static struct dentry *relay_create_buf_file(struct rchan *chan,
4577 dentry = chan->cb->create_buf_file(tmpname, chan->parent,
4578 S_IRUSR, buf,
4579 &chan->is_global);
4580 + if (IS_ERR(dentry))
4581 + dentry = NULL;
4582
4583 kfree(tmpname);
4584
4585 @@ -461,7 +463,7 @@ static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
4586 dentry = chan->cb->create_buf_file(NULL, NULL,
4587 S_IRUSR, buf,
4588 &chan->is_global);
4589 - if (WARN_ON(dentry))
4590 + if (IS_ERR_OR_NULL(dentry))
4591 goto free_buf;
4592 }
4593
4594 diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
4595 index 9864a35c8bb5..6c28d519447d 100644
4596 --- a/kernel/trace/bpf_trace.c
4597 +++ b/kernel/trace/bpf_trace.c
4598 @@ -1158,22 +1158,12 @@ static int __bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *
4599
4600 int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
4601 {
4602 - int err;
4603 -
4604 - mutex_lock(&bpf_event_mutex);
4605 - err = __bpf_probe_register(btp, prog);
4606 - mutex_unlock(&bpf_event_mutex);
4607 - return err;
4608 + return __bpf_probe_register(btp, prog);
4609 }
4610
4611 int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
4612 {
4613 - int err;
4614 -
4615 - mutex_lock(&bpf_event_mutex);
4616 - err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
4617 - mutex_unlock(&bpf_event_mutex);
4618 - return err;
4619 + return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
4620 }
4621
4622 int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
4623 diff --git a/lib/test_kmod.c b/lib/test_kmod.c
4624 index d82d022111e0..9cf77628fc91 100644
4625 --- a/lib/test_kmod.c
4626 +++ b/lib/test_kmod.c
4627 @@ -632,7 +632,7 @@ static void __kmod_config_free(struct test_config *config)
4628 config->test_driver = NULL;
4629
4630 kfree_const(config->test_fs);
4631 - config->test_driver = NULL;
4632 + config->test_fs = NULL;
4633 }
4634
4635 static void kmod_config_free(struct kmod_test_device *test_dev)
4636 diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
4637 index c6119ad3561e..156991edec2a 100644
4638 --- a/mm/memory_hotplug.c
4639 +++ b/mm/memory_hotplug.c
4640 @@ -1213,11 +1213,13 @@ static inline int pageblock_free(struct page *page)
4641 return PageBuddy(page) && page_order(page) >= pageblock_order;
4642 }
4643
4644 -/* Return the start of the next active pageblock after a given page */
4645 -static struct page *next_active_pageblock(struct page *page)
4646 +/* Return the pfn of the start of the next active pageblock after a given pfn */
4647 +static unsigned long next_active_pageblock(unsigned long pfn)
4648 {
4649 + struct page *page = pfn_to_page(pfn);
4650 +
4651 /* Ensure the starting page is pageblock-aligned */
4652 - BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
4653 + BUG_ON(pfn & (pageblock_nr_pages - 1));
4654
4655 /* If the entire pageblock is free, move to the end of free page */
4656 if (pageblock_free(page)) {
4657 @@ -1225,16 +1227,16 @@ static struct page *next_active_pageblock(struct page *page)
4658 /* be careful. we don't have locks, page_order can be changed.*/
4659 order = page_order(page);
4660 if ((order < MAX_ORDER) && (order >= pageblock_order))
4661 - return page + (1 << order);
4662 + return pfn + (1 << order);
4663 }
4664
4665 - return page + pageblock_nr_pages;
4666 + return pfn + pageblock_nr_pages;
4667 }
4668
4669 -static bool is_pageblock_removable_nolock(struct page *page)
4670 +static bool is_pageblock_removable_nolock(unsigned long pfn)
4671 {
4672 + struct page *page = pfn_to_page(pfn);
4673 struct zone *zone;
4674 - unsigned long pfn;
4675
4676 /*
4677 * We have to be careful here because we are iterating over memory
4678 @@ -1257,12 +1259,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
4679 /* Checks if this range of memory is likely to be hot-removable. */
4680 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
4681 {
4682 - struct page *page = pfn_to_page(start_pfn);
4683 - struct page *end_page = page + nr_pages;
4684 + unsigned long end_pfn, pfn;
4685 +
4686 + end_pfn = min(start_pfn + nr_pages,
4687 + zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
4688
4689 /* Check the starting page of each pageblock within the range */
4690 - for (; page < end_page; page = next_active_pageblock(page)) {
4691 - if (!is_pageblock_removable_nolock(page))
4692 + for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
4693 + if (!is_pageblock_removable_nolock(pfn))
4694 return false;
4695 cond_resched();
4696 }
4697 @@ -1298,6 +1302,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
4698 i++;
4699 if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
4700 continue;
4701 + /* Check if we got outside of the zone */
4702 + if (zone && !zone_spans_pfn(zone, pfn + i))
4703 + return 0;
4704 page = pfn_to_page(pfn + i);
4705 if (zone && page_zone(page) != zone)
4706 return 0;
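
The walk now advances by pfn and calls pfn_to_page() afresh each step because `page + n` arithmetic is only valid while both struct pages sit in the same memmap chunk; with sparse memory the next pageblock can live in a different one. A userspace analogy with two separate chunks (entirely illustrative):

#include <stdio.h>

#define CHUNK 4
static int chunk_a[CHUNK] = { 0, 1, 2, 3 };
static int chunk_b[CHUNK] = { 4, 5, 6, 7 };

static int *idx_to_item(int idx)     /* plays the role of pfn_to_page() */
{
	return idx < CHUNK ? &chunk_a[idx] : &chunk_b[idx - CHUNK];
}

int main(void)
{
	for (int idx = 0; idx < 2 * CHUNK; idx += 2)   /* step by index */
		printf("%d ", *idx_to_item(idx));      /* never `item + 2` */
	printf("\n");
	return 0;
}
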
4707 diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
4708 index e8090f099eb8..ef0dec20c7d8 100644
4709 --- a/net/batman-adv/bat_v_elp.c
4710 +++ b/net/batman-adv/bat_v_elp.c
4711 @@ -104,6 +104,9 @@ static u32 batadv_v_elp_get_throughput(struct batadv_hardif_neigh_node *neigh)
4712
4713 ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
4714
4715 + /* free the TID stats immediately */
4716 + cfg80211_sinfo_release_content(&sinfo);
4717 +
4718 dev_put(real_netdev);
4719 if (ret == -ENOENT) {
4720 /* Node is not associated anymore! It would be
4721 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
4722 index 5e55cef0cec3..6693e209efe8 100644
4723 --- a/net/bridge/netfilter/ebtables.c
4724 +++ b/net/bridge/netfilter/ebtables.c
4725 @@ -2293,9 +2293,12 @@ static int compat_do_replace(struct net *net, void __user *user,
4726
4727 xt_compat_lock(NFPROTO_BRIDGE);
4728
4729 - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
4730 - if (ret < 0)
4731 - goto out_unlock;
4732 + if (tmp.nentries) {
4733 + ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
4734 + if (ret < 0)
4735 + goto out_unlock;
4736 + }
4737 +
4738 ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
4739 if (ret < 0)
4740 goto out_unlock;
4741 diff --git a/net/core/filter.c b/net/core/filter.c
4742 index fb0080e84bd4..bed9061102f4 100644
4743 --- a/net/core/filter.c
4744 +++ b/net/core/filter.c
4745 @@ -3909,10 +3909,12 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
4746 /* Only some socketops are supported */
4747 switch (optname) {
4748 case SO_RCVBUF:
4749 + val = min_t(u32, val, sysctl_rmem_max);
4750 sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
4751 sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
4752 break;
4753 case SO_SNDBUF:
4754 + val = min_t(u32, val, sysctl_wmem_max);
4755 sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
4756 sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
4757 break;
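
bpf_setsockopt() now clamps the requested size to the same sysctl ceilings that plain setsockopt(SO_RCVBUF/SO_SNDBUF) honors, closing a path by which BPF programs could exceed sysctl_rmem_max/wmem_max. The clamp-then-floor shape in isolation (limit values are illustrative):

#include <stdio.h>

static unsigned int rmem_max = 212992;   /* illustrative sysctl ceiling */
static unsigned int rcvbuf_min = 4096;   /* illustrative SOCK_MIN_RCVBUF */

static unsigned int set_rcvbuf(unsigned int val)
{
	if (val > rmem_max)
		val = rmem_max;          /* min_t(u32, val, sysctl_rmem_max) */
	val *= 2;                        /* the kernel doubles for overhead */
	if (val < rcvbuf_min)
		val = rcvbuf_min;        /* max_t(int, ..., SOCK_MIN_RCVBUF) */
	return val;
}

int main(void)
{
	printf("%u\n", set_rcvbuf(1u << 30));   /* clamped to 2 * rmem_max */
	printf("%u\n", set_rcvbuf(16));         /* floored to rcvbuf_min */
	return 0;
}
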
4758 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
4759 index 7f56944b020f..40a7cd56e008 100644
4760 --- a/net/ipv4/ip_vti.c
4761 +++ b/net/ipv4/ip_vti.c
4762 @@ -74,6 +74,33 @@ drop:
4763 return 0;
4764 }
4765
4766 +static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
4767 + int encap_type)
4768 +{
4769 + struct ip_tunnel *tunnel;
4770 + const struct iphdr *iph = ip_hdr(skb);
4771 + struct net *net = dev_net(skb->dev);
4772 + struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
4773 +
4774 + tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
4775 + iph->saddr, iph->daddr, 0);
4776 + if (tunnel) {
4777 + if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
4778 + goto drop;
4779 +
4780 + XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
4781 +
4782 + skb->dev = tunnel->dev;
4783 +
4784 + return xfrm_input(skb, nexthdr, spi, encap_type);
4785 + }
4786 +
4787 + return -EINVAL;
4788 +drop:
4789 + kfree_skb(skb);
4790 + return 0;
4791 +}
4792 +
4793 static int vti_rcv(struct sk_buff *skb)
4794 {
4795 XFRM_SPI_SKB_CB(skb)->family = AF_INET;
4796 @@ -82,6 +109,14 @@ static int vti_rcv(struct sk_buff *skb)
4797 return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
4798 }
4799
4800 +static int vti_rcv_ipip(struct sk_buff *skb)
4801 +{
4802 + XFRM_SPI_SKB_CB(skb)->family = AF_INET;
4803 + XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
4804 +
4805 + return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
4806 +}
4807 +
4808 static int vti_rcv_cb(struct sk_buff *skb, int err)
4809 {
4810 unsigned short family;
4811 @@ -435,6 +470,12 @@ static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
4812 .priority = 100,
4813 };
4814
4815 +static struct xfrm_tunnel ipip_handler __read_mostly = {
4816 + .handler = vti_rcv_ipip,
4817 + .err_handler = vti4_err,
4818 + .priority = 0,
4819 +};
4820 +
4821 static int __net_init vti_init_net(struct net *net)
4822 {
4823 int err;
4824 @@ -603,6 +644,13 @@ static int __init vti_init(void)
4825 if (err < 0)
4826 goto xfrm_proto_comp_failed;
4827
4828 + msg = "ipip tunnel";
4829 + err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
4830 + if (err < 0) {
4831 + pr_info("%s: cant't register tunnel\n",__func__);
4832 + goto xfrm_tunnel_failed;
4833 + }
4834 +
4835 msg = "netlink interface";
4836 err = rtnl_link_register(&vti_link_ops);
4837 if (err < 0)
4838 @@ -612,6 +660,8 @@ static int __init vti_init(void)
4839
4840 rtnl_link_failed:
4841 xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
4842 +xfrm_tunnel_failed:
4843 + xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
4844 xfrm_proto_comp_failed:
4845 xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
4846 xfrm_proto_ah_failed:
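
vti_init() gains one more registration step and a matching unwind label; as usual in kernel init paths, a failure jumps to a label that tears down only what was already set up, in reverse order. A generic sketch of that goto-unwind idiom (register_a/register_b are stand-ins, not the real xfrm calls; register_b fails on purpose):

    #include <stdio.h>

    int register_a(void) { return 0; }
    void unregister_a(void) { puts("undo a"); }
    int register_b(void) { return -1; }     /* forced failure */
    void unregister_b(void) { puts("undo b"); }

    static int module_init_sketch(void)
    {
            int err;

            err = register_a();
            if (err < 0)
                    goto out;

            err = register_b();
            if (err < 0)
                    goto unreg_a;           /* unwind in reverse order */

            return 0;

    unreg_a:
            unregister_a();
    out:
            return err;
    }

    int main(void)
    {
            printf("init returned %d\n", module_init_sketch());
            return 0;
    }
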
4847 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
4848 index 518364f4abcc..55a77314340a 100644
4849 --- a/net/netfilter/ipvs/ip_vs_ctl.c
4850 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
4851 @@ -2220,6 +2220,18 @@ static int ip_vs_set_timeout(struct netns_ipvs *ipvs, struct ip_vs_timeout_user
4852 u->tcp_fin_timeout,
4853 u->udp_timeout);
4854
4855 +#ifdef CONFIG_IP_VS_PROTO_TCP
4856 + if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
4857 + u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
4858 + return -EINVAL;
4859 + }
4860 +#endif
4861 +
4862 +#ifdef CONFIG_IP_VS_PROTO_UDP
4863 + if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
4864 + return -EINVAL;
4865 +#endif
4866 +
4867 #ifdef CONFIG_IP_VS_PROTO_TCP
4868 if (u->tcp_timeout) {
4869 pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
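
The added bounds reject any timeout (in seconds) that would overflow a signed int once the protocol code converts it to jiffies by multiplying with HZ. A compact sketch of the check, with an assumed HZ value:

    #include <limits.h>
    #include <stdio.h>

    #define HZ 250                          /* assumed tick rate */

    /* Returns 0 if seconds can be converted to jiffies without
     * overflowing an int, mirroring the bounds in the patch. */
    static int validate_timeout(int seconds)
    {
            if (seconds < 0 || seconds > (INT_MAX / HZ))
                    return -1;              /* -EINVAL in the kernel */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", validate_timeout(60));       /* accepted */
            printf("%d\n", validate_timeout(INT_MAX));  /* rejected */
            return 0;
    }
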
4870 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
4871 index 277d02a8cac8..895171a2e1f1 100644
4872 --- a/net/netfilter/nf_conntrack_core.c
4873 +++ b/net/netfilter/nf_conntrack_core.c
4874 @@ -1007,6 +1007,22 @@ nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
4875 }
4876
4877 if (nf_ct_key_equal(h, tuple, zone, net)) {
4878 + /* Tuple is taken already, so the caller will need to find
4879 + * a new source port to use.
4880 + *
4881 + * Only exception:
4882 + * If the *original tuples* are identical, then both
4883 + * conntracks refer to the same flow.
4884 + * This is a rare situation, it can occur e.g. when
4885 + * more than one UDP packet is sent from same socket
4886 + * in different threads.
4887 + *
4888 + * Let nf_ct_resolve_clash() deal with this later.
4889 + */
4890 + if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
4891 + &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
4892 + continue;
4893 +
4894 NF_CT_STAT_INC_ATOMIC(net, found);
4895 rcu_read_unlock();
4896 return 1;
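
The new comparison separates a real source-port clash from the benign case where both entries describe the very same flow, which nf_ct_resolve_clash() can merge later. A toy sketch of that tuple test (the struct is a simplified stand-in, not the kernel's nf_conntrack_tuple):

    #include <stdbool.h>
    #include <stdio.h>

    struct tuple {
            unsigned int src_ip, dst_ip;
            unsigned short src_port, dst_port;
    };

    static bool tuple_equal(const struct tuple *a, const struct tuple *b)
    {
            return a->src_ip == b->src_ip && a->dst_ip == b->dst_ip &&
                   a->src_port == b->src_port && a->dst_port == b->dst_port;
    }

    int main(void)
    {
            /* Two UDP packets from the same socket racing through
             * conntrack carry identical original tuples. */
            struct tuple mine  = { 0x0a000001, 0x0a000002, 4000, 53 };
            struct tuple found = { 0x0a000001, 0x0a000002, 4000, 53 };

            if (tuple_equal(&mine, &found))
                    printf("same flow: skip, let clash resolution handle it\n");
            else
                    printf("port taken: caller must pick a new source port\n");
            return 0;
    }
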
4897 diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
4898 index 0d0d68c989df..1dae02a97ee3 100644
4899 --- a/net/netfilter/xt_TEE.c
4900 +++ b/net/netfilter/xt_TEE.c
4901 @@ -14,6 +14,8 @@
4902 #include <linux/skbuff.h>
4903 #include <linux/route.h>
4904 #include <linux/netfilter/x_tables.h>
4905 +#include <net/net_namespace.h>
4906 +#include <net/netns/generic.h>
4907 #include <net/route.h>
4908 #include <net/netfilter/ipv4/nf_dup_ipv4.h>
4909 #include <net/netfilter/ipv6/nf_dup_ipv6.h>
4910 @@ -25,8 +27,15 @@ struct xt_tee_priv {
4911 int oif;
4912 };
4913
4914 +static unsigned int tee_net_id __read_mostly;
4915 static const union nf_inet_addr tee_zero_address;
4916
4917 +struct tee_net {
4918 + struct list_head priv_list;
4919 + /* lock protects the priv_list */
4920 + struct mutex lock;
4921 +};
4922 +
4923 static unsigned int
4924 tee_tg4(struct sk_buff *skb, const struct xt_action_param *par)
4925 {
4926 @@ -51,17 +60,16 @@ tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
4927 }
4928 #endif
4929
4930 -static DEFINE_MUTEX(priv_list_mutex);
4931 -static LIST_HEAD(priv_list);
4932 -
4933 static int tee_netdev_event(struct notifier_block *this, unsigned long event,
4934 void *ptr)
4935 {
4936 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4937 + struct net *net = dev_net(dev);
4938 + struct tee_net *tn = net_generic(net, tee_net_id);
4939 struct xt_tee_priv *priv;
4940
4941 - mutex_lock(&priv_list_mutex);
4942 - list_for_each_entry(priv, &priv_list, list) {
4943 + mutex_lock(&tn->lock);
4944 + list_for_each_entry(priv, &tn->priv_list, list) {
4945 switch (event) {
4946 case NETDEV_REGISTER:
4947 if (!strcmp(dev->name, priv->tginfo->oif))
4948 @@ -79,13 +87,14 @@ static int tee_netdev_event(struct notifier_block *this, unsigned long event,
4949 break;
4950 }
4951 }
4952 - mutex_unlock(&priv_list_mutex);
4953 + mutex_unlock(&tn->lock);
4954
4955 return NOTIFY_DONE;
4956 }
4957
4958 static int tee_tg_check(const struct xt_tgchk_param *par)
4959 {
4960 + struct tee_net *tn = net_generic(par->net, tee_net_id);
4961 struct xt_tee_tginfo *info = par->targinfo;
4962 struct xt_tee_priv *priv;
4963
4964 @@ -95,6 +104,8 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
4965 return -EINVAL;
4966
4967 if (info->oif[0]) {
4968 + struct net_device *dev;
4969 +
4970 if (info->oif[sizeof(info->oif)-1] != '\0')
4971 return -EINVAL;
4972
4973 @@ -106,9 +117,14 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
4974 priv->oif = -1;
4975 info->priv = priv;
4976
4977 - mutex_lock(&priv_list_mutex);
4978 - list_add(&priv->list, &priv_list);
4979 - mutex_unlock(&priv_list_mutex);
4980 + dev = dev_get_by_name(par->net, info->oif);
4981 + if (dev) {
4982 + priv->oif = dev->ifindex;
4983 + dev_put(dev);
4984 + }
4985 + mutex_lock(&tn->lock);
4986 + list_add(&priv->list, &tn->priv_list);
4987 + mutex_unlock(&tn->lock);
4988 } else
4989 info->priv = NULL;
4990
4991 @@ -118,12 +134,13 @@ static int tee_tg_check(const struct xt_tgchk_param *par)
4992
4993 static void tee_tg_destroy(const struct xt_tgdtor_param *par)
4994 {
4995 + struct tee_net *tn = net_generic(par->net, tee_net_id);
4996 struct xt_tee_tginfo *info = par->targinfo;
4997
4998 if (info->priv) {
4999 - mutex_lock(&priv_list_mutex);
5000 + mutex_lock(&tn->lock);
5001 list_del(&info->priv->list);
5002 - mutex_unlock(&priv_list_mutex);
5003 + mutex_unlock(&tn->lock);
5004 kfree(info->priv);
5005 }
5006 static_key_slow_dec(&xt_tee_enabled);
5007 @@ -156,6 +173,21 @@ static struct xt_target tee_tg_reg[] __read_mostly = {
5008 #endif
5009 };
5010
5011 +static int __net_init tee_net_init(struct net *net)
5012 +{
5013 + struct tee_net *tn = net_generic(net, tee_net_id);
5014 +
5015 + INIT_LIST_HEAD(&tn->priv_list);
5016 + mutex_init(&tn->lock);
5017 + return 0;
5018 +}
5019 +
5020 +static struct pernet_operations tee_net_ops = {
5021 + .init = tee_net_init,
5022 + .id = &tee_net_id,
5023 + .size = sizeof(struct tee_net),
5024 +};
5025 +
5026 static struct notifier_block tee_netdev_notifier = {
5027 .notifier_call = tee_netdev_event,
5028 };
5029 @@ -164,22 +196,32 @@ static int __init tee_tg_init(void)
5030 {
5031 int ret;
5032
5033 - ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5034 - if (ret)
5035 + ret = register_pernet_subsys(&tee_net_ops);
5036 + if (ret < 0)
5037 return ret;
5038 +
5039 + ret = xt_register_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5040 + if (ret < 0)
5041 + goto cleanup_subsys;
5042 +
5043 ret = register_netdevice_notifier(&tee_netdev_notifier);
5044 - if (ret) {
5045 - xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5046 - return ret;
5047 - }
5048 + if (ret < 0)
5049 + goto unregister_targets;
5050
5051 return 0;
5052 +
5053 +unregister_targets:
5054 + xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5055 +cleanup_subsys:
5056 + unregister_pernet_subsys(&tee_net_ops);
5057 + return ret;
5058 }
5059
5060 static void __exit tee_tg_exit(void)
5061 {
5062 unregister_netdevice_notifier(&tee_netdev_notifier);
5063 xt_unregister_targets(tee_tg_reg, ARRAY_SIZE(tee_tg_reg));
5064 + unregister_pernet_subsys(&tee_net_ops);
5065 }
5066
5067 module_init(tee_tg_init);
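
Moving the priv list into net_generic() storage gives every network namespace its own list and lock, so a TEE target added in one namespace can no longer be walked, or freed, from another. A toy model of the per-netns lookup (the slot handling is simplified; the real API allocates the slot when the pernet subsystem registers):

    #include <stdio.h>

    #define MAX_SUBSYS 4

    /* Toy struct net: one private slot per registered subsystem. */
    struct net {
            void *gen[MAX_SUBSYS];
    };

    struct tee_net {
            int n_priv;     /* stand-in for the priv_list + mutex */
    };

    static int tee_net_id;  /* slot index, assigned at registration */

    static void *net_generic(struct net *net, int id)
    {
            return net->gen[id];
    }

    int main(void)
    {
            struct tee_net tn0 = { 0 }, tn1 = { 0 };
            struct net net0 = { .gen = { &tn0 } };
            struct net net1 = { .gen = { &tn1 } };

            /* A target added in net0 is invisible from net1. */
            struct tee_net *tn = net_generic(&net0, tee_net_id);
            tn->n_priv++;

            printf("net0: %d, net1: %d\n",
                   ((struct tee_net *)net_generic(&net0, tee_net_id))->n_priv,
                   ((struct tee_net *)net_generic(&net1, tee_net_id))->n_priv);
            return 0;
    }
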
5068 diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
5069 index 5d3cce9e8744..15eb5d3d4750 100644
5070 --- a/net/vmw_vsock/virtio_transport.c
5071 +++ b/net/vmw_vsock/virtio_transport.c
5072 @@ -75,6 +75,9 @@ static u32 virtio_transport_get_local_cid(void)
5073 {
5074 struct virtio_vsock *vsock = virtio_vsock_get();
5075
5076 + if (!vsock)
5077 + return VMADDR_CID_ANY;
5078 +
5079 return vsock->guest_cid;
5080 }
5081
5082 @@ -584,10 +587,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
5083
5084 virtio_vsock_update_guest_cid(vsock);
5085
5086 - ret = vsock_core_init(&virtio_transport.transport);
5087 - if (ret < 0)
5088 - goto out_vqs;
5089 -
5090 vsock->rx_buf_nr = 0;
5091 vsock->rx_buf_max_nr = 0;
5092 atomic_set(&vsock->queued_replies, 0);
5093 @@ -618,8 +617,6 @@ static int virtio_vsock_probe(struct virtio_device *vdev)
5094 mutex_unlock(&the_virtio_vsock_mutex);
5095 return 0;
5096
5097 -out_vqs:
5098 - vsock->vdev->config->del_vqs(vsock->vdev);
5099 out:
5100 kfree(vsock);
5101 mutex_unlock(&the_virtio_vsock_mutex);
5102 @@ -637,6 +634,9 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
5103 flush_work(&vsock->event_work);
5104 flush_work(&vsock->send_pkt_work);
5105
5106 + /* Reset all connected sockets when the device disappears */
5107 + vsock_for_each_connected_socket(virtio_vsock_reset_sock);
5108 +
5109 vdev->config->reset(vdev);
5110
5111 mutex_lock(&vsock->rx_lock);
5112 @@ -669,7 +669,6 @@ static void virtio_vsock_remove(struct virtio_device *vdev)
5113
5114 mutex_lock(&the_virtio_vsock_mutex);
5115 the_virtio_vsock = NULL;
5116 - vsock_core_exit();
5117 mutex_unlock(&the_virtio_vsock_mutex);
5118
5119 vdev->config->del_vqs(vdev);
5120 @@ -702,14 +701,28 @@ static int __init virtio_vsock_init(void)
5121 virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
5122 if (!virtio_vsock_workqueue)
5123 return -ENOMEM;
5124 +
5125 ret = register_virtio_driver(&virtio_vsock_driver);
5126 if (ret)
5127 - destroy_workqueue(virtio_vsock_workqueue);
5128 + goto out_wq;
5129 +
5130 + ret = vsock_core_init(&virtio_transport.transport);
5131 + if (ret)
5132 + goto out_vdr;
5133 +
5134 + return 0;
5135 +
5136 +out_vdr:
5137 + unregister_virtio_driver(&virtio_vsock_driver);
5138 +out_wq:
5139 + destroy_workqueue(virtio_vsock_workqueue);
5140 return ret;
5141 +
5142 }
5143
5144 static void __exit virtio_vsock_exit(void)
5145 {
5146 + vsock_core_exit();
5147 unregister_virtio_driver(&virtio_vsock_driver);
5148 destroy_workqueue(virtio_vsock_workqueue);
5149 }
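
With vsock_core_init() moved from device probe to module load, the transport callbacks can legitimately run while no virtio device is bound yet; that window is exactly what the NULL check added in the first hunk covers. A trimmed-down sketch of the guard (VMADDR_CID_ANY matches the uapi value; the struct is reduced to the one field used):

    #include <stdio.h>

    #define VMADDR_CID_ANY ((unsigned int)-1)   /* as in the sockets uapi */

    struct virtio_vsock { unsigned int guest_cid; };

    static struct virtio_vsock *the_virtio_vsock;   /* NULL until probe */

    static unsigned int get_local_cid(void)
    {
            struct virtio_vsock *vsock = the_virtio_vsock;

            /* Core is registered before any device exists: cope with it. */
            if (!vsock)
                    return VMADDR_CID_ANY;
            return vsock->guest_cid;
    }

    int main(void)
    {
            printf("cid before probe: %u\n", get_local_cid());
            return 0;
    }
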
5150 diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
5151 index 08c88de0ffda..11975ec8d566 100644
5152 --- a/security/apparmor/domain.c
5153 +++ b/security/apparmor/domain.c
5154 @@ -1444,7 +1444,10 @@ check:
5155 new = aa_label_merge(label, target, GFP_KERNEL);
5156 if (IS_ERR_OR_NULL(new)) {
5157 info = "failed to build target label";
5158 - error = PTR_ERR(new);
5159 + if (!new)
5160 + error = -ENOMEM;
5161 + else
5162 + error = PTR_ERR(new);
5163 new = NULL;
5164 perms.allow = 0;
5165 goto audit;
5166 diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
5167 index b455930a3eaf..ec73d83d0d31 100644
5168 --- a/tools/bpf/bpftool/map.c
5169 +++ b/tools/bpf/bpftool/map.c
5170 @@ -370,6 +370,20 @@ static char **parse_bytes(char **argv, const char *name, unsigned char *val,
5171 return argv + i;
5172 }
5173
5174 +/* on per-cpu maps we must copy the provided value to all value instances */
5175 +static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
5176 +{
5177 + unsigned int i, n, step;
5178 +
5179 + if (!map_is_per_cpu(info->type))
5180 + return;
5181 +
5182 + n = get_possible_cpus();
5183 + step = round_up(info->value_size, 8);
5184 + for (i = 1; i < n; i++)
5185 + memcpy(value + i * step, value, info->value_size);
5186 +}
5187 +
5188 static int parse_elem(char **argv, struct bpf_map_info *info,
5189 void *key, void *value, __u32 key_size, __u32 value_size,
5190 __u32 *flags, __u32 **value_fd)
5191 @@ -449,6 +463,8 @@ static int parse_elem(char **argv, struct bpf_map_info *info,
5192 argv = parse_bytes(argv, "value", value, value_size);
5193 if (!argv)
5194 return -1;
5195 +
5196 + fill_per_cpu_value(info, value);
5197 }
5198
5199 return parse_elem(argv, info, key, NULL, key_size, value_size,
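
Per-cpu maps expect one value per possible CPU, laid out back to back at strides rounded up to 8 bytes; the new helper replicates the single user-supplied value into every CPU slot so an update does not leave stale data on CPUs 1..n-1. A standalone sketch of that layout arithmetic, with made-up sizes:

    #include <stdio.h>
    #include <string.h>

    /* Round x up to a power-of-two boundary, like the kernel macro. */
    #define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int n = 4, value_size = 6;     /* hypothetical map */
            unsigned int step = ROUND_UP(value_size, 8);
            unsigned char value[4 * 8];
            unsigned int i;

            memset(value, 0, sizeof(value));
            memcpy(value, "\x01\x02\x03\x04\x05\x06", value_size); /* CPU 0 */

            /* Fan CPU 0's value out into every other CPU's slot. */
            for (i = 1; i < n; i++)
                    memcpy(value + i * step, value, value_size);

            printf("stride = %u bytes\n", step);
            return 0;
    }
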
5200 diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
5201 index 0de024a6cc2b..bbba0d61570f 100644
5202 --- a/tools/bpf/bpftool/prog.c
5203 +++ b/tools/bpf/bpftool/prog.c
5204 @@ -109,13 +109,14 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
5205
5206 static int prog_fd_by_tag(unsigned char *tag)
5207 {
5208 - struct bpf_prog_info info = {};
5209 - __u32 len = sizeof(info);
5210 unsigned int id = 0;
5211 int err;
5212 int fd;
5213
5214 while (true) {
5215 + struct bpf_prog_info info = {};
5216 + __u32 len = sizeof(info);
5217 +
5218 err = bpf_prog_get_next_id(id, &id);
5219 if (err) {
5220 p_err("%s", strerror(errno));
5221 diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
5222 index 6c1e7ceedcf3..53c11fc0855e 100644
5223 --- a/tools/perf/builtin-script.c
5224 +++ b/tools/perf/builtin-script.c
5225 @@ -1589,13 +1589,8 @@ static void perf_sample__fprint_metric(struct perf_script *script,
5226 .force_header = false,
5227 };
5228 struct perf_evsel *ev2;
5229 - static bool init;
5230 u64 val;
5231
5232 - if (!init) {
5233 - perf_stat__init_shadow_stats();
5234 - init = true;
5235 - }
5236 if (!evsel->stats)
5237 perf_evlist__alloc_stats(script->session->evlist, false);
5238 if (evsel_script(evsel->leader)->gnum++ == 0)
5239 @@ -1658,7 +1653,7 @@ static void process_event(struct perf_script *script,
5240 return;
5241 }
5242
5243 - if (PRINT_FIELD(TRACE)) {
5244 + if (PRINT_FIELD(TRACE) && sample->raw_data) {
5245 event_format__fprintf(evsel->tp_format, sample->cpu,
5246 sample->raw_data, sample->raw_size, fp);
5247 }
5248 @@ -2214,6 +2209,8 @@ static int __cmd_script(struct perf_script *script)
5249
5250 signal(SIGINT, sig_handler);
5251
5252 + perf_stat__init_shadow_stats();
5253 +
5254 /* override event processing functions */
5255 if (script->show_task_events) {
5256 script->tool.comm = process_comm_event;
5257 diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
5258 index 22ab8e67c760..3f43aedb384d 100644
5259 --- a/tools/perf/builtin-trace.c
5260 +++ b/tools/perf/builtin-trace.c
5261 @@ -2263,19 +2263,30 @@ static size_t trace__fprintf_thread_summary(struct trace *trace, FILE *fp);
5262
5263 static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
5264 {
5265 - struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
5266 + bool found = false;
5267 + struct perf_evsel *evsel, *tmp;
5268 + struct parse_events_error err = { .idx = 0, };
5269 + int ret = parse_events(evlist, "probe:vfs_getname*", &err);
5270
5271 - if (IS_ERR(evsel))
5272 + if (ret)
5273 return false;
5274
5275 - if (perf_evsel__field(evsel, "pathname") == NULL) {
5276 + evlist__for_each_entry_safe(evlist, evsel, tmp) {
5277 + if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
5278 + continue;
5279 +
5280 + if (perf_evsel__field(evsel, "pathname")) {
5281 + evsel->handler = trace__vfs_getname;
5282 + found = true;
5283 + continue;
5284 + }
5285 +
5286 + list_del_init(&evsel->node);
5287 + evsel->evlist = NULL;
5288 perf_evsel__delete(evsel);
5289 - return false;
5290 }
5291
5292 - evsel->handler = trace__vfs_getname;
5293 - perf_evlist__add(evlist, evsel);
5294 - return true;
5295 + return found;
5296 }
5297
5298 static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
5299 diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
5300 index 1ccbd3342069..383674f448fc 100644
5301 --- a/tools/perf/util/cpumap.c
5302 +++ b/tools/perf/util/cpumap.c
5303 @@ -134,7 +134,12 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
5304 if (!cpu_list)
5305 return cpu_map__read_all_cpu_map();
5306
5307 - if (!isdigit(*cpu_list))
5308 + /*
5309 + * must handle the case of an empty cpumap to cover
5310 + * the TOPOLOGY header for NUMA nodes with no CPU
5311 + * (e.g., because of CPU hotplug)
5312 + */
5313 + if (!isdigit(*cpu_list) && *cpu_list != '\0')
5314 goto out;
5315
5316 while (isdigit(*cpu_list)) {
5317 @@ -181,8 +186,10 @@ struct cpu_map *cpu_map__new(const char *cpu_list)
5318
5319 if (nr_cpus > 0)
5320 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
5321 - else
5322 + else if (*cpu_list != '\0')
5323 cpus = cpu_map__default_new();
5324 + else
5325 + cpus = cpu_map__dummy_new();
5326 invalid:
5327 free(tmp_cpus);
5328 out:
5329 diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
5330 index 6e70cc00c161..a701a8a48f00 100644
5331 --- a/tools/perf/util/symbol-elf.c
5332 +++ b/tools/perf/util/symbol-elf.c
5333 @@ -87,6 +87,11 @@ static inline uint8_t elf_sym__type(const GElf_Sym *sym)
5334 return GELF_ST_TYPE(sym->st_info);
5335 }
5336
5337 +static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
5338 +{
5339 + return GELF_ST_VISIBILITY(sym->st_other);
5340 +}
5341 +
5342 #ifndef STT_GNU_IFUNC
5343 #define STT_GNU_IFUNC 10
5344 #endif
5345 @@ -111,7 +116,9 @@ static inline int elf_sym__is_label(const GElf_Sym *sym)
5346 return elf_sym__type(sym) == STT_NOTYPE &&
5347 sym->st_name != 0 &&
5348 sym->st_shndx != SHN_UNDEF &&
5349 - sym->st_shndx != SHN_ABS;
5350 + sym->st_shndx != SHN_ABS &&
5351 + elf_sym__visibility(sym) != STV_HIDDEN &&
5352 + elf_sym__visibility(sym) != STV_INTERNAL;
5353 }
5354
5355 static bool elf_sym__filter(GElf_Sym *sym)
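
ELF stores symbol visibility in the low two bits of st_other; GELF_ST_VISIBILITY() extracts them, and hidden or internal symbols are now excluded when harvesting labels. A minimal sketch of the decoding, with the constants as defined in elf.h:

    #include <stdio.h>

    #define STV_DEFAULT     0
    #define STV_INTERNAL    1
    #define STV_HIDDEN      2
    #define ST_VISIBILITY(o) ((o) & 0x03)   /* same mask as GELF_ST_VISIBILITY */

    int main(void)
    {
            unsigned char st_other = 2;     /* hypothetical hidden symbol */

            /* Skip hidden/internal symbols, as the patched filter does. */
            if (ST_VISIBILITY(st_other) == STV_HIDDEN ||
                ST_VISIBILITY(st_other) == STV_INTERNAL)
                    printf("label filtered out\n");
            return 0;
    }
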
5356 diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
5357 index 315a44fa32af..84fd6f1bf33e 100644
5358 --- a/tools/testing/selftests/bpf/bpf_util.h
5359 +++ b/tools/testing/selftests/bpf/bpf_util.h
5360 @@ -13,7 +13,7 @@ static inline unsigned int bpf_num_possible_cpus(void)
5361 unsigned int start, end, possible_cpus = 0;
5362 char buff[128];
5363 FILE *fp;
5364 - int n;
5365 + int len, n, i, j = 0;
5366
5367 fp = fopen(fcpu, "r");
5368 if (!fp) {
5369 @@ -21,17 +21,27 @@ static inline unsigned int bpf_num_possible_cpus(void)
5370 exit(1);
5371 }
5372
5373 - while (fgets(buff, sizeof(buff), fp)) {
5374 - n = sscanf(buff, "%u-%u", &start, &end);
5375 - if (n == 0) {
5376 - printf("Failed to retrieve # possible CPUs!\n");
5377 - exit(1);
5378 - } else if (n == 1) {
5379 - end = start;
5380 + if (!fgets(buff, sizeof(buff), fp)) {
5381 + printf("Failed to read %s!\n", fcpu);
5382 + exit(1);
5383 + }
5384 +
5385 + len = strlen(buff);
5386 + for (i = 0; i <= len; i++) {
5387 + if (buff[i] == ',' || buff[i] == '\0') {
5388 + buff[i] = '\0';
5389 + n = sscanf(&buff[j], "%u-%u", &start, &end);
5390 + if (n <= 0) {
5391 + printf("Failed to retrieve # possible CPUs!\n");
5392 + exit(1);
5393 + } else if (n == 1) {
5394 + end = start;
5395 + }
5396 + possible_cpus += end - start + 1;
5397 + j = i + 1;
5398 }
5399 - possible_cpus = start == 0 ? end + 1 : 0;
5400 - break;
5401 }
5402 +
5403 fclose(fp);
5404
5405 return possible_cpus;
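
The rewritten helper copes with sysfs masks such as "0-3,8,10-11" instead of assuming a single leading range. A standalone sketch of the same comma-then-range parse, runnable against a sample string:

    #include <stdio.h>
    #include <string.h>

    /* Split on commas, then read each piece as "start-end" or "start". */
    static unsigned int count_cpus(char *buff)
    {
            unsigned int start, end, possible = 0;
            int len = strlen(buff), i, j = 0, n;

            for (i = 0; i <= len; i++) {
                    if (buff[i] == ',' || buff[i] == '\0') {
                            buff[i] = '\0';
                            n = sscanf(&buff[j], "%u-%u", &start, &end);
                            if (n <= 0)
                                    return 0;       /* malformed piece */
                            if (n == 1)
                                    end = start;
                            possible += end - start + 1;
                            j = i + 1;
                    }
            }
            return possible;
    }

    int main(void)
    {
            char list[] = "0-3,8,10-11";    /* sample sysfs content */

            printf("%u possible cpus\n", count_cpus(list)); /* prints 7 */
            return 0;
    }
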
5406 diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
5407 index bab13dd025a6..0d26b5e3f966 100755
5408 --- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
5409 +++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
5410 @@ -37,6 +37,10 @@ prerequisite()
5411 exit $ksft_skip
5412 fi
5413
5414 + present_cpus=`cat $SYSFS/devices/system/cpu/present`
5415 + present_max=${present_cpus##*-}
5416 + echo "present_cpus = $present_cpus present_max = $present_max"
5417 +
5418 echo -e "\t Cpus in online state: $online_cpus"
5419
5420 offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
5421 @@ -151,6 +155,8 @@ online_cpus=0
5422 online_max=0
5423 offline_cpus=0
5424 offline_max=0
5425 +present_cpus=0
5426 +present_max=0
5427
5428 while getopts e:ahp: opt; do
5429 case $opt in
5430 @@ -190,9 +196,10 @@ if [ $allcpus -eq 0 ]; then
5431 online_cpu_expect_success $online_max
5432
5433 if [[ $offline_cpus -gt 0 ]]; then
5434 - echo -e "\t offline to online to offline: cpu $offline_max"
5435 - online_cpu_expect_success $offline_max
5436 - offline_cpu_expect_success $offline_max
5437 + echo -e "\t offline to online to offline: cpu $present_max"
5438 + online_cpu_expect_success $present_max
5439 + offline_cpu_expect_success $present_max
5440 + online_cpu $present_max
5441 fi
5442 exit 0
5443 else
5444 diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
5445 index 919aa2ac00af..9a3764a1084e 100644
5446 --- a/tools/testing/selftests/net/Makefile
5447 +++ b/tools/testing/selftests/net/Makefile
5448 @@ -18,6 +18,6 @@ TEST_GEN_PROGS += reuseport_dualstack reuseaddr_conflict tls
5449 KSFT_KHDR_INSTALL := 1
5450 include ../lib.mk
5451
5452 -$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
5453 +$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
5454 $(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
5455 $(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
5456 diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
5457 index 47ed6cef93fb..c9ff2b47bd1c 100644
5458 --- a/tools/testing/selftests/netfilter/Makefile
5459 +++ b/tools/testing/selftests/netfilter/Makefile
5460 @@ -1,6 +1,6 @@
5461 # SPDX-License-Identifier: GPL-2.0
5462 # Makefile for netfilter selftests
5463
5464 -TEST_PROGS := nft_trans_stress.sh
5465 +TEST_PROGS := nft_trans_stress.sh nft_nat.sh
5466
5467 include ../lib.mk
5468 diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
5469 index 1017313e41a8..59caa8f71cd8 100644
5470 --- a/tools/testing/selftests/netfilter/config
5471 +++ b/tools/testing/selftests/netfilter/config
5472 @@ -1,2 +1,2 @@
5473 CONFIG_NET_NS=y
5474 -NF_TABLES_INET=y
5475 +CONFIG_NF_TABLES_INET=y
5476 diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
5477 new file mode 100755
5478 index 000000000000..8ec76681605c
5479 --- /dev/null
5480 +++ b/tools/testing/selftests/netfilter/nft_nat.sh
5481 @@ -0,0 +1,762 @@
5482 +#!/bin/bash
5483 +#
5484 +# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
5485 +#
5486 +
5487 +# Kselftest framework requirement - SKIP code is 4.
5488 +ksft_skip=4
5489 +ret=0
5490 +
5491 +nft --version > /dev/null 2>&1
5492 +if [ $? -ne 0 ];then
5493 + echo "SKIP: Could not run test without nft tool"
5494 + exit $ksft_skip
5495 +fi
5496 +
5497 +ip -Version > /dev/null 2>&1
5498 +if [ $? -ne 0 ];then
5499 + echo "SKIP: Could not run test without ip tool"
5500 + exit $ksft_skip
5501 +fi
5502 +
5503 +ip netns add ns0
5504 +ip netns add ns1
5505 +ip netns add ns2
5506 +
5507 +ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
5508 +ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
5509 +
5510 +ip -net ns0 link set lo up
5511 +ip -net ns0 link set veth0 up
5512 +ip -net ns0 addr add 10.0.1.1/24 dev veth0
5513 +ip -net ns0 addr add dead:1::1/64 dev veth0
5514 +
5515 +ip -net ns0 link set veth1 up
5516 +ip -net ns0 addr add 10.0.2.1/24 dev veth1
5517 +ip -net ns0 addr add dead:2::1/64 dev veth1
5518 +
5519 +for i in 1 2; do
5520 + ip -net ns$i link set lo up
5521 + ip -net ns$i link set eth0 up
5522 + ip -net ns$i addr add 10.0.$i.99/24 dev eth0
5523 + ip -net ns$i route add default via 10.0.$i.1
5524 + ip -net ns$i addr add dead:$i::99/64 dev eth0
5525 + ip -net ns$i route add default via dead:$i::1
5526 +done
5527 +
5528 +bad_counter()
5529 +{
5530 + local ns=$1
5531 + local counter=$2
5532 + local expect=$3
5533 +
5534 + echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
5535 + ip netns exec $ns nft list counter inet filter $counter 1>&2
5536 +}
5537 +
5538 +check_counters()
5539 +{
5540 + ns=$1
5541 + local lret=0
5542 +
5543 + cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "packets 1 bytes 84")
5544 + if [ $? -ne 0 ]; then
5545 + bad_counter $ns ns0in "packets 1 bytes 84"
5546 + lret=1
5547 + fi
5548 + cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "packets 1 bytes 84")
5549 + if [ $? -ne 0 ]; then
5550 + bad_counter $ns ns0out "packets 1 bytes 84"
5551 + lret=1
5552 + fi
5553 +
5554 + expect="packets 1 bytes 104"
5555 + cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
5556 + if [ $? -ne 0 ]; then
5557 + bad_counter $ns ns0in6 "$expect"
5558 + lret=1
5559 + fi
5560 + cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
5561 + if [ $? -ne 0 ]; then
5562 + bad_counter $ns ns0out6 "$expect"
5563 + lret=1
5564 + fi
5565 +
5566 + return $lret
5567 +}
5568 +
5569 +check_ns0_counters()
5570 +{
5571 + local ns=$1
5572 + local lret=0
5573 +
5574 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
5575 + if [ $? -ne 0 ]; then
5576 + bad_counter ns0 ns0in "packets 0 bytes 0"
5577 + lret=1
5578 + fi
5579 +
5580 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
5581 + if [ $? -ne 0 ]; then
5582 + bad_counter ns0 ns0in6 "packets 0 bytes 0"
5583 + lret=1
5584 + fi
5585 +
5586 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
5587 + if [ $? -ne 0 ]; then
5588 + bad_counter ns0 ns0out "packets 0 bytes 0"
5589 + lret=1
5590 + fi
5591 + cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
5592 + if [ $? -ne 0 ]; then
5593 + bad_counter ns0 ns0out6 "packets 0 bytes 0"
5594 + lret=1
5595 + fi
5596 +
5597 + for dir in "in" "out" ; do
5598 + expect="packets 1 bytes 84"
5599 + cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
5600 + if [ $? -ne 0 ]; then
5601 + bad_counter ns0 $ns$dir "$expect"
5602 + lret=1
5603 + fi
5604 +
5605 + expect="packets 1 bytes 104"
5606 + cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
5607 + if [ $? -ne 0 ]; then
5608 + bad_counter ns0 ${ns}${dir}6 "$expect"
5609 + lret=1
5610 + fi
5611 + done
5612 +
5613 + return $lret
5614 +}
5615 +
5616 +reset_counters()
5617 +{
5618 + for i in 0 1 2;do
5619 + ip netns exec ns$i nft reset counters inet > /dev/null
5620 + done
5621 +}
5622 +
5623 +test_local_dnat6()
5624 +{
5625 + local lret=0
5626 +ip netns exec ns0 nft -f - <<EOF
5627 +table ip6 nat {
5628 + chain output {
5629 + type nat hook output priority 0; policy accept;
5630 + ip6 daddr dead:1::99 dnat to dead:2::99
5631 + }
5632 +}
5633 +EOF
5634 + if [ $? -ne 0 ]; then
5635 + echo "SKIP: Could not add add ip6 dnat hook"
5636 + return $ksft_skip
5637 + fi
5638 +
5639 + # ping netns1, expect rewrite to netns2
5640 + ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
5641 + if [ $? -ne 0 ]; then
5642 + lret=1
5643 + echo "ERROR: ping6 failed"
5644 + return $lret
5645 + fi
5646 +
5647 + expect="packets 0 bytes 0"
5648 + for dir in "in6" "out6" ; do
5649 + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
5650 + if [ $? -ne 0 ]; then
5651 + bad_counter ns0 ns1$dir "$expect"
5652 + lret=1
5653 + fi
5654 + done
5655 +
5656 + expect="packets 1 bytes 104"
5657 + for dir in "in6" "out6" ; do
5658 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
5659 + if [ $? -ne 0 ]; then
5660 + bad_counter ns0 ns2$dir "$expect"
5661 + lret=1
5662 + fi
5663 + done
5664 +
5665 + # expect 0 count in ns1
5666 + expect="packets 0 bytes 0"
5667 + for dir in "in6" "out6" ; do
5668 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5669 + if [ $? -ne 0 ]; then
5670 + bad_counter ns1 ns0$dir "$expect"
5671 + lret=1
5672 + fi
5673 + done
5674 +
5675 + # expect 1 packet in ns2
5676 + expect="packets 1 bytes 104"
5677 + for dir in "in6" "out6" ; do
5678 + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
5679 + if [ $? -ne 0 ]; then
5680 + bad_counter ns2 ns0$dir "$expect"
5681 + lret=1
5682 + fi
5683 + done
5684 +
5685 + test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
5686 + ip netns exec ns0 nft flush chain ip6 nat output
5687 +
5688 + return $lret
5689 +}
5690 +
5691 +test_local_dnat()
5692 +{
5693 + local lret=0
5694 +ip netns exec ns0 nft -f - <<EOF
5695 +table ip nat {
5696 + chain output {
5697 + type nat hook output priority 0; policy accept;
5698 + ip daddr 10.0.1.99 dnat to 10.0.2.99
5699 + }
5700 +}
5701 +EOF
5702 + # ping netns1, expect rewrite to netns2
5703 + ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
5704 + if [ $? -ne 0 ]; then
5705 + lret=1
5706 + echo "ERROR: ping failed"
5707 + return $lret
5708 + fi
5709 +
5710 + expect="packets 0 bytes 0"
5711 + for dir in "in" "out" ; do
5712 + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
5713 + if [ $? -ne 0 ]; then
5714 + bad_counter ns0 ns1$dir "$expect"
5715 + lret=1
5716 + fi
5717 + done
5718 +
5719 + expect="packets 1 bytes 84"
5720 + for dir in "in" "out" ; do
5721 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
5722 + if [ $? -ne 0 ]; then
5723 + bad_counter ns0 ns2$dir "$expect"
5724 + lret=1
5725 + fi
5726 + done
5727 +
5728 + # expect 0 count in ns1
5729 + expect="packets 0 bytes 0"
5730 + for dir in "in" "out" ; do
5731 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5732 + if [ $? -ne 0 ]; then
5733 + bad_counter ns1 ns0$dir "$expect"
5734 + lret=1
5735 + fi
5736 + done
5737 +
5738 + # expect 1 packet in ns2
5739 + expect="packets 1 bytes 84"
5740 + for dir in "in" "out" ; do
5741 + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
5742 + if [ $? -ne 0 ]; then
5743 + bad_counter ns2 ns0$dir "$expect"
5744 + lret=1
5745 + fi
5746 + done
5747 +
5748 + test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
5749 +
5750 + ip netns exec ns0 nft flush chain ip nat output
5751 +
5752 + reset_counters
5753 + ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
5754 + if [ $? -ne 0 ]; then
5755 + lret=1
5756 + echo "ERROR: ping failed"
5757 + return $lret
5758 + fi
5759 +
5760 + expect="packets 1 bytes 84"
5761 + for dir in "in" "out" ; do
5762 + cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
5763 + if [ $? -ne 0 ]; then
5764 + bad_counter ns0 ns1$dir "$expect"
5765 + lret=1
5766 + fi
5767 + done
5768 + expect="packets 0 bytes 0"
5769 + for dir in "in" "out" ; do
5770 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
5771 + if [ $? -ne 0 ]; then
5772 + bad_counter ns0 ns2$dir "$expect"
5773 + lret=1
5774 + fi
5775 + done
5776 +
5777 + # expect 1 count in ns1
5778 + expect="packets 1 bytes 84"
5779 + for dir in "in" "out" ; do
5780 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5781 + if [ $? -ne 0 ]; then
5782 + bad_counter ns1 ns0$dir "$expect"
5783 + lret=1
5784 + fi
5785 + done
5786 +
5787 + # expect 0 packet in ns2
5788 + expect="packets 0 bytes 0"
5789 + for dir in "in" "out" ; do
5790 + cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
5791 + if [ $? -ne 0 ]; then
5792 + bad_counter ns2 ns0$dir "$expect"
5793 + lret=1
5794 + fi
5795 + done
5796 +
5797 + test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
5798 +
5799 + return $lret
5800 +}
5801 +
5802 +
5803 +test_masquerade6()
5804 +{
5805 + local lret=0
5806 +
5807 + ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
5808 +
5809 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
5810 + if [ $? -ne 0 ] ; then
5811 + echo "ERROR: cannot ping ns1 from ns2 via ipv6"
5812 + lret=1
5813 + return $lret
5814 + fi
5815 +
5816 + expect="packets 1 bytes 104"
5817 + for dir in "in6" "out6" ; do
5818 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5819 + if [ $? -ne 0 ]; then
5820 + bad_counter ns1 ns2$dir "$expect"
5821 + lret=1
5822 + fi
5823 +
5824 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5825 + if [ $? -ne 0 ]; then
5826 + bad_counter ns2 ns1$dir "$expect"
5827 + lret=1
5828 + fi
5829 + done
5830 +
5831 + reset_counters
5832 +
5833 +# add masquerading rule
5834 +ip netns exec ns0 nft -f - <<EOF
5835 +table ip6 nat {
5836 + chain postrouting {
5837 + type nat hook postrouting priority 0; policy accept;
5838 + meta oif veth0 masquerade
5839 + }
5840 +}
5841 +EOF
5842 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
5843 + if [ $? -ne 0 ] ; then
5844 + echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
5845 + lret=1
5846 + fi
5847 +
5848 + # ns1 should have seen packets from ns0, due to masquerade
5849 + expect="packets 1 bytes 104"
5850 + for dir in "in6" "out6" ; do
5851 +
5852 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5853 + if [ $? -ne 0 ]; then
5854 + bad_counter ns1 ns0$dir "$expect"
5855 + lret=1
5856 + fi
5857 +
5858 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5859 + if [ $? -ne 0 ]; then
5860 + bad_counter ns2 ns1$dir "$expect"
5861 + lret=1
5862 + fi
5863 + done
5864 +
5865 + # ns1 should not have seen packets from ns2, due to masquerade
5866 + expect="packets 0 bytes 0"
5867 + for dir in "in6" "out6" ; do
5868 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5869 + if [ $? -ne 0 ]; then
5870 + bad_counter ns1 ns2$dir "$expect"
5871 + lret=1
5872 + fi
5873 +
5874 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5875 + if [ $? -ne 0 ]; then
5876 + bad_counter ns1 ns2$dir "$expect"
5877 + lret=1
5878 + fi
5879 + done
5880 +
5881 + ip netns exec ns0 nft flush chain ip6 nat postrouting
5882 + if [ $? -ne 0 ]; then
5883 + echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
5884 + lret=1
5885 + fi
5886 +
5887 + test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
5888 +
5889 + return $lret
5890 +}
5891 +
5892 +test_masquerade()
5893 +{
5894 + local lret=0
5895 +
5896 + ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
5897 + ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
5898 +
5899 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
5900 + if [ $? -ne 0 ] ; then
5901 + echo "ERROR: canot ping ns1 from ns2"
5902 + lret=1
5903 + fi
5904 +
5905 + expect="packets 1 bytes 84"
5906 + for dir in "in" "out" ; do
5907 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5908 + if [ $? -ne 0 ]; then
5909 + bad_counter ns1 ns2$dir "$expect"
5910 + lret=1
5911 + fi
5912 +
5913 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5914 + if [ $? -ne 0 ]; then
5915 + bad_counter ns2 ns1$dir "$expect"
5916 + lret=1
5917 + fi
5918 + done
5919 +
5920 + reset_counters
5921 +
5922 +# add masquerading rule
5923 +ip netns exec ns0 nft -f - <<EOF
5924 +table ip nat {
5925 + chain postrouting {
5926 + type nat hook postrouting priority 0; policy accept;
5927 + meta oif veth0 masquerade
5928 + }
5929 +}
5930 +EOF
5931 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
5932 + if [ $? -ne 0 ] ; then
5933 + echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
5934 + lret=1
5935 + fi
5936 +
5937 + # ns1 should have seen packets from ns0, due to masquerade
5938 + expect="packets 1 bytes 84"
5939 + for dir in "in" "out" ; do
5940 + cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
5941 + if [ $? -ne 0 ]; then
5942 + bad_counter ns1 ns0$dir "$expect"
5943 + lret=1
5944 + fi
5945 +
5946 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
5947 + if [ $? -ne 0 ]; then
5948 + bad_counter ns2 ns1$dir "$expect"
5949 + lret=1
5950 + fi
5951 + done
5952 +
5953 + # ns1 should not have seen packets from ns2, due to masquerade
5954 + expect="packets 0 bytes 0"
5955 + for dir in "in" "out" ; do
5956 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5957 + if [ $? -ne 0 ]; then
5958 + bad_counter ns1 ns2$dir "$expect"
5959 + lret=1
5960 + fi
5961 +
5962 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5963 + if [ $? -ne 0 ]; then
5964 + bad_counter ns1 ns2$dir "$expect"
5965 + lret=1
5966 + fi
5967 + done
5968 +
5969 + ip netns exec ns0 nft flush chain ip nat postrouting
5970 + if [ $? -ne 0 ]; then
5971 + echo "ERROR: Could not flush nat postrouting" 1>&2
5972 + lret=1
5973 + fi
5974 +
5975 + test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
5976 +
5977 + return $lret
5978 +}
5979 +
5980 +test_redirect6()
5981 +{
5982 + local lret=0
5983 +
5984 + ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
5985 +
5986 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
5987 + if [ $? -ne 0 ] ; then
5988 + echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
5989 + lret=1
5990 + fi
5991 +
5992 + expect="packets 1 bytes 104"
5993 + for dir in "in6" "out6" ; do
5994 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
5995 + if [ $? -ne 0 ]; then
5996 + bad_counter ns1 ns2$dir "$expect"
5997 + lret=1
5998 + fi
5999 +
6000 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6001 + if [ $? -ne 0 ]; then
6002 + bad_counter ns2 ns1$dir "$expect"
6003 + lret=1
6004 + fi
6005 + done
6006 +
6007 + reset_counters
6008 +
6009 +# add redirect rule
6010 +ip netns exec ns0 nft -f - <<EOF
6011 +table ip6 nat {
6012 + chain prerouting {
6013 + type nat hook prerouting priority 0; policy accept;
6014 + meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
6015 + }
6016 +}
6017 +EOF
6018 + ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
6019 + if [ $? -ne 0 ] ; then
6020 + echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
6021 + lret=1
6022 + fi
6023 +
6024 + # ns1 should have seen no packets from ns2, due to redirection
6025 + expect="packets 0 bytes 0"
6026 + for dir in "in6" "out6" ; do
6027 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6028 + if [ $? -ne 0 ]; then
6029 + bad_counter ns1 ns2$dir "$expect"
6030 + lret=1
6031 + fi
6032 + done
6033 +
6034 + # ns0 should have seen packets from ns2, due to redirection
6035 + expect="packets 1 bytes 104"
6036 + for dir in "in6" "out6" ; do
6037 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6038 + if [ $? -ne 0 ]; then
6039 + bad_counter ns0 ns2$dir "$expect"
6040 + lret=1
6041 + fi
6042 + done
6043 +
6044 + ip netns exec ns0 nft delete table ip6 nat
6045 + if [ $? -ne 0 ]; then
6046 + echo "ERROR: Could not delete ip6 nat table" 1>&2
6047 + lret=1
6048 + fi
6049 +
6050 + test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
6051 +
6052 + return $lret
6053 +}
6054 +
6055 +test_redirect()
6056 +{
6057 + local lret=0
6058 +
6059 + ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
6060 + ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
6061 +
6062 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6063 + if [ $? -ne 0 ] ; then
6064 + echo "ERROR: cannot ping ns1 from ns2"
6065 + lret=1
6066 + fi
6067 +
6068 + expect="packets 1 bytes 84"
6069 + for dir in "in" "out" ; do
6070 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6071 + if [ $? -ne 0 ]; then
6072 + bad_counter ns1 ns2$dir "$expect"
6073 + lret=1
6074 + fi
6075 +
6076 + cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
6077 + if [ $? -ne 0 ]; then
6078 + bad_counter ns2 ns1$dir "$expect"
6079 + lret=1
6080 + fi
6081 + done
6082 +
6083 + reset_counters
6084 +
6085 +# add redirect rule
6086 +ip netns exec ns0 nft -f - <<EOF
6087 +table ip nat {
6088 + chain prerouting {
6089 + type nat hook prerouting priority 0; policy accept;
6090 + meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
6091 + }
6092 +}
6093 +EOF
6094 + ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
6095 + if [ $? -ne 0 ] ; then
6096 + echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
6097 + lret=1
6098 + fi
6099 +
6100 + # ns1 should have seen no packets from ns2, due to redirection
6101 + expect="packets 0 bytes 0"
6102 + for dir in "in" "out" ; do
6103 +
6104 + cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
6105 + if [ $? -ne 0 ]; then
6106 + bad_counter ns1 ns2$dir "$expect"
6107 + lret=1
6108 + fi
6109 + done
6110 +
6111 + # ns0 should have seen packets from ns2, due to redirection
6112 + expect="packets 1 bytes 84"
6113 + for dir in "in" "out" ; do
6114 + cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
6115 + if [ $? -ne 0 ]; then
6116 + bad_counter ns0 ns2$dir "$expect"
6117 + lret=1
6118 + fi
6119 + done
6120 +
6121 + ip netns exec ns0 nft delete table ip nat
6122 + if [ $? -ne 0 ]; then
6123 + echo "ERROR: Could not delete nat table" 1>&2
6124 + lret=1
6125 + fi
6126 +
6127 + test $lret -eq 0 && echo "PASS: IP redirection for ns2"
6128 +
6129 + return $lret
6130 +}
6131 +
6132 +
6133 +# ip netns exec ns0 ping -c 1 -q 10.0.$i.99
6134 +for i in 0 1 2; do
6135 +ip netns exec ns$i nft -f - <<EOF
6136 +table inet filter {
6137 + counter ns0in {}
6138 + counter ns1in {}
6139 + counter ns2in {}
6140 +
6141 + counter ns0out {}
6142 + counter ns1out {}
6143 + counter ns2out {}
6144 +
6145 + counter ns0in6 {}
6146 + counter ns1in6 {}
6147 + counter ns2in6 {}
6148 +
6149 + counter ns0out6 {}
6150 + counter ns1out6 {}
6151 + counter ns2out6 {}
6152 +
6153 + map nsincounter {
6154 + type ipv4_addr : counter
6155 + elements = { 10.0.1.1 : "ns0in",
6156 + 10.0.2.1 : "ns0in",
6157 + 10.0.1.99 : "ns1in",
6158 + 10.0.2.99 : "ns2in" }
6159 + }
6160 +
6161 + map nsincounter6 {
6162 + type ipv6_addr : counter
6163 + elements = { dead:1::1 : "ns0in6",
6164 + dead:2::1 : "ns0in6",
6165 + dead:1::99 : "ns1in6",
6166 + dead:2::99 : "ns2in6" }
6167 + }
6168 +
6169 + map nsoutcounter {
6170 + type ipv4_addr : counter
6171 + elements = { 10.0.1.1 : "ns0out",
6172 + 10.0.2.1 : "ns0out",
6173 + 10.0.1.99: "ns1out",
6174 + 10.0.2.99: "ns2out" }
6175 + }
6176 +
6177 + map nsoutcounter6 {
6178 + type ipv6_addr : counter
6179 + elements = { dead:1::1 : "ns0out6",
6180 + dead:2::1 : "ns0out6",
6181 + dead:1::99 : "ns1out6",
6182 + dead:2::99 : "ns2out6" }
6183 + }
6184 +
6185 + chain input {
6186 + type filter hook input priority 0; policy accept;
6187 + counter name ip saddr map @nsincounter
6188 + icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
6189 + }
6190 + chain output {
6191 + type filter hook output priority 0; policy accept;
6192 + counter name ip daddr map @nsoutcounter
6193 + icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
6194 + }
6195 +}
6196 +EOF
6197 +done
6198 +
6199 +sleep 3
6200 +# test basic connectivity
6201 +for i in 1 2; do
6202 + ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
6203 + if [ $? -ne 0 ];then
6204 + echo "ERROR: Could not reach other namespace(s)" 1>&2
6205 + ret=1
6206 + fi
6207 +
6208 + ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
6209 + if [ $? -ne 0 ];then
6210 + echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
6211 + ret=1
6212 + fi
6213 + check_counters ns$i
6214 + if [ $? -ne 0 ]; then
6215 + ret=1
6216 + fi
6217 +
6218 + check_ns0_counters ns$i
6219 + if [ $? -ne 0 ]; then
6220 + ret=1
6221 + fi
6222 + reset_counters
6223 +done
6224 +
6225 +if [ $ret -eq 0 ];then
6226 + echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
6227 +fi
6228 +
6229 +reset_counters
6230 +test_local_dnat
6231 +test_local_dnat6
6232 +
6233 +reset_counters
6234 +test_masquerade
6235 +test_masquerade6
6236 +
6237 +reset_counters
6238 +test_redirect
6239 +test_redirect6
6240 +
6241 +for i in 0 1 2; do ip netns del ns$i;done
6242 +
6243 +exit $ret
6244 diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
6245 index 82121a81681f..29bac5ef9a93 100644
6246 --- a/tools/testing/selftests/proc/.gitignore
6247 +++ b/tools/testing/selftests/proc/.gitignore
6248 @@ -10,4 +10,5 @@
6249 /proc-uptime-002
6250 /read
6251 /self
6252 +/setns-dcache
6253 /thread-self
6254 diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
6255 index 1c12c34cf85d..434d033ee067 100644
6256 --- a/tools/testing/selftests/proc/Makefile
6257 +++ b/tools/testing/selftests/proc/Makefile
6258 @@ -14,6 +14,7 @@ TEST_GEN_PROGS += proc-uptime-001
6259 TEST_GEN_PROGS += proc-uptime-002
6260 TEST_GEN_PROGS += read
6261 TEST_GEN_PROGS += self
6262 +TEST_GEN_PROGS += setns-dcache
6263 TEST_GEN_PROGS += thread-self
6264
6265 include ../lib.mk
6266 diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
6267 new file mode 100644
6268 index 000000000000..60ab197a73fc
6269 --- /dev/null
6270 +++ b/tools/testing/selftests/proc/setns-dcache.c
6271 @@ -0,0 +1,129 @@
6272 +/*
6273 + * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
6274 + *
6275 + * Permission to use, copy, modify, and distribute this software for any
6276 + * purpose with or without fee is hereby granted, provided that the above
6277 + * copyright notice and this permission notice appear in all copies.
6278 + *
6279 + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
6280 + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
6281 + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
6282 + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
6283 + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
6284 + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
6285 + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
6286 + */
6287 +/*
6288 + * Test that setns(CLONE_NEWNET) points to new /proc/net content even
6289 + * if the old one is in the dcache.
6290 + *
6291 + * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
6292 + */
6293 +#undef NDEBUG
6294 +#include <assert.h>
6295 +#include <errno.h>
6296 +#include <sched.h>
6297 +#include <signal.h>
6298 +#include <stdio.h>
6299 +#include <stdlib.h>
6300 +#include <string.h>
6301 +#include <unistd.h>
6302 +#include <sys/types.h>
6303 +#include <sys/stat.h>
6304 +#include <fcntl.h>
6305 +#include <sys/socket.h>
6306 +
6307 +static pid_t pid = -1;
6308 +
6309 +static void f(void)
6310 +{
6311 + if (pid > 0) {
6312 + kill(pid, SIGTERM);
6313 + }
6314 +}
6315 +
6316 +int main(void)
6317 +{
6318 + int fd[2];
6319 + char _ = 0;
6320 + int nsfd;
6321 +
6322 + atexit(f);
6323 +
6324 + /* Check for privileges and syscall availability straight away. */
6325 + if (unshare(CLONE_NEWNET) == -1) {
6326 + if (errno == ENOSYS || errno == EPERM) {
6327 + return 4;
6328 + }
6329 + return 1;
6330 + }
6331 + /* Distinguisher between two otherwise empty net namespaces. */
6332 + if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
6333 + return 1;
6334 + }
6335 +
6336 + if (pipe(fd) == -1) {
6337 + return 1;
6338 + }
6339 +
6340 + pid = fork();
6341 + if (pid == -1) {
6342 + return 1;
6343 + }
6344 +
6345 + if (pid == 0) {
6346 + if (unshare(CLONE_NEWNET) == -1) {
6347 + return 1;
6348 + }
6349 +
6350 + if (write(fd[1], &_, 1) != 1) {
6351 + return 1;
6352 + }
6353 +
6354 + pause();
6355 +
6356 + return 0;
6357 + }
6358 +
6359 + if (read(fd[0], &_, 1) != 1) {
6360 + return 1;
6361 + }
6362 +
6363 + {
6364 + char buf[64];
6365 + snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
6366 + nsfd = open(buf, O_RDONLY);
6367 + if (nsfd == -1) {
6368 + return 1;
6369 + }
6370 + }
6371 +
6372 + /* Reliably pin dentry into dcache. */
6373 + (void)open("/proc/net/unix", O_RDONLY);
6374 +
6375 + if (setns(nsfd, CLONE_NEWNET) == -1) {
6376 + return 1;
6377 + }
6378 +
6379 + kill(pid, SIGTERM);
6380 + pid = 0;
6381 +
6382 + {
6383 + char buf[4096];
6384 + ssize_t rv;
6385 + int fd;
6386 +
6387 + fd = open("/proc/net/unix", O_RDONLY);
6388 + if (fd == -1) {
6389 + return 1;
6390 + }
6391 +
6392 +#define S "Num RefCount Protocol Flags Type St Inode Path\n"
6393 + rv = read(fd, buf, sizeof(buf));
6394 +
6395 + assert(rv == strlen(S));
6396 + assert(memcmp(buf, S, strlen(S)) == 0);
6397 + }
6398 +
6399 + return 0;
6400 +}
6401 diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
6402 index c02683cfb6c9..7656c7ce79d9 100644
6403 --- a/tools/testing/selftests/timers/Makefile
6404 +++ b/tools/testing/selftests/timers/Makefile
6405 @@ -1,6 +1,6 @@
6406 # SPDX-License-Identifier: GPL-2.0
6407 CFLAGS += -O3 -Wl,-no-as-needed -Wall
6408 -LDFLAGS += -lrt -lpthread -lm
6409 +LDLIBS += -lrt -lpthread -lm
6410
6411 # these are all "safe" tests that don't modify
6412 # system time or require escalated privileges