Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0176-4.19.77-all-fixes.patch



Revision 3473
Tue Oct 29 10:31:33 2019 UTC by niro
File size: 244149 bytes
-linux-4.19.77
1 diff --git a/Makefile b/Makefile
2 index 9cb471a75a1b..aeabc6459acc 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 4
8 PATCHLEVEL = 19
9 -SUBLEVEL = 76
10 +SUBLEVEL = 77
11 EXTRAVERSION =
12 NAME = "People's Front"
13
14 diff --git a/arch/arm/boot/dts/exynos5420-peach-pit.dts b/arch/arm/boot/dts/exynos5420-peach-pit.dts
15 index 57c2332bf282..25bdc9d97a4d 100644
16 --- a/arch/arm/boot/dts/exynos5420-peach-pit.dts
17 +++ b/arch/arm/boot/dts/exynos5420-peach-pit.dts
18 @@ -437,6 +437,7 @@
19 regulator-name = "vdd_ldo10";
20 regulator-min-microvolt = <1800000>;
21 regulator-max-microvolt = <1800000>;
22 + regulator-always-on;
23 regulator-state-mem {
24 regulator-off-in-suspend;
25 };
26 diff --git a/arch/arm/boot/dts/exynos5800-peach-pi.dts b/arch/arm/boot/dts/exynos5800-peach-pi.dts
27 index d80ab9085da1..7989631b39cc 100644
28 --- a/arch/arm/boot/dts/exynos5800-peach-pi.dts
29 +++ b/arch/arm/boot/dts/exynos5800-peach-pi.dts
30 @@ -437,6 +437,7 @@
31 regulator-name = "vdd_ldo10";
32 regulator-min-microvolt = <1800000>;
33 regulator-max-microvolt = <1800000>;
34 + regulator-always-on;
35 regulator-state-mem {
36 regulator-off-in-suspend;
37 };
38 diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
39 index 895fbde4d433..c1ed83131b49 100644
40 --- a/arch/arm/boot/dts/imx7-colibri.dtsi
41 +++ b/arch/arm/boot/dts/imx7-colibri.dtsi
42 @@ -323,6 +323,7 @@
43 vmmc-supply = <&reg_module_3v3>;
44 vqmmc-supply = <&reg_DCDC3>;
45 non-removable;
46 + sdhci-caps-mask = <0x80000000 0x0>;
47 };
48
49 &iomuxc {
50 diff --git a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
51 index 8bf365d28cac..584418f517a8 100644
52 --- a/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
53 +++ b/arch/arm/boot/dts/imx7d-cl-som-imx7.dts
54 @@ -43,7 +43,7 @@
55 <&clks IMX7D_ENET1_TIME_ROOT_CLK>;
56 assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
57 assigned-clock-rates = <0>, <100000000>;
58 - phy-mode = "rgmii";
59 + phy-mode = "rgmii-id";
60 phy-handle = <&ethphy0>;
61 fsl,magic-packet;
62 status = "okay";
63 @@ -69,7 +69,7 @@
64 <&clks IMX7D_ENET2_TIME_ROOT_CLK>;
65 assigned-clock-parents = <&clks IMX7D_PLL_ENET_MAIN_100M_CLK>;
66 assigned-clock-rates = <0>, <100000000>;
67 - phy-mode = "rgmii";
68 + phy-mode = "rgmii-id";
69 phy-handle = <&ethphy1>;
70 fsl,magic-packet;
71 status = "okay";
72 diff --git a/arch/arm/mach-zynq/platsmp.c b/arch/arm/mach-zynq/platsmp.c
73 index caa6d5fe9078..b296ada97409 100644
74 --- a/arch/arm/mach-zynq/platsmp.c
75 +++ b/arch/arm/mach-zynq/platsmp.c
76 @@ -65,7 +65,7 @@ int zynq_cpun_start(u32 address, int cpu)
77 * 0x4: Jump by mov instruction
78 * 0x8: Jumping address
79 */
80 - memcpy((__force void *)zero, &zynq_secondary_trampoline,
81 + memcpy_toio(zero, &zynq_secondary_trampoline,
82 trampoline_size);
83 writel(address, zero + trampoline_size);
84
85 diff --git a/arch/arm/plat-samsung/watchdog-reset.c b/arch/arm/plat-samsung/watchdog-reset.c
86 index ce42cc640a61..71d85ff323f7 100644
87 --- a/arch/arm/plat-samsung/watchdog-reset.c
88 +++ b/arch/arm/plat-samsung/watchdog-reset.c
89 @@ -62,6 +62,7 @@ void samsung_wdt_reset(void)
90 #ifdef CONFIG_OF
91 static const struct of_device_id s3c2410_wdt_match[] = {
92 { .compatible = "samsung,s3c2410-wdt" },
93 + { .compatible = "samsung,s3c6410-wdt" },
94 {},
95 };
96
97 diff --git a/arch/arm64/boot/dts/rockchip/rk3328.dtsi b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
98 index e065394360bb..92186edefeb9 100644
99 --- a/arch/arm64/boot/dts/rockchip/rk3328.dtsi
100 +++ b/arch/arm64/boot/dts/rockchip/rk3328.dtsi
101 @@ -708,6 +708,7 @@
102 <&cru SCLK_SDMMC_DRV>, <&cru SCLK_SDMMC_SAMPLE>;
103 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
104 fifo-depth = <0x100>;
105 + max-frequency = <150000000>;
106 status = "disabled";
107 };
108
109 @@ -719,6 +720,7 @@
110 <&cru SCLK_SDIO_DRV>, <&cru SCLK_SDIO_SAMPLE>;
111 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
112 fifo-depth = <0x100>;
113 + max-frequency = <150000000>;
114 status = "disabled";
115 };
116
117 @@ -730,6 +732,7 @@
118 <&cru SCLK_EMMC_DRV>, <&cru SCLK_EMMC_SAMPLE>;
119 clock-names = "biu", "ciu", "ciu-drive", "ciu-sample";
120 fifo-depth = <0x100>;
121 + max-frequency = <150000000>;
122 status = "disabled";
123 };
124
125 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
126 index b4a48419769f..9b7d5abd04af 100644
127 --- a/arch/arm64/include/asm/cputype.h
128 +++ b/arch/arm64/include/asm/cputype.h
129 @@ -62,14 +62,6 @@
130 #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \
131 MIDR_ARCHITECTURE_MASK)
132
133 -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \
134 -({ \
135 - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \
136 - u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \
137 - \
138 - _model == (model) && rv >= (rv_min) && rv <= (rv_max); \
139 - })
140 -
141 #define ARM_CPU_IMP_ARM 0x41
142 #define ARM_CPU_IMP_APM 0x50
143 #define ARM_CPU_IMP_CAVIUM 0x43
144 @@ -153,10 +145,19 @@ struct midr_range {
145
146 #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf)
147
148 +static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min,
149 + u32 rv_max)
150 +{
151 + u32 _model = midr & MIDR_CPU_MODEL_MASK;
152 + u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK);
153 +
154 + return _model == model && rv >= rv_min && rv <= rv_max;
155 +}
156 +
157 static inline bool is_midr_in_range(u32 midr, struct midr_range const *range)
158 {
159 - return MIDR_IS_CPU_MODEL_RANGE(midr, range->model,
160 - range->rv_min, range->rv_max);
161 + return midr_is_cpu_model_range(midr, range->model,
162 + range->rv_min, range->rv_max);
163 }
164
165 static inline bool
166 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
167 index 2214a403f39b..212a48826655 100644
168 --- a/arch/arm64/include/asm/pgtable.h
169 +++ b/arch/arm64/include/asm/pgtable.h
170 @@ -224,8 +224,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
171 * Only if the new pte is valid and kernel, otherwise TLB maintenance
172 * or update_mmu_cache() have the necessary barriers.
173 */
174 - if (pte_valid_not_user(pte))
175 + if (pte_valid_not_user(pte)) {
176 dsb(ishst);
177 + isb();
178 + }
179 }
180
181 extern void __sync_icache_dcache(pte_t pteval);
182 @@ -432,6 +434,7 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
183 {
184 WRITE_ONCE(*pmdp, pmd);
185 dsb(ishst);
186 + isb();
187 }
188
189 static inline void pmd_clear(pmd_t *pmdp)
190 @@ -483,6 +486,7 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
191 {
192 WRITE_ONCE(*pudp, pud);
193 dsb(ishst);
194 + isb();
195 }
196
197 static inline void pud_clear(pud_t *pudp)
198 diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
199 index a4a1901140ee..fc247b96619c 100644
200 --- a/arch/arm64/include/asm/tlbflush.h
201 +++ b/arch/arm64/include/asm/tlbflush.h
202 @@ -224,6 +224,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr)
203
204 __tlbi(vaae1is, addr);
205 dsb(ish);
206 + isb();
207 }
208 #endif
209
210 diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
211 index 859d63cc99a3..a897efdb3ddd 100644
212 --- a/arch/arm64/kernel/cpufeature.c
213 +++ b/arch/arm64/kernel/cpufeature.c
214 @@ -846,7 +846,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _
215 u32 midr = read_cpuid_id();
216
217 /* Cavium ThunderX pass 1.x and 2.x */
218 - return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
219 + return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
220 MIDR_CPU_VAR_REV(0, 0),
221 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
222 }
223 diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
224 index 8cce091b6c21..ec6aa1863316 100644
225 --- a/arch/arm64/mm/proc.S
226 +++ b/arch/arm64/mm/proc.S
227 @@ -294,6 +294,15 @@ skip_pgd:
228 msr sctlr_el1, x18
229 isb
230
231 + /*
232 + * Invalidate the local I-cache so that any instructions fetched
233 + * speculatively from the PoC are discarded, since they may have
234 + * been dynamically patched at the PoU.
235 + */
236 + ic iallu
237 + dsb nsh
238 + isb
239 +
240 /* Set the flag to zero to indicate that we're all done */
241 str wzr, [flag_ptr]
242 ret
243 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
244 index 326448f9df16..1a42ba885188 100644
245 --- a/arch/ia64/kernel/module.c
246 +++ b/arch/ia64/kernel/module.c
247 @@ -914,10 +914,14 @@ module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mo
248 void
249 module_arch_cleanup (struct module *mod)
250 {
251 - if (mod->arch.init_unw_table)
252 + if (mod->arch.init_unw_table) {
253 unw_remove_unwind_table(mod->arch.init_unw_table);
254 - if (mod->arch.core_unw_table)
255 + mod->arch.init_unw_table = NULL;
256 + }
257 + if (mod->arch.core_unw_table) {
258 unw_remove_unwind_table(mod->arch.core_unw_table);
259 + mod->arch.core_unw_table = NULL;
260 + }
261 }
262
263 void *dereference_module_function_descriptor(struct module *mod, void *ptr)
264 diff --git a/arch/m68k/include/asm/atarihw.h b/arch/m68k/include/asm/atarihw.h
265 index 9000b249d225..407a617fa3a2 100644
266 --- a/arch/m68k/include/asm/atarihw.h
267 +++ b/arch/m68k/include/asm/atarihw.h
268 @@ -22,7 +22,6 @@
269
270 #include <linux/types.h>
271 #include <asm/bootinfo-atari.h>
272 -#include <asm/raw_io.h>
273 #include <asm/kmap.h>
274
275 extern u_long atari_mch_cookie;
276 @@ -126,14 +125,6 @@ extern struct atari_hw_present atari_hw_present;
277 */
278
279
280 -#define atari_readb raw_inb
281 -#define atari_writeb raw_outb
282 -
283 -#define atari_inb_p raw_inb
284 -#define atari_outb_p raw_outb
285 -
286 -
287 -
288 #include <linux/mm.h>
289 #include <asm/cacheflush.h>
290
291 diff --git a/arch/m68k/include/asm/io_mm.h b/arch/m68k/include/asm/io_mm.h
292 index 782b78f8a048..e056feabbaf0 100644
293 --- a/arch/m68k/include/asm/io_mm.h
294 +++ b/arch/m68k/include/asm/io_mm.h
295 @@ -29,7 +29,11 @@
296 #include <asm-generic/iomap.h>
297
298 #ifdef CONFIG_ATARI
299 -#include <asm/atarihw.h>
300 +#define atari_readb raw_inb
301 +#define atari_writeb raw_outb
302 +
303 +#define atari_inb_p raw_inb
304 +#define atari_outb_p raw_outb
305 #endif
306
307
308 diff --git a/arch/m68k/include/asm/macintosh.h b/arch/m68k/include/asm/macintosh.h
309 index 08cee11180e6..e441517785fd 100644
310 --- a/arch/m68k/include/asm/macintosh.h
311 +++ b/arch/m68k/include/asm/macintosh.h
312 @@ -4,6 +4,7 @@
313
314 #include <linux/seq_file.h>
315 #include <linux/interrupt.h>
316 +#include <linux/irq.h>
317
318 #include <asm/bootinfo-mac.h>
319
320 diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
321 index 828f6656f8f7..649fb268f446 100644
322 --- a/arch/powerpc/platforms/powernv/opal-imc.c
323 +++ b/arch/powerpc/platforms/powernv/opal-imc.c
324 @@ -57,9 +57,9 @@ static void export_imc_mode_and_cmd(struct device_node *node,
325 struct imc_pmu *pmu_ptr)
326 {
327 static u64 loc, *imc_mode_addr, *imc_cmd_addr;
328 - int chip = 0, nid;
329 char mode[16], cmd[16];
330 u32 cb_offset;
331 + struct imc_mem_info *ptr = pmu_ptr->mem_info;
332
333 imc_debugfs_parent = debugfs_create_dir("imc", powerpc_debugfs_root);
334
335 @@ -73,20 +73,20 @@ static void export_imc_mode_and_cmd(struct device_node *node,
336 if (of_property_read_u32(node, "cb_offset", &cb_offset))
337 cb_offset = IMC_CNTL_BLK_OFFSET;
338
339 - for_each_node(nid) {
340 - loc = (u64)(pmu_ptr->mem_info[chip].vbase) + cb_offset;
341 + while (ptr->vbase != NULL) {
342 + loc = (u64)(ptr->vbase) + cb_offset;
343 imc_mode_addr = (u64 *)(loc + IMC_CNTL_BLK_MODE_OFFSET);
344 - sprintf(mode, "imc_mode_%d", nid);
345 + sprintf(mode, "imc_mode_%d", (u32)(ptr->id));
346 if (!imc_debugfs_create_x64(mode, 0600, imc_debugfs_parent,
347 imc_mode_addr))
348 goto err;
349
350 imc_cmd_addr = (u64 *)(loc + IMC_CNTL_BLK_CMD_OFFSET);
351 - sprintf(cmd, "imc_cmd_%d", nid);
352 + sprintf(cmd, "imc_cmd_%d", (u32)(ptr->id));
353 if (!imc_debugfs_create_x64(cmd, 0600, imc_debugfs_parent,
354 imc_cmd_addr))
355 goto err;
356 - chip++;
357 + ptr++;
358 }
359 return;
360
361 diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
362 index 8ff7cb3da1cb..2bc189187ed4 100644
363 --- a/arch/s390/crypto/aes_s390.c
364 +++ b/arch/s390/crypto/aes_s390.c
365 @@ -585,6 +585,9 @@ static int xts_aes_encrypt(struct blkcipher_desc *desc,
366 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
367 struct blkcipher_walk walk;
368
369 + if (!nbytes)
370 + return -EINVAL;
371 +
372 if (unlikely(!xts_ctx->fc))
373 return xts_fallback_encrypt(desc, dst, src, nbytes);
374
375 @@ -599,6 +602,9 @@ static int xts_aes_decrypt(struct blkcipher_desc *desc,
376 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm);
377 struct blkcipher_walk walk;
378
379 + if (!nbytes)
380 + return -EINVAL;
381 +
382 if (unlikely(!xts_ctx->fc))
383 return xts_fallback_decrypt(desc, dst, src, nbytes);
384
385 diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
386 index aebedbaf5260..5d0b72f28140 100644
387 --- a/arch/x86/include/asm/intel-family.h
388 +++ b/arch/x86/include/asm/intel-family.h
389 @@ -58,6 +58,9 @@
390 #define INTEL_FAM6_ICELAKE_MOBILE 0x7E
391 #define INTEL_FAM6_ICELAKE_NNPI 0x9D
392
393 +#define INTEL_FAM6_TIGERLAKE_L 0x8C
394 +#define INTEL_FAM6_TIGERLAKE 0x8D
395 +
396 /* "Small Core" Processors (Atom) */
397
398 #define INTEL_FAM6_ATOM_BONNELL 0x1C /* Diamondville, Pineview */
399 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
400 index b316bd61a6ac..dfdd1caf0d55 100644
401 --- a/arch/x86/kernel/apic/apic.c
402 +++ b/arch/x86/kernel/apic/apic.c
403 @@ -1450,54 +1450,72 @@ static void lapic_setup_esr(void)
404 oldvalue, value);
405 }
406
407 -static void apic_pending_intr_clear(void)
408 +#define APIC_IR_REGS APIC_ISR_NR
409 +#define APIC_IR_BITS (APIC_IR_REGS * 32)
410 +#define APIC_IR_MAPSIZE (APIC_IR_BITS / BITS_PER_LONG)
411 +
412 +union apic_ir {
413 + unsigned long map[APIC_IR_MAPSIZE];
414 + u32 regs[APIC_IR_REGS];
415 +};
416 +
417 +static bool apic_check_and_ack(union apic_ir *irr, union apic_ir *isr)
418 {
419 - long long max_loops = cpu_khz ? cpu_khz : 1000000;
420 - unsigned long long tsc = 0, ntsc;
421 - unsigned int queued;
422 - unsigned long value;
423 - int i, j, acked = 0;
424 + int i, bit;
425 +
426 + /* Read the IRRs */
427 + for (i = 0; i < APIC_IR_REGS; i++)
428 + irr->regs[i] = apic_read(APIC_IRR + i * 0x10);
429 +
430 + /* Read the ISRs */
431 + for (i = 0; i < APIC_IR_REGS; i++)
432 + isr->regs[i] = apic_read(APIC_ISR + i * 0x10);
433
434 - if (boot_cpu_has(X86_FEATURE_TSC))
435 - tsc = rdtsc();
436 /*
437 - * After a crash, we no longer service the interrupts and a pending
438 - * interrupt from previous kernel might still have ISR bit set.
439 - *
440 - * Most probably by now CPU has serviced that pending interrupt and
441 - * it might not have done the ack_APIC_irq() because it thought,
442 - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it
443 - * does not clear the ISR bit and cpu thinks it has already serivced
444 - * the interrupt. Hence a vector might get locked. It was noticed
445 - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR.
446 + * If the ISR map is not empty. ACK the APIC and run another round
447 + * to verify whether a pending IRR has been unblocked and turned
448 + * into a ISR.
449 */
450 - do {
451 - queued = 0;
452 - for (i = APIC_ISR_NR - 1; i >= 0; i--)
453 - queued |= apic_read(APIC_IRR + i*0x10);
454 -
455 - for (i = APIC_ISR_NR - 1; i >= 0; i--) {
456 - value = apic_read(APIC_ISR + i*0x10);
457 - for_each_set_bit(j, &value, 32) {
458 - ack_APIC_irq();
459 - acked++;
460 - }
461 - }
462 - if (acked > 256) {
463 - pr_err("LAPIC pending interrupts after %d EOI\n", acked);
464 - break;
465 - }
466 - if (queued) {
467 - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) {
468 - ntsc = rdtsc();
469 - max_loops = (long long)cpu_khz << 10;
470 - max_loops -= ntsc - tsc;
471 - } else {
472 - max_loops--;
473 - }
474 - }
475 - } while (queued && max_loops > 0);
476 - WARN_ON(max_loops <= 0);
477 + if (!bitmap_empty(isr->map, APIC_IR_BITS)) {
478 + /*
479 + * There can be multiple ISR bits set when a high priority
480 + * interrupt preempted a lower priority one. Issue an ACK
481 + * per set bit.
482 + */
483 + for_each_set_bit(bit, isr->map, APIC_IR_BITS)
484 + ack_APIC_irq();
485 + return true;
486 + }
487 +
488 + return !bitmap_empty(irr->map, APIC_IR_BITS);
489 +}
490 +
491 +/*
492 + * After a crash, we no longer service the interrupts and a pending
493 + * interrupt from previous kernel might still have ISR bit set.
494 + *
495 + * Most probably by now the CPU has serviced that pending interrupt and it
496 + * might not have done the ack_APIC_irq() because it thought, interrupt
497 + * came from i8259 as ExtInt. LAPIC did not get EOI so it does not clear
498 + * the ISR bit and cpu thinks it has already serivced the interrupt. Hence
499 + * a vector might get locked. It was noticed for timer irq (vector
500 + * 0x31). Issue an extra EOI to clear ISR.
501 + *
502 + * If there are pending IRR bits they turn into ISR bits after a higher
503 + * priority ISR bit has been acked.
504 + */
505 +static void apic_pending_intr_clear(void)
506 +{
507 + union apic_ir irr, isr;
508 + unsigned int i;
509 +
510 + /* 512 loops are way oversized and give the APIC a chance to obey. */
511 + for (i = 0; i < 512; i++) {
512 + if (!apic_check_and_ack(&irr, &isr))
513 + return;
514 + }
515 + /* Dump the IRR/ISR content if that failed */
516 + pr_warn("APIC: Stale IRR: %256pb ISR: %256pb\n", irr.map, isr.map);
517 }
518
519 /**
520 @@ -1520,6 +1538,14 @@ static void setup_local_APIC(void)
521 return;
522 }
523
524 + /*
525 + * If this comes from kexec/kcrash the APIC might be enabled in
526 + * SPIV. Soft disable it before doing further initialization.
527 + */
528 + value = apic_read(APIC_SPIV);
529 + value &= ~APIC_SPIV_APIC_ENABLED;
530 + apic_write(APIC_SPIV, value);
531 +
532 #ifdef CONFIG_X86_32
533 /* Pound the ESR really hard over the head with a big hammer - mbligh */
534 if (lapic_is_integrated() && apic->disable_esr) {
535 @@ -1565,6 +1591,7 @@ static void setup_local_APIC(void)
536 value &= ~APIC_TPRI_MASK;
537 apic_write(APIC_TASKPRI, value);
538
539 + /* Clear eventually stale ISR/IRR bits */
540 apic_pending_intr_clear();
541
542 /*
543 diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
544 index 10e1d17aa060..c352ca2e1456 100644
545 --- a/arch/x86/kernel/apic/vector.c
546 +++ b/arch/x86/kernel/apic/vector.c
547 @@ -400,6 +400,17 @@ static int activate_reserved(struct irq_data *irqd)
548 if (!irqd_can_reserve(irqd))
549 apicd->can_reserve = false;
550 }
551 +
552 + /*
553 + * Check to ensure that the effective affinity mask is a subset
554 + * the user supplied affinity mask, and warn the user if it is not
555 + */
556 + if (!cpumask_subset(irq_data_get_effective_affinity_mask(irqd),
557 + irq_data_get_affinity_mask(irqd))) {
558 + pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n",
559 + irqd->irq);
560 + }
561 +
562 return ret;
563 }
564
565 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
566 index 04adc8d60aed..b2b87b91f336 100644
567 --- a/arch/x86/kernel/smp.c
568 +++ b/arch/x86/kernel/smp.c
569 @@ -181,6 +181,12 @@ asmlinkage __visible void smp_reboot_interrupt(void)
570 irq_exit();
571 }
572
573 +static int register_stop_handler(void)
574 +{
575 + return register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
576 + NMI_FLAG_FIRST, "smp_stop");
577 +}
578 +
579 static void native_stop_other_cpus(int wait)
580 {
581 unsigned long flags;
582 @@ -214,39 +220,41 @@ static void native_stop_other_cpus(int wait)
583 apic->send_IPI_allbutself(REBOOT_VECTOR);
584
585 /*
586 - * Don't wait longer than a second if the caller
587 - * didn't ask us to wait.
588 + * Don't wait longer than a second for IPI completion. The
589 + * wait request is not checked here because that would
590 + * prevent an NMI shutdown attempt in case that not all
591 + * CPUs reach shutdown state.
592 */
593 timeout = USEC_PER_SEC;
594 - while (num_online_cpus() > 1 && (wait || timeout--))
595 + while (num_online_cpus() > 1 && timeout--)
596 udelay(1);
597 }
598 -
599 - /* if the REBOOT_VECTOR didn't work, try with the NMI */
600 - if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
601 - if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
602 - NMI_FLAG_FIRST, "smp_stop"))
603 - /* Note: we ignore failures here */
604 - /* Hope the REBOOT_IRQ is good enough */
605 - goto finish;
606 -
607 - /* sync above data before sending IRQ */
608 - wmb();
609
610 - pr_emerg("Shutting down cpus with NMI\n");
611 + /* if the REBOOT_VECTOR didn't work, try with the NMI */
612 + if (num_online_cpus() > 1) {
613 + /*
614 + * If NMI IPI is enabled, try to register the stop handler
615 + * and send the IPI. In any case try to wait for the other
616 + * CPUs to stop.
617 + */
618 + if (!smp_no_nmi_ipi && !register_stop_handler()) {
619 + /* Sync above data before sending IRQ */
620 + wmb();
621
622 - apic->send_IPI_allbutself(NMI_VECTOR);
623 + pr_emerg("Shutting down cpus with NMI\n");
624
625 + apic->send_IPI_allbutself(NMI_VECTOR);
626 + }
627 /*
628 - * Don't wait longer than a 10 ms if the caller
629 - * didn't ask us to wait.
630 + * Don't wait longer than 10 ms if the caller didn't
631 + * reqeust it. If wait is true, the machine hangs here if
632 + * one or more CPUs do not reach shutdown state.
633 */
634 timeout = USEC_PER_MSEC * 10;
635 while (num_online_cpus() > 1 && (wait || timeout--))
636 udelay(1);
637 }
638
639 -finish:
640 local_irq_save(flags);
641 disable_local_APIC();
642 mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
643 diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
644 index 429728b35bca..e699f4d2a450 100644
645 --- a/arch/x86/kvm/emulate.c
646 +++ b/arch/x86/kvm/emulate.c
647 @@ -5368,6 +5368,8 @@ done_prefixes:
648 ctxt->memopp->addr.mem.ea + ctxt->_eip);
649
650 done:
651 + if (rc == X86EMUL_PROPAGATE_FAULT)
652 + ctxt->have_exception = true;
653 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
654 }
655
656 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
657 index dbae8415cf4a..05cb5855255e 100644
658 --- a/arch/x86/kvm/x86.c
659 +++ b/arch/x86/kvm/x86.c
660 @@ -581,8 +581,14 @@ static int kvm_read_nested_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn,
661 data, offset, len, access);
662 }
663
664 +static inline u64 pdptr_rsvd_bits(struct kvm_vcpu *vcpu)
665 +{
666 + return rsvd_bits(cpuid_maxphyaddr(vcpu), 63) | rsvd_bits(5, 8) |
667 + rsvd_bits(1, 2);
668 +}
669 +
670 /*
671 - * Load the pae pdptrs. Return true is they are all valid.
672 + * Load the pae pdptrs. Return 1 if they are all valid, 0 otherwise.
673 */
674 int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
675 {
676 @@ -601,8 +607,7 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3)
677 }
678 for (i = 0; i < ARRAY_SIZE(pdpte); ++i) {
679 if ((pdpte[i] & PT_PRESENT_MASK) &&
680 - (pdpte[i] &
681 - vcpu->arch.mmu.guest_rsvd_check.rsvd_bits_mask[0][2])) {
682 + (pdpte[i] & pdptr_rsvd_bits(vcpu))) {
683 ret = 0;
684 goto out;
685 }
686 @@ -6244,8 +6249,16 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
687 if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
688 emulation_type))
689 return EMULATE_DONE;
690 - if (ctxt->have_exception && inject_emulated_exception(vcpu))
691 + if (ctxt->have_exception) {
692 + /*
693 + * #UD should result in just EMULATION_FAILED, and trap-like
694 + * exception should not be encountered during decode.
695 + */
696 + WARN_ON_ONCE(ctxt->exception.vector == UD_VECTOR ||
697 + exception_type(ctxt->exception.vector) == EXCPT_TRAP);
698 + inject_emulated_exception(vcpu);
699 return EMULATE_DONE;
700 + }
701 if (emulation_type & EMULTYPE_SKIP)
702 return EMULATE_FAIL;
703 return handle_emulation_failure(vcpu, emulation_type);
704 diff --git a/arch/x86/mm/pti.c b/arch/x86/mm/pti.c
705 index 4df3e5c89d57..622d5968c979 100644
706 --- a/arch/x86/mm/pti.c
707 +++ b/arch/x86/mm/pti.c
708 @@ -338,13 +338,15 @@ pti_clone_pgtable(unsigned long start, unsigned long end,
709
710 pud = pud_offset(p4d, addr);
711 if (pud_none(*pud)) {
712 - addr += PUD_SIZE;
713 + WARN_ON_ONCE(addr & ~PUD_MASK);
714 + addr = round_up(addr + 1, PUD_SIZE);
715 continue;
716 }
717
718 pmd = pmd_offset(pud, addr);
719 if (pmd_none(*pmd)) {
720 - addr += PMD_SIZE;
721 + WARN_ON_ONCE(addr & ~PMD_MASK);
722 + addr = round_up(addr + 1, PMD_SIZE);
723 continue;
724 }
725
726 @@ -643,6 +645,8 @@ void __init pti_init(void)
727 */
728 void pti_finalize(void)
729 {
730 + if (!boot_cpu_has(X86_FEATURE_PTI))
731 + return;
732 /*
733 * We need to clone everything (again) that maps parts of the
734 * kernel image.
735 diff --git a/block/blk-flush.c b/block/blk-flush.c
736 index 87fc49daa2b4..256fa1ccc2bd 100644
737 --- a/block/blk-flush.c
738 +++ b/block/blk-flush.c
739 @@ -232,6 +232,16 @@ static void flush_end_io(struct request *flush_rq, blk_status_t error)
740
741 /* release the tag's ownership to the req cloned from */
742 spin_lock_irqsave(&fq->mq_flush_lock, flags);
743 +
744 + if (!refcount_dec_and_test(&flush_rq->ref)) {
745 + fq->rq_status = error;
746 + spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
747 + return;
748 + }
749 +
750 + if (fq->rq_status != BLK_STS_OK)
751 + error = fq->rq_status;
752 +
753 hctx = blk_mq_map_queue(q, flush_rq->mq_ctx->cpu);
754 if (!q->elevator) {
755 blk_mq_tag_set_rq(hctx, flush_rq->tag, fq->orig_rq);
756 diff --git a/block/blk-mq.c b/block/blk-mq.c
757 index 7ea85ec52026..684acaa96db7 100644
758 --- a/block/blk-mq.c
759 +++ b/block/blk-mq.c
760 @@ -844,7 +844,10 @@ static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
761 */
762 if (blk_mq_req_expired(rq, next))
763 blk_mq_rq_timed_out(rq, reserved);
764 - if (refcount_dec_and_test(&rq->ref))
765 +
766 + if (is_flush_rq(rq, hctx))
767 + rq->end_io(rq, 0);
768 + else if (refcount_dec_and_test(&rq->ref))
769 __blk_mq_free_request(rq);
770 }
771
772 diff --git a/block/blk.h b/block/blk.h
773 index 11e4ca2f2cd4..1a5b67b57e6b 100644
774 --- a/block/blk.h
775 +++ b/block/blk.h
776 @@ -23,6 +23,7 @@ struct blk_flush_queue {
777 unsigned int flush_queue_delayed:1;
778 unsigned int flush_pending_idx:1;
779 unsigned int flush_running_idx:1;
780 + blk_status_t rq_status;
781 unsigned long flush_pending_since;
782 struct list_head flush_queue[2];
783 struct list_head flush_data_in_flight;
784 @@ -123,6 +124,12 @@ static inline void __blk_get_queue(struct request_queue *q)
785 kobject_get(&q->kobj);
786 }
787
788 +static inline bool
789 +is_flush_rq(struct request *req, struct blk_mq_hw_ctx *hctx)
790 +{
791 + return hctx->fq->flush_rq == req;
792 +}
793 +
794 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
795 int node, int cmd_size, gfp_t flags);
796 void blk_free_flush_queue(struct blk_flush_queue *q);
797 diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
798 index fc447410ae4d..a448cdf56718 100644
799 --- a/drivers/acpi/acpi_processor.c
800 +++ b/drivers/acpi/acpi_processor.c
801 @@ -282,9 +282,13 @@ static int acpi_processor_get_info(struct acpi_device *device)
802 }
803
804 if (acpi_duplicate_processor_id(pr->acpi_id)) {
805 - dev_err(&device->dev,
806 - "Failed to get unique processor _UID (0x%x)\n",
807 - pr->acpi_id);
808 + if (pr->acpi_id == 0xff)
809 + dev_info_once(&device->dev,
810 + "Entry not well-defined, consider updating BIOS\n");
811 + else
812 + dev_err(&device->dev,
813 + "Failed to get unique processor _UID (0x%x)\n",
814 + pr->acpi_id);
815 return -ENODEV;
816 }
817
818 diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
819 index d9ce4b162e2c..a1aa59849b96 100644
820 --- a/drivers/acpi/cppc_acpi.c
821 +++ b/drivers/acpi/cppc_acpi.c
822 @@ -369,8 +369,10 @@ static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
823 union acpi_object *psd = NULL;
824 struct acpi_psd_package *pdomain;
825
826 - status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
827 - ACPI_TYPE_PACKAGE);
828 + status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
829 + &buffer, ACPI_TYPE_PACKAGE);
830 + if (status == AE_NOT_FOUND) /* _PSD is optional */
831 + return 0;
832 if (ACPI_FAILURE(status))
833 return -ENODEV;
834
835 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
836 index e967c1173ba3..222ea3f12f41 100644
837 --- a/drivers/acpi/custom_method.c
838 +++ b/drivers/acpi/custom_method.c
839 @@ -48,8 +48,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
840 if ((*ppos > max_size) ||
841 (*ppos + count > max_size) ||
842 (*ppos + count < count) ||
843 - (count > uncopied_bytes))
844 + (count > uncopied_bytes)) {
845 + kfree(buf);
846 return -EINVAL;
847 + }
848
849 if (copy_from_user(buf + (*ppos), user_buf, count)) {
850 kfree(buf);
851 @@ -69,6 +71,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
852 add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
853 }
854
855 + kfree(buf);
856 return count;
857 }
858
859 diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
860 index c576a6fe4ebb..94ded9513c73 100644
861 --- a/drivers/acpi/pci_irq.c
862 +++ b/drivers/acpi/pci_irq.c
863 @@ -462,8 +462,10 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
864 * No IRQ known to the ACPI subsystem - maybe the BIOS /
865 * driver reported one, then use it. Exit in any case.
866 */
867 - if (!acpi_pci_irq_valid(dev, pin))
868 + if (!acpi_pci_irq_valid(dev, pin)) {
869 + kfree(entry);
870 return 0;
871 + }
872
873 if (acpi_isa_register_gsi(dev))
874 dev_warn(&dev->dev, "PCI INT %c: no GSI\n",
875 diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
876 index 021ce46e2e57..5d110b1362e7 100644
877 --- a/drivers/ata/ahci.c
878 +++ b/drivers/ata/ahci.c
879 @@ -81,6 +81,12 @@ enum board_ids {
880 board_ahci_sb700, /* for SB700 and SB800 */
881 board_ahci_vt8251,
882
883 + /*
884 + * board IDs for Intel chipsets that support more than 6 ports
885 + * *and* end up needing the PCS quirk.
886 + */
887 + board_ahci_pcs7,
888 +
889 /* aliases */
890 board_ahci_mcp_linux = board_ahci_mcp65,
891 board_ahci_mcp67 = board_ahci_mcp65,
892 @@ -236,6 +242,12 @@ static const struct ata_port_info ahci_port_info[] = {
893 .udma_mask = ATA_UDMA6,
894 .port_ops = &ahci_vt8251_ops,
895 },
896 + [board_ahci_pcs7] = {
897 + .flags = AHCI_FLAG_COMMON,
898 + .pio_mask = ATA_PIO4,
899 + .udma_mask = ATA_UDMA6,
900 + .port_ops = &ahci_ops,
901 + },
902 };
903
904 static const struct pci_device_id ahci_pci_tbl[] = {
905 @@ -280,26 +292,26 @@ static const struct pci_device_id ahci_pci_tbl[] = {
906 { PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
907 { PCI_VDEVICE(INTEL, 0x3b2c), board_ahci_mobile }, /* PCH M RAID */
908 { PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
909 - { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
910 - { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
911 - { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
912 - { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
913 - { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
914 - { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
915 - { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
916 - { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
917 - { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
918 - { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
919 - { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
920 - { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
921 - { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
922 - { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
923 - { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
924 - { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
925 - { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
926 - { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
927 - { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
928 - { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
929 + { PCI_VDEVICE(INTEL, 0x19b0), board_ahci_pcs7 }, /* DNV AHCI */
930 + { PCI_VDEVICE(INTEL, 0x19b1), board_ahci_pcs7 }, /* DNV AHCI */
931 + { PCI_VDEVICE(INTEL, 0x19b2), board_ahci_pcs7 }, /* DNV AHCI */
932 + { PCI_VDEVICE(INTEL, 0x19b3), board_ahci_pcs7 }, /* DNV AHCI */
933 + { PCI_VDEVICE(INTEL, 0x19b4), board_ahci_pcs7 }, /* DNV AHCI */
934 + { PCI_VDEVICE(INTEL, 0x19b5), board_ahci_pcs7 }, /* DNV AHCI */
935 + { PCI_VDEVICE(INTEL, 0x19b6), board_ahci_pcs7 }, /* DNV AHCI */
936 + { PCI_VDEVICE(INTEL, 0x19b7), board_ahci_pcs7 }, /* DNV AHCI */
937 + { PCI_VDEVICE(INTEL, 0x19bE), board_ahci_pcs7 }, /* DNV AHCI */
938 + { PCI_VDEVICE(INTEL, 0x19bF), board_ahci_pcs7 }, /* DNV AHCI */
939 + { PCI_VDEVICE(INTEL, 0x19c0), board_ahci_pcs7 }, /* DNV AHCI */
940 + { PCI_VDEVICE(INTEL, 0x19c1), board_ahci_pcs7 }, /* DNV AHCI */
941 + { PCI_VDEVICE(INTEL, 0x19c2), board_ahci_pcs7 }, /* DNV AHCI */
942 + { PCI_VDEVICE(INTEL, 0x19c3), board_ahci_pcs7 }, /* DNV AHCI */
943 + { PCI_VDEVICE(INTEL, 0x19c4), board_ahci_pcs7 }, /* DNV AHCI */
944 + { PCI_VDEVICE(INTEL, 0x19c5), board_ahci_pcs7 }, /* DNV AHCI */
945 + { PCI_VDEVICE(INTEL, 0x19c6), board_ahci_pcs7 }, /* DNV AHCI */
946 + { PCI_VDEVICE(INTEL, 0x19c7), board_ahci_pcs7 }, /* DNV AHCI */
947 + { PCI_VDEVICE(INTEL, 0x19cE), board_ahci_pcs7 }, /* DNV AHCI */
948 + { PCI_VDEVICE(INTEL, 0x19cF), board_ahci_pcs7 }, /* DNV AHCI */
949 { PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
950 { PCI_VDEVICE(INTEL, 0x1c03), board_ahci_mobile }, /* CPT M AHCI */
951 { PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
952 @@ -639,30 +651,6 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
953 ahci_save_initial_config(&pdev->dev, hpriv);
954 }
955
956 -static int ahci_pci_reset_controller(struct ata_host *host)
957 -{
958 - struct pci_dev *pdev = to_pci_dev(host->dev);
959 - int rc;
960 -
961 - rc = ahci_reset_controller(host);
962 - if (rc)
963 - return rc;
964 -
965 - if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
966 - struct ahci_host_priv *hpriv = host->private_data;
967 - u16 tmp16;
968 -
969 - /* configure PCS */
970 - pci_read_config_word(pdev, 0x92, &tmp16);
971 - if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
972 - tmp16 |= hpriv->port_map;
973 - pci_write_config_word(pdev, 0x92, tmp16);
974 - }
975 - }
976 -
977 - return 0;
978 -}
979 -
980 static void ahci_pci_init_controller(struct ata_host *host)
981 {
982 struct ahci_host_priv *hpriv = host->private_data;
983 @@ -865,7 +853,7 @@ static int ahci_pci_device_runtime_resume(struct device *dev)
984 struct ata_host *host = pci_get_drvdata(pdev);
985 int rc;
986
987 - rc = ahci_pci_reset_controller(host);
988 + rc = ahci_reset_controller(host);
989 if (rc)
990 return rc;
991 ahci_pci_init_controller(host);
992 @@ -900,7 +888,7 @@ static int ahci_pci_device_resume(struct device *dev)
993 ahci_mcp89_apple_enable(pdev);
994
995 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) {
996 - rc = ahci_pci_reset_controller(host);
997 + rc = ahci_reset_controller(host);
998 if (rc)
999 return rc;
1000
1001 @@ -1635,6 +1623,34 @@ update_policy:
1002 ap->target_lpm_policy = policy;
1003 }
1004
1005 +static void ahci_intel_pcs_quirk(struct pci_dev *pdev, struct ahci_host_priv *hpriv)
1006 +{
1007 + const struct pci_device_id *id = pci_match_id(ahci_pci_tbl, pdev);
1008 + u16 tmp16;
1009 +
1010 + /*
1011 + * Only apply the 6-port PCS quirk for known legacy platforms.
1012 + */
1013 + if (!id || id->vendor != PCI_VENDOR_ID_INTEL)
1014 + return;
1015 + if (((enum board_ids) id->driver_data) < board_ahci_pcs7)
1016 + return;
1017 +
1018 + /*
1019 + * port_map is determined from PORTS_IMPL PCI register which is
1020 + * implemented as write or write-once register. If the register
1021 + * isn't programmed, ahci automatically generates it from number
1022 + * of ports, which is good enough for PCS programming. It is
1023 + * otherwise expected that platform firmware enables the ports
1024 + * before the OS boots.
1025 + */
1026 + pci_read_config_word(pdev, PCS_6, &tmp16);
1027 + if ((tmp16 & hpriv->port_map) != hpriv->port_map) {
1028 + tmp16 |= hpriv->port_map;
1029 + pci_write_config_word(pdev, PCS_6, tmp16);
1030 + }
1031 +}
1032 +
1033 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1034 {
1035 unsigned int board_id = ent->driver_data;
1036 @@ -1747,6 +1763,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1037 /* save initial config */
1038 ahci_pci_save_initial_config(pdev, hpriv);
1039
1040 + /*
1041 + * If platform firmware failed to enable ports, try to enable
1042 + * them here.
1043 + */
1044 + ahci_intel_pcs_quirk(pdev, hpriv);
1045 +
1046 /* prepare host */
1047 if (hpriv->cap & HOST_CAP_NCQ) {
1048 pi.flags |= ATA_FLAG_NCQ;
1049 @@ -1856,7 +1878,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1050 if (rc)
1051 return rc;
1052
1053 - rc = ahci_pci_reset_controller(host);
1054 + rc = ahci_reset_controller(host);
1055 if (rc)
1056 return rc;
1057
1058 diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
1059 index 6a1515f0da40..9290e787abdc 100644
1060 --- a/drivers/ata/ahci.h
1061 +++ b/drivers/ata/ahci.h
1062 @@ -261,6 +261,8 @@ enum {
1063 ATA_FLAG_ACPI_SATA | ATA_FLAG_AN,
1064
1065 ICH_MAP = 0x90, /* ICH MAP register */
1066 + PCS_6 = 0x92, /* 6 port PCS */
1067 + PCS_7 = 0x94, /* 7+ port PCS (Denverton) */
1068
1069 /* em constants */
1070 EM_MAX_SLOTS = 8,
1071 diff --git a/drivers/base/soc.c b/drivers/base/soc.c
1072 index 10b280f30217..7e91894a380b 100644
1073 --- a/drivers/base/soc.c
1074 +++ b/drivers/base/soc.c
1075 @@ -157,6 +157,7 @@ out2:
1076 out1:
1077 return ERR_PTR(ret);
1078 }
1079 +EXPORT_SYMBOL_GPL(soc_device_register);
1080
1081 /* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
1082 void soc_device_unregister(struct soc_device *soc_dev)
1083 @@ -166,6 +167,7 @@ void soc_device_unregister(struct soc_device *soc_dev)
1084 device_unregister(&soc_dev->dev);
1085 early_soc_dev_attr = NULL;
1086 }
1087 +EXPORT_SYMBOL_GPL(soc_device_unregister);
1088
1089 static int __init soc_bus_register(void)
1090 {
1091 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1092 index cef8e00c9d9d..126c2c514673 100644
1093 --- a/drivers/block/loop.c
1094 +++ b/drivers/block/loop.c
1095 @@ -1719,6 +1719,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
1096 case LOOP_SET_FD:
1097 case LOOP_CHANGE_FD:
1098 case LOOP_SET_BLOCK_SIZE:
1099 + case LOOP_SET_DIRECT_IO:
1100 err = lo_ioctl(bdev, mode, cmd, arg);
1101 break;
1102 default:
1103 diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
1104 index fa60f265ee50..b1c7009de1f4 100644
1105 --- a/drivers/block/nbd.c
1106 +++ b/drivers/block/nbd.c
1107 @@ -353,8 +353,10 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
1108 }
1109 config = nbd->config;
1110
1111 - if (!mutex_trylock(&cmd->lock))
1112 + if (!mutex_trylock(&cmd->lock)) {
1113 + nbd_config_put(nbd);
1114 return BLK_EH_RESET_TIMER;
1115 + }
1116
1117 if (config->num_connections > 1) {
1118 dev_err_ratelimited(nbd_to_dev(nbd),
1119 diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
1120 index aaf9e5afaad4..0ef7cb0448e8 100644
1121 --- a/drivers/char/hw_random/core.c
1122 +++ b/drivers/char/hw_random/core.c
1123 @@ -67,7 +67,7 @@ static void add_early_randomness(struct hwrng *rng)
1124 size_t size = min_t(size_t, 16, rng_buffer_size());
1125
1126 mutex_lock(&reading_mutex);
1127 - bytes_read = rng_get_data(rng, rng_buffer, size, 1);
1128 + bytes_read = rng_get_data(rng, rng_buffer, size, 0);
1129 mutex_unlock(&reading_mutex);
1130 if (bytes_read > 0)
1131 add_device_randomness(rng_buffer, bytes_read);
1132 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
1133 index 7b4e4de778e4..54b86490d9ca 100644
1134 --- a/drivers/char/mem.c
1135 +++ b/drivers/char/mem.c
1136 @@ -97,6 +97,13 @@ void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
1137 }
1138 #endif
1139
1140 +static inline bool should_stop_iteration(void)
1141 +{
1142 + if (need_resched())
1143 + cond_resched();
1144 + return fatal_signal_pending(current);
1145 +}
1146 +
1147 /*
1148 * This funcion reads the *physical* memory. The f_pos points directly to the
1149 * memory location.
1150 @@ -175,6 +182,8 @@ static ssize_t read_mem(struct file *file, char __user *buf,
1151 p += sz;
1152 count -= sz;
1153 read += sz;
1154 + if (should_stop_iteration())
1155 + break;
1156 }
1157 kfree(bounce);
1158
1159 @@ -251,6 +260,8 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
1160 p += sz;
1161 count -= sz;
1162 written += sz;
1163 + if (should_stop_iteration())
1164 + break;
1165 }
1166
1167 *ppos += written;
1168 @@ -468,6 +479,10 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
1169 read += sz;
1170 low_count -= sz;
1171 count -= sz;
1172 + if (should_stop_iteration()) {
1173 + count = 0;
1174 + break;
1175 + }
1176 }
1177 }
1178
1179 @@ -492,6 +507,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
1180 buf += sz;
1181 read += sz;
1182 p += sz;
1183 + if (should_stop_iteration())
1184 + break;
1185 }
1186 free_page((unsigned long)kbuf);
1187 }
1188 @@ -544,6 +561,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
1189 p += sz;
1190 count -= sz;
1191 written += sz;
1192 + if (should_stop_iteration())
1193 + break;
1194 }
1195
1196 *ppos += written;
1197 @@ -595,6 +614,8 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
1198 buf += sz;
1199 virtr += sz;
1200 p += sz;
1201 + if (should_stop_iteration())
1202 + break;
1203 }
1204 free_page((unsigned long)kbuf);
1205 }
1206 diff --git a/drivers/devfreq/exynos-bus.c b/drivers/devfreq/exynos-bus.c
1207 index c25658b26598..24a9658348d7 100644
1208 --- a/drivers/devfreq/exynos-bus.c
1209 +++ b/drivers/devfreq/exynos-bus.c
1210 @@ -194,11 +194,10 @@ static void exynos_bus_exit(struct device *dev)
1211 if (ret < 0)
1212 dev_warn(dev, "failed to disable the devfreq-event devices\n");
1213
1214 - if (bus->regulator)
1215 - regulator_disable(bus->regulator);
1216 -
1217 dev_pm_opp_of_remove_table(dev);
1218 clk_disable_unprepare(bus->clk);
1219 + if (bus->regulator)
1220 + regulator_disable(bus->regulator);
1221 }
1222
1223 /*
1224 @@ -386,6 +385,7 @@ static int exynos_bus_probe(struct platform_device *pdev)
1225 struct exynos_bus *bus;
1226 int ret, max_state;
1227 unsigned long min_freq, max_freq;
1228 + bool passive = false;
1229
1230 if (!np) {
1231 dev_err(dev, "failed to find devicetree node\n");
1232 @@ -399,27 +399,27 @@ static int exynos_bus_probe(struct platform_device *pdev)
1233 bus->dev = &pdev->dev;
1234 platform_set_drvdata(pdev, bus);
1235
1236 - /* Parse the device-tree to get the resource information */
1237 - ret = exynos_bus_parse_of(np, bus);
1238 - if (ret < 0)
1239 - return ret;
1240 -
1241 profile = devm_kzalloc(dev, sizeof(*profile), GFP_KERNEL);
1242 - if (!profile) {
1243 - ret = -ENOMEM;
1244 - goto err;
1245 - }
1246 + if (!profile)
1247 + return -ENOMEM;
1248
1249 node = of_parse_phandle(dev->of_node, "devfreq", 0);
1250 if (node) {
1251 of_node_put(node);
1252 - goto passive;
1253 + passive = true;
1254 } else {
1255 ret = exynos_bus_parent_parse_of(np, bus);
1256 + if (ret < 0)
1257 + return ret;
1258 }
1259
1260 + /* Parse the device-tree to get the resource information */
1261 + ret = exynos_bus_parse_of(np, bus);
1262 if (ret < 0)
1263 - goto err;
1264 + goto err_reg;
1265 +
1266 + if (passive)
1267 + goto passive;
1268
1269 /* Initialize the struct profile and governor data for parent device */
1270 profile->polling_ms = 50;
1271 @@ -510,6 +510,9 @@ out:
1272 err:
1273 dev_pm_opp_of_remove_table(dev);
1274 clk_disable_unprepare(bus->clk);
1275 +err_reg:
1276 + if (!passive)
1277 + regulator_disable(bus->regulator);
1278
1279 return ret;
1280 }
1281 diff --git a/drivers/devfreq/governor_passive.c b/drivers/devfreq/governor_passive.c
1282 index 3bc29acbd54e..8cfb69749d49 100644
1283 --- a/drivers/devfreq/governor_passive.c
1284 +++ b/drivers/devfreq/governor_passive.c
1285 @@ -152,7 +152,6 @@ static int devfreq_passive_notifier_call(struct notifier_block *nb,
1286 static int devfreq_passive_event_handler(struct devfreq *devfreq,
1287 unsigned int event, void *data)
1288 {
1289 - struct device *dev = devfreq->dev.parent;
1290 struct devfreq_passive_data *p_data
1291 = (struct devfreq_passive_data *)devfreq->data;
1292 struct devfreq *parent = (struct devfreq *)p_data->parent;
1293 @@ -168,12 +167,12 @@ static int devfreq_passive_event_handler(struct devfreq *devfreq,
1294 p_data->this = devfreq;
1295
1296 nb->notifier_call = devfreq_passive_notifier_call;
1297 - ret = devm_devfreq_register_notifier(dev, parent, nb,
1298 + ret = devfreq_register_notifier(parent, nb,
1299 DEVFREQ_TRANSITION_NOTIFIER);
1300 break;
1301 case DEVFREQ_GOV_STOP:
1302 - devm_devfreq_unregister_notifier(dev, parent, nb,
1303 - DEVFREQ_TRANSITION_NOTIFIER);
1304 + WARN_ON(devfreq_unregister_notifier(parent, nb,
1305 + DEVFREQ_TRANSITION_NOTIFIER));
1306 break;
1307 default:
1308 break;
1309 diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
1310 index 2b11d967acd0..9d782cc95c6a 100644
1311 --- a/drivers/dma/bcm2835-dma.c
1312 +++ b/drivers/dma/bcm2835-dma.c
1313 @@ -898,8 +898,10 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
1314 pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
1315
1316 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1317 - if (rc)
1318 + if (rc) {
1319 + dev_err(&pdev->dev, "Unable to set DMA mask\n");
1320 return rc;
1321 + }
1322
1323 od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
1324 if (!od)
1325 diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
1326 index a410657f7bcd..012584cf3c17 100644
1327 --- a/drivers/dma/iop-adma.c
1328 +++ b/drivers/dma/iop-adma.c
1329 @@ -125,9 +125,9 @@ static void __iop_adma_slot_cleanup(struct iop_adma_chan *iop_chan)
1330 list_for_each_entry_safe(iter, _iter, &iop_chan->chain,
1331 chain_node) {
1332 pr_debug("\tcookie: %d slot: %d busy: %d "
1333 - "this_desc: %#x next_desc: %#x ack: %d\n",
1334 + "this_desc: %#x next_desc: %#llx ack: %d\n",
1335 iter->async_tx.cookie, iter->idx, busy,
1336 - iter->async_tx.phys, iop_desc_get_next_desc(iter),
1337 + iter->async_tx.phys, (u64)iop_desc_get_next_desc(iter),
1338 async_tx_test_ack(&iter->async_tx));
1339 prefetch(_iter);
1340 prefetch(&_iter->async_tx);
1341 @@ -315,9 +315,9 @@ retry:
1342 int i;
1343 dev_dbg(iop_chan->device->common.dev,
1344 "allocated slot: %d "
1345 - "(desc %p phys: %#x) slots_per_op %d\n",
1346 + "(desc %p phys: %#llx) slots_per_op %d\n",
1347 iter->idx, iter->hw_desc,
1348 - iter->async_tx.phys, slots_per_op);
1349 + (u64)iter->async_tx.phys, slots_per_op);
1350
1351 /* pre-ack all but the last descriptor */
1352 if (num_slots != slots_per_op)
1353 @@ -525,7 +525,7 @@ iop_adma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
1354 return NULL;
1355 BUG_ON(len > IOP_ADMA_MAX_BYTE_COUNT);
1356
1357 - dev_dbg(iop_chan->device->common.dev, "%s len: %u\n",
1358 + dev_dbg(iop_chan->device->common.dev, "%s len: %zu\n",
1359 __func__, len);
1360
1361 spin_lock_bh(&iop_chan->lock);
1362 @@ -558,7 +558,7 @@ iop_adma_prep_dma_xor(struct dma_chan *chan, dma_addr_t dma_dest,
1363 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
1364
1365 dev_dbg(iop_chan->device->common.dev,
1366 - "%s src_cnt: %d len: %u flags: %lx\n",
1367 + "%s src_cnt: %d len: %zu flags: %lx\n",
1368 __func__, src_cnt, len, flags);
1369
1370 spin_lock_bh(&iop_chan->lock);
1371 @@ -591,7 +591,7 @@ iop_adma_prep_dma_xor_val(struct dma_chan *chan, dma_addr_t *dma_src,
1372 if (unlikely(!len))
1373 return NULL;
1374
1375 - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
1376 + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
1377 __func__, src_cnt, len);
1378
1379 spin_lock_bh(&iop_chan->lock);
1380 @@ -629,7 +629,7 @@ iop_adma_prep_dma_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
1381 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
1382
1383 dev_dbg(iop_chan->device->common.dev,
1384 - "%s src_cnt: %d len: %u flags: %lx\n",
1385 + "%s src_cnt: %d len: %zu flags: %lx\n",
1386 __func__, src_cnt, len, flags);
1387
1388 if (dmaf_p_disabled_continue(flags))
1389 @@ -692,7 +692,7 @@ iop_adma_prep_dma_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
1390 return NULL;
1391 BUG_ON(len > IOP_ADMA_XOR_MAX_BYTE_COUNT);
1392
1393 - dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %u\n",
1394 + dev_dbg(iop_chan->device->common.dev, "%s src_cnt: %d len: %zu\n",
1395 __func__, src_cnt, len);
1396
1397 spin_lock_bh(&iop_chan->lock);
1398 diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c
1399 index ceabdea40ae0..982631d4e1f8 100644
1400 --- a/drivers/dma/ti/edma.c
1401 +++ b/drivers/dma/ti/edma.c
1402 @@ -2273,9 +2273,6 @@ static int edma_probe(struct platform_device *pdev)
1403
1404 ecc->default_queue = info->default_queue;
1405
1406 - for (i = 0; i < ecc->num_slots; i++)
1407 - edma_write_slot(ecc, i, &dummy_paramset);
1408 -
1409 if (info->rsv) {
1410 /* Set the reserved slots in inuse list */
1411 rsv_slots = info->rsv->rsv_slots;
1412 @@ -2288,6 +2285,12 @@ static int edma_probe(struct platform_device *pdev)
1413 }
1414 }
1415
1416 + for (i = 0; i < ecc->num_slots; i++) {
1417 + /* Reset only unused - not reserved - paRAM slots */
1418 + if (!test_bit(i, ecc->slot_inuse))
1419 + edma_write_slot(ecc, i, &dummy_paramset);
1420 + }
1421 +
1422 /* Clear the xbar mapped channels in unused list */
1423 xbar_chans = info->xbar_chans;
1424 if (xbar_chans) {
1425 diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c
1426 index 5762c3c383f2..56de378ad13d 100644
1427 --- a/drivers/edac/altera_edac.c
1428 +++ b/drivers/edac/altera_edac.c
1429 @@ -1956,6 +1956,7 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
1430 struct altr_arria10_edac *edac = irq_desc_get_handler_data(desc);
1431 struct irq_chip *chip = irq_desc_get_chip(desc);
1432 int irq = irq_desc_get_irq(desc);
1433 + unsigned long bits;
1434
1435 dberr = (irq == edac->db_irq) ? 1 : 0;
1436 sm_offset = dberr ? A10_SYSMGR_ECC_INTSTAT_DERR_OFST :
1437 @@ -1965,7 +1966,8 @@ static void altr_edac_a10_irq_handler(struct irq_desc *desc)
1438
1439 regmap_read(edac->ecc_mgr_map, sm_offset, &irq_status);
1440
1441 - for_each_set_bit(bit, (unsigned long *)&irq_status, 32) {
1442 + bits = irq_status;
1443 + for_each_set_bit(bit, &bits, 32) {
1444 irq = irq_linear_revmap(edac->domain, dberr * 32 + bit);
1445 if (irq)
1446 generic_handle_irq(irq);
1447 diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
1448 index e2addb2bca29..94265e438514 100644
1449 --- a/drivers/edac/amd64_edac.c
1450 +++ b/drivers/edac/amd64_edac.c
1451 @@ -2501,13 +2501,6 @@ static void decode_umc_error(int node_id, struct mce *m)
1452 goto log_error;
1453 }
1454
1455 - if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
1456 - err.err_code = ERR_NORM_ADDR;
1457 - goto log_error;
1458 - }
1459 -
1460 - error_address_to_page_and_offset(sys_addr, &err);
1461 -
1462 if (!(m->status & MCI_STATUS_SYNDV)) {
1463 err.err_code = ERR_SYND;
1464 goto log_error;
1465 @@ -2524,6 +2517,13 @@ static void decode_umc_error(int node_id, struct mce *m)
1466
1467 err.csrow = m->synd & 0x7;
1468
1469 + if (umc_normaddr_to_sysaddr(m->addr, pvt->mc_node_id, err.channel, &sys_addr)) {
1470 + err.err_code = ERR_NORM_ADDR;
1471 + goto log_error;
1472 + }
1473 +
1474 + error_address_to_page_and_offset(sys_addr, &err);
1475 +
1476 log_error:
1477 __log_ecc_error(mci, &err, ecc_type);
1478 }
1479 @@ -3101,12 +3101,15 @@ static bool ecc_enabled(struct pci_dev *F3, u16 nid)
1480 static inline void
1481 f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
1482 {
1483 - u8 i, ecc_en = 1, cpk_en = 1;
1484 + u8 i, ecc_en = 1, cpk_en = 1, dev_x4 = 1, dev_x16 = 1;
1485
1486 for (i = 0; i < NUM_UMCS; i++) {
1487 if (pvt->umc[i].sdp_ctrl & UMC_SDP_INIT) {
1488 ecc_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_ENABLED);
1489 cpk_en &= !!(pvt->umc[i].umc_cap_hi & UMC_ECC_CHIPKILL_CAP);
1490 +
1491 + dev_x4 &= !!(pvt->umc[i].dimm_cfg & BIT(6));
1492 + dev_x16 &= !!(pvt->umc[i].dimm_cfg & BIT(7));
1493 }
1494 }
1495
1496 @@ -3114,8 +3117,15 @@ f17h_determine_edac_ctl_cap(struct mem_ctl_info *mci, struct amd64_pvt *pvt)
1497 if (ecc_en) {
1498 mci->edac_ctl_cap |= EDAC_FLAG_SECDED;
1499
1500 - if (cpk_en)
1501 + if (!cpk_en)
1502 + return;
1503 +
1504 + if (dev_x4)
1505 mci->edac_ctl_cap |= EDAC_FLAG_S4ECD4ED;
1506 + else if (dev_x16)
1507 + mci->edac_ctl_cap |= EDAC_FLAG_S16ECD16ED;
1508 + else
1509 + mci->edac_ctl_cap |= EDAC_FLAG_S8ECD8ED;
1510 }
1511 }
1512
1513 diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
1514 index 7d3edd713932..f59511bd9926 100644
1515 --- a/drivers/edac/edac_mc.c
1516 +++ b/drivers/edac/edac_mc.c
1517 @@ -1246,9 +1246,13 @@ void edac_mc_handle_error(const enum hw_event_mc_err_type type,
1518 if (p > e->location)
1519 *(p - 1) = '\0';
1520
1521 - /* Report the error via the trace interface */
1522 - grain_bits = fls_long(e->grain) + 1;
1523 + /* Sanity-check driver-supplied grain value. */
1524 + if (WARN_ON_ONCE(!e->grain))
1525 + e->grain = 1;
1526 +
1527 + grain_bits = fls_long(e->grain - 1);
1528
1529 + /* Report the error via the trace interface */
1530 if (IS_ENABLED(CONFIG_RAS))
1531 trace_mc_event(type, e->msg, e->label, e->error_count,
1532 mci->mc_idx, e->top_layer, e->mid_layer,
1533 diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
1534 index 903a4f1fadcc..0153c730750e 100644
1535 --- a/drivers/edac/pnd2_edac.c
1536 +++ b/drivers/edac/pnd2_edac.c
1537 @@ -268,11 +268,14 @@ static u64 get_sideband_reg_base_addr(void)
1538 }
1539 }
1540
1541 +#define DNV_MCHBAR_SIZE 0x8000
1542 +#define DNV_SB_PORT_SIZE 0x10000
1543 static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *name)
1544 {
1545 struct pci_dev *pdev;
1546 char *base;
1547 u64 addr;
1548 + unsigned long size;
1549
1550 if (op == 4) {
1551 pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x1980, NULL);
1552 @@ -287,15 +290,17 @@ static int dnv_rd_reg(int port, int off, int op, void *data, size_t sz, char *na
1553 addr = get_mem_ctrl_hub_base_addr();
1554 if (!addr)
1555 return -ENODEV;
1556 + size = DNV_MCHBAR_SIZE;
1557 } else {
1558 /* MMIO via sideband register base address */
1559 addr = get_sideband_reg_base_addr();
1560 if (!addr)
1561 return -ENODEV;
1562 addr += (port << 16);
1563 + size = DNV_SB_PORT_SIZE;
1564 }
1565
1566 - base = ioremap((resource_size_t)addr, 0x10000);
1567 + base = ioremap((resource_size_t)addr, size);
1568 if (!base)
1569 return -ENODEV;
1570
1571 diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c
1572 index 8f952f2f1a29..09119e3f5c01 100644
1573 --- a/drivers/firmware/arm_scmi/driver.c
1574 +++ b/drivers/firmware/arm_scmi/driver.c
1575 @@ -271,6 +271,14 @@ static void scmi_tx_prepare(struct mbox_client *cl, void *m)
1576 struct scmi_chan_info *cinfo = client_to_scmi_chan_info(cl);
1577 struct scmi_shared_mem __iomem *mem = cinfo->payload;
1578
1579 + /*
1580 + * Ideally channel must be free by now unless OS timeout last
1581 + * request and platform continued to process the same, wait
1582 + * until it releases the shared memory, otherwise we may endup
1583 + * overwriting its response with new message payload or vice-versa
1584 + */
1585 + spin_until_cond(ioread32(&mem->channel_status) &
1586 + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
1587 /* Mark channel busy + clear error */
1588 iowrite32(0x0, &mem->channel_status);
1589 iowrite32(t->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED,
1590 diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
1591 index 6090d25dce85..4045098ddb86 100644
1592 --- a/drivers/firmware/efi/cper.c
1593 +++ b/drivers/firmware/efi/cper.c
1594 @@ -402,6 +402,21 @@ static void cper_print_pcie(const char *pfx, const struct cper_sec_pcie *pcie,
1595 printk(
1596 "%s""bridge: secondary_status: 0x%04x, control: 0x%04x\n",
1597 pfx, pcie->bridge.secondary_status, pcie->bridge.control);
1598 +
1599 + /* Fatal errors call __ghes_panic() before AER handler prints this */
1600 + if ((pcie->validation_bits & CPER_PCIE_VALID_AER_INFO) &&
1601 + (gdata->error_severity & CPER_SEV_FATAL)) {
1602 + struct aer_capability_regs *aer;
1603 +
1604 + aer = (struct aer_capability_regs *)pcie->aer_info;
1605 + printk("%saer_uncor_status: 0x%08x, aer_uncor_mask: 0x%08x\n",
1606 + pfx, aer->uncor_status, aer->uncor_mask);
1607 + printk("%saer_uncor_severity: 0x%08x\n",
1608 + pfx, aer->uncor_severity);
1609 + printk("%sTLP Header: %08x %08x %08x %08x\n", pfx,
1610 + aer->header_log.dw0, aer->header_log.dw1,
1611 + aer->header_log.dw2, aer->header_log.dw3);
1612 + }
1613 }
1614
1615 static void cper_print_tstamp(const char *pfx,
1616 diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c
1617 index e778af766fae..98c987188835 100644
1618 --- a/drivers/firmware/qcom_scm.c
1619 +++ b/drivers/firmware/qcom_scm.c
1620 @@ -18,6 +18,7 @@
1621 #include <linux/init.h>
1622 #include <linux/cpumask.h>
1623 #include <linux/export.h>
1624 +#include <linux/dma-direct.h>
1625 #include <linux/dma-mapping.h>
1626 #include <linux/module.h>
1627 #include <linux/types.h>
1628 @@ -449,6 +450,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1629 phys_addr_t mem_to_map_phys;
1630 phys_addr_t dest_phys;
1631 phys_addr_t ptr_phys;
1632 + dma_addr_t ptr_dma;
1633 size_t mem_to_map_sz;
1634 size_t dest_sz;
1635 size_t src_sz;
1636 @@ -466,9 +468,10 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1637 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
1638 ALIGN(dest_sz, SZ_64);
1639
1640 - ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
1641 + ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_dma, GFP_KERNEL);
1642 if (!ptr)
1643 return -ENOMEM;
1644 + ptr_phys = dma_to_phys(__scm->dev, ptr_dma);
1645
1646 /* Fill source vmid detail */
1647 src = ptr;
1648 @@ -498,7 +501,7 @@ int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
1649
1650 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
1651 ptr_phys, src_sz, dest_phys, dest_sz);
1652 - dma_free_coherent(__scm->dev, ALIGN(ptr_sz, SZ_64), ptr, ptr_phys);
1653 + dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_dma);
1654 if (ret) {
1655 dev_err(__scm->dev,
1656 "Assign memory protection call failed %d.\n", ret);
1657 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1658 index 221de241535a..3b07a316680c 100644
1659 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1660 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1661 @@ -1462,6 +1462,7 @@ static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
1662 }
1663
1664 static const struct backlight_ops amdgpu_dm_backlight_ops = {
1665 + .options = BL_CORE_SUSPENDRESUME,
1666 .get_brightness = amdgpu_dm_backlight_get_brightness,
1667 .update_status = amdgpu_dm_backlight_update_status,
1668 };
1669 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
1670 index b52ccab428a9..c7c505095402 100644
1671 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
1672 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
1673 @@ -4052,6 +4052,11 @@ static int smu7_program_display_gap(struct pp_hwmgr *hwmgr)
1674
1675 data->frame_time_x2 = frame_time_in_us * 2 / 100;
1676
1677 + if (data->frame_time_x2 < 280) {
1678 + pr_debug("%s: enforce minimal VBITimeout: %d -> 280\n", __func__, data->frame_time_x2);
1679 + data->frame_time_x2 = 280;
1680 + }
1681 +
1682 display_gap2 = pre_vbi_time_in_us * (ref_clock / 100);
1683
1684 cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCG_DISPLAY_GAP_CNTL2, display_gap2);
1685 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
1686 index 34e45b97629e..2f2fb1966958 100644
1687 --- a/drivers/hwmon/acpi_power_meter.c
1688 +++ b/drivers/hwmon/acpi_power_meter.c
1689 @@ -694,8 +694,8 @@ static int setup_attrs(struct acpi_power_meter_resource *resource)
1690
1691 if (resource->caps.flags & POWER_METER_CAN_CAP) {
1692 if (!can_cap_in_hardware()) {
1693 - dev_err(&resource->acpi_dev->dev,
1694 - "Ignoring unsafe software power cap!\n");
1695 + dev_warn(&resource->acpi_dev->dev,
1696 + "Ignoring unsafe software power cap!\n");
1697 goto skip_unsafe_cap;
1698 }
1699
1700 diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
1701 index b75ff144b570..e6f351c92c02 100644
1702 --- a/drivers/i2c/busses/i2c-riic.c
1703 +++ b/drivers/i2c/busses/i2c-riic.c
1704 @@ -203,6 +203,7 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
1705 if (readb(riic->base + RIIC_ICSR2) & ICSR2_NACKF) {
1706 /* We got a NACKIE */
1707 readb(riic->base + RIIC_ICDRR); /* dummy read */
1708 + riic_clear_set_bit(riic, ICSR2_NACKF, 0, RIIC_ICSR2);
1709 riic->err = -ENXIO;
1710 } else if (riic->bytes_left) {
1711 return IRQ_NONE;
1712 diff --git a/drivers/infiniband/hw/hfi1/mad.c b/drivers/infiniband/hw/hfi1/mad.c
1713 index 0307405491e0..f208a25d0e4f 100644
1714 --- a/drivers/infiniband/hw/hfi1/mad.c
1715 +++ b/drivers/infiniband/hw/hfi1/mad.c
1716 @@ -2326,7 +2326,7 @@ struct opa_port_status_req {
1717 __be32 vl_select_mask;
1718 };
1719
1720 -#define VL_MASK_ALL 0x000080ff
1721 +#define VL_MASK_ALL 0x00000000000080ffUL
1722
1723 struct opa_port_status_rsp {
1724 __u8 port_num;
1725 @@ -2625,15 +2625,14 @@ static int pma_get_opa_classportinfo(struct opa_pma_mad *pmp,
1726 }
1727
1728 static void a0_portstatus(struct hfi1_pportdata *ppd,
1729 - struct opa_port_status_rsp *rsp, u32 vl_select_mask)
1730 + struct opa_port_status_rsp *rsp)
1731 {
1732 if (!is_bx(ppd->dd)) {
1733 unsigned long vl;
1734 u64 sum_vl_xmit_wait = 0;
1735 - u32 vl_all_mask = VL_MASK_ALL;
1736 + unsigned long vl_all_mask = VL_MASK_ALL;
1737
1738 - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
1739 - 8 * sizeof(vl_all_mask)) {
1740 + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
1741 u64 tmp = sum_vl_xmit_wait +
1742 read_port_cntr(ppd, C_TX_WAIT_VL,
1743 idx_from_vl(vl));
1744 @@ -2730,12 +2729,12 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
1745 (struct opa_port_status_req *)pmp->data;
1746 struct hfi1_devdata *dd = dd_from_ibdev(ibdev);
1747 struct opa_port_status_rsp *rsp;
1748 - u32 vl_select_mask = be32_to_cpu(req->vl_select_mask);
1749 + unsigned long vl_select_mask = be32_to_cpu(req->vl_select_mask);
1750 unsigned long vl;
1751 size_t response_data_size;
1752 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
1753 u8 port_num = req->port_num;
1754 - u8 num_vls = hweight32(vl_select_mask);
1755 + u8 num_vls = hweight64(vl_select_mask);
1756 struct _vls_pctrs *vlinfo;
1757 struct hfi1_ibport *ibp = to_iport(ibdev, port);
1758 struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
1759 @@ -2771,7 +2770,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
1760
1761 hfi1_read_link_quality(dd, &rsp->link_quality_indicator);
1762
1763 - rsp->vl_select_mask = cpu_to_be32(vl_select_mask);
1764 + rsp->vl_select_mask = cpu_to_be32((u32)vl_select_mask);
1765 rsp->port_xmit_data = cpu_to_be64(read_dev_cntr(dd, C_DC_XMIT_FLITS,
1766 CNTR_INVALID_VL));
1767 rsp->port_rcv_data = cpu_to_be64(read_dev_cntr(dd, C_DC_RCV_FLITS,
1768 @@ -2842,8 +2841,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
1769 * So in the for_each_set_bit() loop below, we don't need
1770 * any additional checks for vl.
1771 */
1772 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
1773 - 8 * sizeof(vl_select_mask)) {
1774 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
1775 memset(vlinfo, 0, sizeof(*vlinfo));
1776
1777 tmp = read_dev_cntr(dd, C_DC_RX_FLIT_VL, idx_from_vl(vl));
1778 @@ -2884,7 +2882,7 @@ static int pma_get_opa_portstatus(struct opa_pma_mad *pmp,
1779 vfi++;
1780 }
1781
1782 - a0_portstatus(ppd, rsp, vl_select_mask);
1783 + a0_portstatus(ppd, rsp);
1784
1785 if (resp_len)
1786 *resp_len += response_data_size;
1787 @@ -2931,16 +2929,14 @@ static u64 get_error_counter_summary(struct ib_device *ibdev, u8 port,
1788 return error_counter_summary;
1789 }
1790
1791 -static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp,
1792 - u32 vl_select_mask)
1793 +static void a0_datacounters(struct hfi1_pportdata *ppd, struct _port_dctrs *rsp)
1794 {
1795 if (!is_bx(ppd->dd)) {
1796 unsigned long vl;
1797 u64 sum_vl_xmit_wait = 0;
1798 - u32 vl_all_mask = VL_MASK_ALL;
1799 + unsigned long vl_all_mask = VL_MASK_ALL;
1800
1801 - for_each_set_bit(vl, (unsigned long *)&(vl_all_mask),
1802 - 8 * sizeof(vl_all_mask)) {
1803 + for_each_set_bit(vl, &vl_all_mask, BITS_PER_LONG) {
1804 u64 tmp = sum_vl_xmit_wait +
1805 read_port_cntr(ppd, C_TX_WAIT_VL,
1806 idx_from_vl(vl));
1807 @@ -2995,7 +2991,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
1808 u64 port_mask;
1809 u8 port_num;
1810 unsigned long vl;
1811 - u32 vl_select_mask;
1812 + unsigned long vl_select_mask;
1813 int vfi;
1814 u16 link_width;
1815 u16 link_speed;
1816 @@ -3073,8 +3069,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
1817 * So in the for_each_set_bit() loop below, we don't need
1818 * any additional checks for vl.
1819 */
1820 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
1821 - 8 * sizeof(req->vl_select_mask)) {
1822 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
1823 memset(vlinfo, 0, sizeof(*vlinfo));
1824
1825 rsp->vls[vfi].port_vl_xmit_data =
1826 @@ -3122,7 +3117,7 @@ static int pma_get_opa_datacounters(struct opa_pma_mad *pmp,
1827 vfi++;
1828 }
1829
1830 - a0_datacounters(ppd, rsp, vl_select_mask);
1831 + a0_datacounters(ppd, rsp);
1832
1833 if (resp_len)
1834 *resp_len += response_data_size;
1835 @@ -3217,7 +3212,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
1836 struct _vls_ectrs *vlinfo;
1837 unsigned long vl;
1838 u64 port_mask, tmp;
1839 - u32 vl_select_mask;
1840 + unsigned long vl_select_mask;
1841 int vfi;
1842
1843 req = (struct opa_port_error_counters64_msg *)pmp->data;
1844 @@ -3276,8 +3271,7 @@ static int pma_get_opa_porterrors(struct opa_pma_mad *pmp,
1845 vlinfo = &rsp->vls[0];
1846 vfi = 0;
1847 vl_select_mask = be32_to_cpu(req->vl_select_mask);
1848 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
1849 - 8 * sizeof(req->vl_select_mask)) {
1850 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
1851 memset(vlinfo, 0, sizeof(*vlinfo));
1852 rsp->vls[vfi].port_vl_xmit_discards =
1853 cpu_to_be64(read_port_cntr(ppd, C_SW_XMIT_DSCD_VL,
1854 @@ -3488,7 +3482,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
1855 u32 nports = be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24;
1856 u64 portn = be64_to_cpu(req->port_select_mask[3]);
1857 u32 counter_select = be32_to_cpu(req->counter_select_mask);
1858 - u32 vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
1859 + unsigned long vl_select_mask = VL_MASK_ALL; /* clear all per-vl cnts */
1860 unsigned long vl;
1861
1862 if ((nports != 1) || (portn != 1 << port)) {
1863 @@ -3582,8 +3576,7 @@ static int pma_set_opa_portstatus(struct opa_pma_mad *pmp,
1864 if (counter_select & CS_UNCORRECTABLE_ERRORS)
1865 write_dev_cntr(dd, C_DC_UNC_ERR, CNTR_INVALID_VL, 0);
1866
1867 - for_each_set_bit(vl, (unsigned long *)&(vl_select_mask),
1868 - 8 * sizeof(vl_select_mask)) {
1869 + for_each_set_bit(vl, &vl_select_mask, BITS_PER_LONG) {
1870 if (counter_select & CS_PORT_XMIT_DATA)
1871 write_port_cntr(ppd, C_TX_FLIT_VL, idx_from_vl(vl), 0);
1872
1873 diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1874 index 53eccc0da8fd..c05eae93170e 100644
1875 --- a/drivers/infiniband/hw/mlx5/main.c
1876 +++ b/drivers/infiniband/hw/mlx5/main.c
1877 @@ -6370,6 +6370,7 @@ static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
1878 mlx5_ib_unbind_slave_port(mpi->ibdev, mpi);
1879 list_del(&mpi->list);
1880 mutex_unlock(&mlx5_ib_multiport_mutex);
1881 + kfree(mpi);
1882 return;
1883 }
1884
1885 diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
1886 index ab5eba6edf82..e13ea199f589 100644
1887 --- a/drivers/iommu/Makefile
1888 +++ b/drivers/iommu/Makefile
1889 @@ -10,7 +10,7 @@ obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o
1890 obj-$(CONFIG_IOMMU_IOVA) += iova.o
1891 obj-$(CONFIG_OF_IOMMU) += of_iommu.o
1892 obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
1893 -obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o
1894 +obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
1895 obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
1896 obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
1897 obj-$(CONFIG_ARM_SMMU) += arm-smmu.o
1898 diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
1899 index 69c269dc4f1b..1f2ed44de243 100644
1900 --- a/drivers/iommu/amd_iommu.c
1901 +++ b/drivers/iommu/amd_iommu.c
1902 @@ -2563,7 +2563,9 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
1903
1904 bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
1905 phys_addr = (sg_phys(s) & PAGE_MASK) + (j << PAGE_SHIFT);
1906 - ret = iommu_map_page(domain, bus_addr, phys_addr, PAGE_SIZE, prot, GFP_ATOMIC);
1907 + ret = iommu_map_page(domain, bus_addr, phys_addr,
1908 + PAGE_SIZE, prot,
1909 + GFP_ATOMIC | __GFP_NOWARN);
1910 if (ret)
1911 goto out_unmap;
1912
1913 diff --git a/drivers/iommu/amd_iommu.h b/drivers/iommu/amd_iommu.h
1914 new file mode 100644
1915 index 000000000000..12d540d9b59b
1916 --- /dev/null
1917 +++ b/drivers/iommu/amd_iommu.h
1918 @@ -0,0 +1,14 @@
1919 +/* SPDX-License-Identifier: GPL-2.0-only */
1920 +
1921 +#ifndef AMD_IOMMU_H
1922 +#define AMD_IOMMU_H
1923 +
1924 +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line);
1925 +
1926 +#ifdef CONFIG_DMI
1927 +void amd_iommu_apply_ivrs_quirks(void);
1928 +#else
1929 +static void amd_iommu_apply_ivrs_quirks(void) { }
1930 +#endif
1931 +
1932 +#endif
1933 diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
1934 index 66b4800bcdd8..1e9a5da562f0 100644
1935 --- a/drivers/iommu/amd_iommu_init.c
1936 +++ b/drivers/iommu/amd_iommu_init.c
1937 @@ -39,6 +39,7 @@
1938 #include <asm/irq_remapping.h>
1939
1940 #include <linux/crash_dump.h>
1941 +#include "amd_iommu.h"
1942 #include "amd_iommu_proto.h"
1943 #include "amd_iommu_types.h"
1944 #include "irq_remapping.h"
1945 @@ -1002,7 +1003,7 @@ static void __init set_dev_entry_from_acpi(struct amd_iommu *iommu,
1946 set_iommu_for_device(iommu, devid);
1947 }
1948
1949 -static int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1950 +int __init add_special_device(u8 type, u8 id, u16 *devid, bool cmd_line)
1951 {
1952 struct devid_map *entry;
1953 struct list_head *list;
1954 @@ -1153,6 +1154,8 @@ static int __init init_iommu_from_acpi(struct amd_iommu *iommu,
1955 if (ret)
1956 return ret;
1957
1958 + amd_iommu_apply_ivrs_quirks();
1959 +
1960 /*
1961 * First save the recommended feature enable bits from ACPI
1962 */
1963 diff --git a/drivers/iommu/amd_iommu_quirks.c b/drivers/iommu/amd_iommu_quirks.c
1964 new file mode 100644
1965 index 000000000000..c235f79b7a20
1966 --- /dev/null
1967 +++ b/drivers/iommu/amd_iommu_quirks.c
1968 @@ -0,0 +1,92 @@
1969 +/* SPDX-License-Identifier: GPL-2.0-only */
1970 +
1971 +/*
1972 + * Quirks for AMD IOMMU
1973 + *
1974 + * Copyright (C) 2019 Kai-Heng Feng <kai.heng.feng@canonical.com>
1975 + */
1976 +
1977 +#ifdef CONFIG_DMI
1978 +#include <linux/dmi.h>
1979 +
1980 +#include "amd_iommu.h"
1981 +
1982 +#define IVHD_SPECIAL_IOAPIC 1
1983 +
1984 +struct ivrs_quirk_entry {
1985 + u8 id;
1986 + u16 devid;
1987 +};
1988 +
1989 +enum {
1990 + DELL_INSPIRON_7375 = 0,
1991 + DELL_LATITUDE_5495,
1992 + LENOVO_IDEAPAD_330S_15ARR,
1993 +};
1994 +
1995 +static const struct ivrs_quirk_entry ivrs_ioapic_quirks[][3] __initconst = {
1996 + /* ivrs_ioapic[4]=00:14.0 ivrs_ioapic[5]=00:00.2 */
1997 + [DELL_INSPIRON_7375] = {
1998 + { .id = 4, .devid = 0xa0 },
1999 + { .id = 5, .devid = 0x2 },
2000 + {}
2001 + },
2002 + /* ivrs_ioapic[4]=00:14.0 */
2003 + [DELL_LATITUDE_5495] = {
2004 + { .id = 4, .devid = 0xa0 },
2005 + {}
2006 + },
2007 + /* ivrs_ioapic[32]=00:14.0 */
2008 + [LENOVO_IDEAPAD_330S_15ARR] = {
2009 + { .id = 32, .devid = 0xa0 },
2010 + {}
2011 + },
2012 + {}
2013 +};
2014 +
2015 +static int __init ivrs_ioapic_quirk_cb(const struct dmi_system_id *d)
2016 +{
2017 + const struct ivrs_quirk_entry *i;
2018 +
2019 + for (i = d->driver_data; i->id != 0 && i->devid != 0; i++)
2020 + add_special_device(IVHD_SPECIAL_IOAPIC, i->id, (u16 *)&i->devid, 0);
2021 +
2022 + return 0;
2023 +}
2024 +
2025 +static const struct dmi_system_id ivrs_quirks[] __initconst = {
2026 + {
2027 + .callback = ivrs_ioapic_quirk_cb,
2028 + .ident = "Dell Inspiron 7375",
2029 + .matches = {
2030 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2031 + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7375"),
2032 + },
2033 + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_INSPIRON_7375],
2034 + },
2035 + {
2036 + .callback = ivrs_ioapic_quirk_cb,
2037 + .ident = "Dell Latitude 5495",
2038 + .matches = {
2039 + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
2040 + DMI_MATCH(DMI_PRODUCT_NAME, "Latitude 5495"),
2041 + },
2042 + .driver_data = (void *)&ivrs_ioapic_quirks[DELL_LATITUDE_5495],
2043 + },
2044 + {
2045 + .callback = ivrs_ioapic_quirk_cb,
2046 + .ident = "Lenovo ideapad 330S-15ARR",
2047 + .matches = {
2048 + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
2049 + DMI_MATCH(DMI_PRODUCT_NAME, "81FB"),
2050 + },
2051 + .driver_data = (void *)&ivrs_ioapic_quirks[LENOVO_IDEAPAD_330S_15ARR],
2052 + },
2053 + {}
2054 +};
2055 +
2056 +void __init amd_iommu_apply_ivrs_quirks(void)
2057 +{
2058 + dmi_check_system(ivrs_quirks);
2059 +}
2060 +#endif
2061 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
2062 index 9a576ae837dc..da4516fbf542 100644
2063 --- a/drivers/iommu/iova.c
2064 +++ b/drivers/iommu/iova.c
2065 @@ -580,7 +580,9 @@ void queue_iova(struct iova_domain *iovad,
2066
2067 spin_unlock_irqrestore(&fq->lock, flags);
2068
2069 - if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
2070 + /* Avoid false sharing as much as possible. */
2071 + if (!atomic_read(&iovad->fq_timer_on) &&
2072 + !atomic_cmpxchg(&iovad->fq_timer_on, 0, 1))
2073 mod_timer(&iovad->fq_timer,
2074 jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
2075 }
2076 diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
2077 index a73337b74f41..db588a79a9f0 100644
2078 --- a/drivers/isdn/mISDN/socket.c
2079 +++ b/drivers/isdn/mISDN/socket.c
2080 @@ -764,6 +764,8 @@ base_sock_create(struct net *net, struct socket *sock, int protocol, int kern)
2081
2082 if (sock->type != SOCK_RAW)
2083 return -ESOCKTNOSUPPORT;
2084 + if (!capable(CAP_NET_RAW))
2085 + return -EPERM;
2086
2087 sk = sk_alloc(net, PF_ISDN, GFP_KERNEL, &mISDN_proto, kern);
2088 if (!sk)
2089 diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c
2090 index 17d73db1456e..e4cb3811e82a 100644
2091 --- a/drivers/leds/led-triggers.c
2092 +++ b/drivers/leds/led-triggers.c
2093 @@ -177,6 +177,7 @@ err_activate:
2094 list_del(&led_cdev->trig_list);
2095 write_unlock_irqrestore(&led_cdev->trigger->leddev_list_lock, flags);
2096 led_set_brightness(led_cdev, LED_OFF);
2097 + kfree(event);
2098
2099 return ret;
2100 }
2101 diff --git a/drivers/leds/leds-lp5562.c b/drivers/leds/leds-lp5562.c
2102 index 2a9009fe5545..18edc8bdc9f7 100644
2103 --- a/drivers/leds/leds-lp5562.c
2104 +++ b/drivers/leds/leds-lp5562.c
2105 @@ -263,7 +263,11 @@ static void lp5562_firmware_loaded(struct lp55xx_chip *chip)
2106 {
2107 const struct firmware *fw = chip->fw;
2108
2109 - if (fw->size > LP5562_PROGRAM_LENGTH) {
2110 + /*
2111 + * The firmware is encoded in ASCII hex characters, with 2 chars
2112 + * per byte
2113 + */
2114 + if (fw->size > (LP5562_PROGRAM_LENGTH * 2)) {
2115 dev_err(&chip->cl->dev, "firmware data size overflow: %zu\n",
2116 fw->size);
2117 return;
2118 diff --git a/drivers/md/bcache/closure.c b/drivers/md/bcache/closure.c
2119 index 73f5319295bc..c12cd809ab19 100644
2120 --- a/drivers/md/bcache/closure.c
2121 +++ b/drivers/md/bcache/closure.c
2122 @@ -105,8 +105,14 @@ struct closure_syncer {
2123
2124 static void closure_sync_fn(struct closure *cl)
2125 {
2126 - cl->s->done = 1;
2127 - wake_up_process(cl->s->task);
2128 + struct closure_syncer *s = cl->s;
2129 + struct task_struct *p;
2130 +
2131 + rcu_read_lock();
2132 + p = READ_ONCE(s->task);
2133 + s->done = 1;
2134 + wake_up_process(p);
2135 + rcu_read_unlock();
2136 }
2137
2138 void __sched __closure_sync(struct closure *cl)
2139 diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
2140 index 17c6a73c536c..4d36373e1c0f 100644
2141 --- a/drivers/md/dm-rq.c
2142 +++ b/drivers/md/dm-rq.c
2143 @@ -505,6 +505,7 @@ check_again:
2144 ret = dm_dispatch_clone_request(clone, rq);
2145 if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
2146 blk_rq_unprep_clone(clone);
2147 + blk_mq_cleanup_rq(clone);
2148 tio->ti->type->release_clone_rq(clone, &tio->info);
2149 tio->clone = NULL;
2150 if (!rq->q->mq_ops)
2151 diff --git a/drivers/md/md.c b/drivers/md/md.c
2152 index fb5d702e43b5..a8fbaa384e9a 100644
2153 --- a/drivers/md/md.c
2154 +++ b/drivers/md/md.c
2155 @@ -1770,8 +1770,15 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
2156 if (!(le32_to_cpu(sb->feature_map) &
2157 MD_FEATURE_RECOVERY_BITMAP))
2158 rdev->saved_raid_disk = -1;
2159 - } else
2160 - set_bit(In_sync, &rdev->flags);
2161 + } else {
2162 + /*
2163 + * If the array is FROZEN, then the device can't
2164 + * be in_sync with the rest of the array.
2165 + */
2166 + if (!test_bit(MD_RECOVERY_FROZEN,
2167 + &mddev->recovery))
2168 + set_bit(In_sync, &rdev->flags);
2169 + }
2170 rdev->raid_disk = role;
2171 break;
2172 }
2173 @@ -4116,7 +4123,7 @@ array_state_show(struct mddev *mddev, char *page)
2174 {
2175 enum array_state st = inactive;
2176
2177 - if (mddev->pers)
2178 + if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags))
2179 switch(mddev->ro) {
2180 case 1:
2181 st = readonly;
2182 @@ -5671,9 +5678,6 @@ int md_run(struct mddev *mddev)
2183 md_update_sb(mddev, 0);
2184
2185 md_new_event(mddev);
2186 - sysfs_notify_dirent_safe(mddev->sysfs_state);
2187 - sysfs_notify_dirent_safe(mddev->sysfs_action);
2188 - sysfs_notify(&mddev->kobj, NULL, "degraded");
2189 return 0;
2190
2191 abort:
2192 @@ -5687,6 +5691,7 @@ static int do_md_run(struct mddev *mddev)
2193 {
2194 int err;
2195
2196 + set_bit(MD_NOT_READY, &mddev->flags);
2197 err = md_run(mddev);
2198 if (err)
2199 goto out;
2200 @@ -5707,9 +5712,14 @@ static int do_md_run(struct mddev *mddev)
2201
2202 set_capacity(mddev->gendisk, mddev->array_sectors);
2203 revalidate_disk(mddev->gendisk);
2204 + clear_bit(MD_NOT_READY, &mddev->flags);
2205 mddev->changed = 1;
2206 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
2207 + sysfs_notify_dirent_safe(mddev->sysfs_state);
2208 + sysfs_notify_dirent_safe(mddev->sysfs_action);
2209 + sysfs_notify(&mddev->kobj, NULL, "degraded");
2210 out:
2211 + clear_bit(MD_NOT_READY, &mddev->flags);
2212 return err;
2213 }
2214
2215 @@ -8797,6 +8807,7 @@ void md_check_recovery(struct mddev *mddev)
2216
2217 if (mddev_trylock(mddev)) {
2218 int spares = 0;
2219 + bool try_set_sync = mddev->safemode != 0;
2220
2221 if (!mddev->external && mddev->safemode == 1)
2222 mddev->safemode = 0;
2223 @@ -8842,7 +8853,7 @@ void md_check_recovery(struct mddev *mddev)
2224 }
2225 }
2226
2227 - if (!mddev->external && !mddev->in_sync) {
2228 + if (try_set_sync && !mddev->external && !mddev->in_sync) {
2229 spin_lock(&mddev->lock);
2230 set_in_sync(mddev);
2231 spin_unlock(&mddev->lock);
2232 @@ -8948,7 +8959,8 @@ void md_reap_sync_thread(struct mddev *mddev)
2233 /* resync has finished, collect result */
2234 md_unregister_thread(&mddev->sync_thread);
2235 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
2236 - !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2237 + !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2238 + mddev->degraded != mddev->raid_disks) {
2239 /* success...*/
2240 /* activate any spares */
2241 if (mddev->pers->spare_active(mddev)) {
2242 diff --git a/drivers/md/md.h b/drivers/md/md.h
2243 index 325cb2136a49..4f89463e0b01 100644
2244 --- a/drivers/md/md.h
2245 +++ b/drivers/md/md.h
2246 @@ -243,6 +243,9 @@ enum mddev_flags {
2247 MD_UPDATING_SB, /* md_check_recovery is updating the metadata
2248 * without explicitly holding reconfig_mutex.
2249 */
2250 + MD_NOT_READY, /* do_md_run() is active, so 'array_state'
2251 + * must not report that the array is ready yet
2252 + */
2253 };
2254
2255 enum mddev_sb_flags {
2256 diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
2257 index f4daa56d204d..43fa7dbf844b 100644
2258 --- a/drivers/md/raid0.c
2259 +++ b/drivers/md/raid0.c
2260 @@ -26,6 +26,9 @@
2261 #include "raid0.h"
2262 #include "raid5.h"
2263
2264 +static int default_layout = 0;
2265 +module_param(default_layout, int, 0644);
2266 +
2267 #define UNSUPPORTED_MDDEV_FLAGS \
2268 ((1L << MD_HAS_JOURNAL) | \
2269 (1L << MD_JOURNAL_CLEAN) | \
2270 @@ -146,6 +149,19 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
2271 }
2272 pr_debug("md/raid0:%s: FINAL %d zones\n",
2273 mdname(mddev), conf->nr_strip_zones);
2274 +
2275 + if (conf->nr_strip_zones == 1) {
2276 + conf->layout = RAID0_ORIG_LAYOUT;
2277 + } else if (default_layout == RAID0_ORIG_LAYOUT ||
2278 + default_layout == RAID0_ALT_MULTIZONE_LAYOUT) {
2279 + conf->layout = default_layout;
2280 + } else {
2281 + pr_err("md/raid0:%s: cannot assemble multi-zone RAID0 with default_layout setting\n",
2282 + mdname(mddev));
2283 + pr_err("md/raid0: please set raid.default_layout to 1 or 2\n");
2284 + err = -ENOTSUPP;
2285 + goto abort;
2286 + }
2287 /*
2288 * now since we have the hard sector sizes, we can make sure
2289 * chunk size is a multiple of that sector size
2290 @@ -555,10 +571,12 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
2291
2292 static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
2293 {
2294 + struct r0conf *conf = mddev->private;
2295 struct strip_zone *zone;
2296 struct md_rdev *tmp_dev;
2297 sector_t bio_sector;
2298 sector_t sector;
2299 + sector_t orig_sector;
2300 unsigned chunk_sects;
2301 unsigned sectors;
2302
2303 @@ -592,8 +610,21 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
2304 bio = split;
2305 }
2306
2307 + orig_sector = sector;
2308 zone = find_zone(mddev->private, &sector);
2309 - tmp_dev = map_sector(mddev, zone, sector, &sector);
2310 + switch (conf->layout) {
2311 + case RAID0_ORIG_LAYOUT:
2312 + tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
2313 + break;
2314 + case RAID0_ALT_MULTIZONE_LAYOUT:
2315 + tmp_dev = map_sector(mddev, zone, sector, &sector);
2316 + break;
2317 + default:
2318 + WARN(1, "md/raid0:%s: Invalid layout\n", mdname(mddev));
2319 + bio_io_error(bio);
2320 + return true;
2321 + }
2322 +
2323 bio_set_dev(bio, tmp_dev->bdev);
2324 bio->bi_iter.bi_sector = sector + zone->dev_start +
2325 tmp_dev->data_offset;
2326 diff --git a/drivers/md/raid0.h b/drivers/md/raid0.h
2327 index 540e65d92642..3816e5477db1 100644
2328 --- a/drivers/md/raid0.h
2329 +++ b/drivers/md/raid0.h
2330 @@ -8,11 +8,25 @@ struct strip_zone {
2331 int nb_dev; /* # of devices attached to the zone */
2332 };
2333
2334 +/* Linux 3.14 (20d0189b101) made an unintended change to
2335 + * the RAID0 layout for multi-zone arrays (where devices aren't all
2336 + * the same size).
2337 + * RAID0_ORIG_LAYOUT restores the original layout
2338 + * RAID0_ALT_MULTIZONE_LAYOUT uses the altered layout
2339 + * The layouts are identical when there is only one zone (all
2340 + * devices the same size).
2341 + */
2342 +
2343 +enum r0layout {
2344 + RAID0_ORIG_LAYOUT = 1,
2345 + RAID0_ALT_MULTIZONE_LAYOUT = 2,
2346 +};
2347 struct r0conf {
2348 struct strip_zone *strip_zone;
2349 struct md_rdev **devlist; /* lists of rdevs, pointed to
2350 * by strip_zone->dev */
2351 int nr_strip_zones;
2352 + enum r0layout layout;
2353 };
2354
2355 #endif
2356 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
2357 index fa47249fa3e4..6929d110d804 100644
2358 --- a/drivers/md/raid1.c
2359 +++ b/drivers/md/raid1.c
2360 @@ -434,19 +434,21 @@ static void raid1_end_write_request(struct bio *bio)
2361 /* We never try FailFast to WriteMostly devices */
2362 !test_bit(WriteMostly, &rdev->flags)) {
2363 md_error(r1_bio->mddev, rdev);
2364 - if (!test_bit(Faulty, &rdev->flags))
2365 - /* This is the only remaining device,
2366 - * We need to retry the write without
2367 - * FailFast
2368 - */
2369 - set_bit(R1BIO_WriteError, &r1_bio->state);
2370 - else {
2371 - /* Finished with this branch */
2372 - r1_bio->bios[mirror] = NULL;
2373 - to_put = bio;
2374 - }
2375 - } else
2376 + }
2377 +
2378 + /*
2379 + * When the device is faulty, it is not necessary to
2380 + * handle the write error.
2381 + * For failfast, this is the only remaining device,
2382 + * so we need to retry the write without FailFast.
2383 + */
2384 + if (!test_bit(Faulty, &rdev->flags))
2385 set_bit(R1BIO_WriteError, &r1_bio->state);
2386 + else {
2387 + /* Finished with this branch */
2388 + r1_bio->bios[mirror] = NULL;
2389 + to_put = bio;
2390 + }
2391 } else {
2392 /*
2393 * Set R1BIO_Uptodate in our master bio, so that we
2394 @@ -3103,6 +3105,13 @@ static int raid1_run(struct mddev *mddev)
2395 !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
2396 test_bit(Faulty, &conf->mirrors[i].rdev->flags))
2397 mddev->degraded++;
2398 + /*
2399 + * RAID1 needs at least one active disk
2400 + */
2401 + if (conf->raid_disks - mddev->degraded < 1) {
2402 + ret = -EINVAL;
2403 + goto abort;
2404 + }
2405
2406 if (conf->raid_disks - mddev->degraded == 1)
2407 mddev->recovery_cp = MaxSector;
2408 @@ -3136,8 +3145,12 @@ static int raid1_run(struct mddev *mddev)
2409 ret = md_integrity_register(mddev);
2410 if (ret) {
2411 md_unregister_thread(&mddev->thread);
2412 - raid1_free(mddev, conf);
2413 + goto abort;
2414 }
2415 + return 0;
2416 +
2417 +abort:
2418 + raid1_free(mddev, conf);
2419 return ret;
2420 }
2421
2422 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
2423 index a147619498df..4a5aad26ded7 100644
2424 --- a/drivers/md/raid5.c
2425 +++ b/drivers/md/raid5.c
2426 @@ -2540,7 +2540,8 @@ static void raid5_end_read_request(struct bio * bi)
2427 int set_bad = 0;
2428
2429 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
2430 - atomic_inc(&rdev->read_errors);
2431 + if (!(bi->bi_status == BLK_STS_PROTECTION))
2432 + atomic_inc(&rdev->read_errors);
2433 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
2434 pr_warn_ratelimited(
2435 "md/raid:%s: read error on replacement device (sector %llu on %s).\n",
2436 @@ -2572,7 +2573,9 @@ static void raid5_end_read_request(struct bio * bi)
2437 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
2438 retry = 1;
2439 if (retry)
2440 - if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2441 + if (sh->qd_idx >= 0 && sh->pd_idx == i)
2442 + set_bit(R5_ReadError, &sh->dev[i].flags);
2443 + else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) {
2444 set_bit(R5_ReadError, &sh->dev[i].flags);
2445 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
2446 } else
2447 @@ -5721,7 +5724,8 @@ static bool raid5_make_request(struct mddev *mddev, struct bio * bi)
2448 do_flush = false;
2449 }
2450
2451 - set_bit(STRIPE_HANDLE, &sh->state);
2452 + if (!sh->batch_head)
2453 + set_bit(STRIPE_HANDLE, &sh->state);
2454 clear_bit(STRIPE_DELAYED, &sh->state);
2455 if ((!sh->batch_head || sh == sh->batch_head) &&
2456 (bi->bi_opf & REQ_SYNC) &&
2457 diff --git a/drivers/media/cec/cec-notifier.c b/drivers/media/cec/cec-notifier.c
2458 index dd2078b27a41..2424680f71c3 100644
2459 --- a/drivers/media/cec/cec-notifier.c
2460 +++ b/drivers/media/cec/cec-notifier.c
2461 @@ -123,6 +123,8 @@ void cec_notifier_unregister(struct cec_notifier *n)
2462 {
2463 mutex_lock(&n->lock);
2464 n->callback = NULL;
2465 + n->cec_adap->notifier = NULL;
2466 + n->cec_adap = NULL;
2467 mutex_unlock(&n->lock);
2468 cec_notifier_put(n);
2469 }
2470 diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
2471 index c4e7ebfe4d29..8a61150ee249 100644
2472 --- a/drivers/media/dvb-core/dvb_frontend.c
2473 +++ b/drivers/media/dvb-core/dvb_frontend.c
2474 @@ -164,6 +164,9 @@ static void dvb_frontend_free(struct kref *ref)
2475
2476 static void dvb_frontend_put(struct dvb_frontend *fe)
2477 {
2478 + /* call detach before dropping the reference count */
2479 + if (fe->ops.detach)
2480 + fe->ops.detach(fe);
2481 /*
2482 * Check if the frontend was registered, as otherwise
2483 * kref was not initialized yet.
2484 @@ -3035,7 +3038,6 @@ void dvb_frontend_detach(struct dvb_frontend *fe)
2485 dvb_frontend_invoke_release(fe, fe->ops.release_sec);
2486 dvb_frontend_invoke_release(fe, fe->ops.tuner_ops.release);
2487 dvb_frontend_invoke_release(fe, fe->ops.analog_ops.release);
2488 - dvb_frontend_invoke_release(fe, fe->ops.detach);
2489 dvb_frontend_put(fe);
2490 }
2491 EXPORT_SYMBOL(dvb_frontend_detach);
2492 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
2493 index 3c8778570331..04dc2f4bc7aa 100644
2494 --- a/drivers/media/dvb-core/dvbdev.c
2495 +++ b/drivers/media/dvb-core/dvbdev.c
2496 @@ -339,8 +339,10 @@ static int dvb_create_media_entity(struct dvb_device *dvbdev,
2497 if (npads) {
2498 dvbdev->pads = kcalloc(npads, sizeof(*dvbdev->pads),
2499 GFP_KERNEL);
2500 - if (!dvbdev->pads)
2501 + if (!dvbdev->pads) {
2502 + kfree(dvbdev->entity);
2503 return -ENOMEM;
2504 + }
2505 }
2506
2507 switch (type) {
2508 diff --git a/drivers/media/dvb-frontends/dvb-pll.c b/drivers/media/dvb-frontends/dvb-pll.c
2509 index 29836c1a40e9..ee830c76e4b3 100644
2510 --- a/drivers/media/dvb-frontends/dvb-pll.c
2511 +++ b/drivers/media/dvb-frontends/dvb-pll.c
2512 @@ -18,6 +18,7 @@
2513
2514 #include <linux/slab.h>
2515 #include <linux/module.h>
2516 +#include <linux/idr.h>
2517 #include <linux/dvb/frontend.h>
2518 #include <asm/types.h>
2519
2520 @@ -43,8 +44,7 @@ struct dvb_pll_priv {
2521 };
2522
2523 #define DVB_PLL_MAX 64
2524 -
2525 -static unsigned int dvb_pll_devcount;
2526 +static DEFINE_IDA(pll_ida);
2527
2528 static int debug;
2529 module_param(debug, int, 0644);
2530 @@ -796,6 +796,7 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
2531 struct dvb_pll_priv *priv = NULL;
2532 int ret;
2533 const struct dvb_pll_desc *desc;
2534 + int nr;
2535
2536 b1 = kmalloc(1, GFP_KERNEL);
2537 if (!b1)
2538 @@ -804,9 +805,14 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
2539 b1[0] = 0;
2540 msg.buf = b1;
2541
2542 - if ((id[dvb_pll_devcount] > DVB_PLL_UNDEFINED) &&
2543 - (id[dvb_pll_devcount] < ARRAY_SIZE(pll_list)))
2544 - pll_desc_id = id[dvb_pll_devcount];
2545 + nr = ida_simple_get(&pll_ida, 0, DVB_PLL_MAX, GFP_KERNEL);
2546 + if (nr < 0) {
2547 + kfree(b1);
2548 + return NULL;
2549 + }
2550 +
2551 + if (id[nr] > DVB_PLL_UNDEFINED && id[nr] < ARRAY_SIZE(pll_list))
2552 + pll_desc_id = id[nr];
2553
2554 BUG_ON(pll_desc_id < 1 || pll_desc_id >= ARRAY_SIZE(pll_list));
2555
2556 @@ -817,24 +823,20 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
2557 fe->ops.i2c_gate_ctrl(fe, 1);
2558
2559 ret = i2c_transfer (i2c, &msg, 1);
2560 - if (ret != 1) {
2561 - kfree(b1);
2562 - return NULL;
2563 - }
2564 + if (ret != 1)
2565 + goto out;
2566 if (fe->ops.i2c_gate_ctrl)
2567 fe->ops.i2c_gate_ctrl(fe, 0);
2568 }
2569
2570 priv = kzalloc(sizeof(struct dvb_pll_priv), GFP_KERNEL);
2571 - if (!priv) {
2572 - kfree(b1);
2573 - return NULL;
2574 - }
2575 + if (!priv)
2576 + goto out;
2577
2578 priv->pll_i2c_address = pll_addr;
2579 priv->i2c = i2c;
2580 priv->pll_desc = desc;
2581 - priv->nr = dvb_pll_devcount++;
2582 + priv->nr = nr;
2583
2584 memcpy(&fe->ops.tuner_ops, &dvb_pll_tuner_ops,
2585 sizeof(struct dvb_tuner_ops));
2586 @@ -867,6 +869,11 @@ struct dvb_frontend *dvb_pll_attach(struct dvb_frontend *fe, int pll_addr,
2587 kfree(b1);
2588
2589 return fe;
2590 +out:
2591 + kfree(b1);
2592 + ida_simple_remove(&pll_ida, nr);
2593 +
2594 + return NULL;
2595 }
2596 EXPORT_SYMBOL(dvb_pll_attach);
2597
2598 @@ -903,9 +910,10 @@ dvb_pll_probe(struct i2c_client *client, const struct i2c_device_id *id)
2599
2600 static int dvb_pll_remove(struct i2c_client *client)
2601 {
2602 - struct dvb_frontend *fe;
2603 + struct dvb_frontend *fe = i2c_get_clientdata(client);
2604 + struct dvb_pll_priv *priv = fe->tuner_priv;
2605
2606 - fe = i2c_get_clientdata(client);
2607 + ida_simple_remove(&pll_ida, priv->nr);
2608 dvb_pll_release(fe);
2609 return 0;
2610 }
2611 diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c
2612 index d5c0ffc55d46..a3bbef682fb8 100644
2613 --- a/drivers/media/i2c/ov5640.c
2614 +++ b/drivers/media/i2c/ov5640.c
2615 @@ -2787,9 +2787,14 @@ static int ov5640_probe(struct i2c_client *client,
2616 /* request optional power down pin */
2617 sensor->pwdn_gpio = devm_gpiod_get_optional(dev, "powerdown",
2618 GPIOD_OUT_HIGH);
2619 + if (IS_ERR(sensor->pwdn_gpio))
2620 + return PTR_ERR(sensor->pwdn_gpio);
2621 +
2622 /* request optional reset pin */
2623 sensor->reset_gpio = devm_gpiod_get_optional(dev, "reset",
2624 GPIOD_OUT_HIGH);
2625 + if (IS_ERR(sensor->reset_gpio))
2626 + return PTR_ERR(sensor->reset_gpio);
2627
2628 v4l2_i2c_subdev_init(&sensor->sd, client, &ov5640_subdev_ops);
2629
2630 diff --git a/drivers/media/i2c/ov5645.c b/drivers/media/i2c/ov5645.c
2631 index 1722cdab0daf..34343bc10007 100644
2632 --- a/drivers/media/i2c/ov5645.c
2633 +++ b/drivers/media/i2c/ov5645.c
2634 @@ -53,6 +53,8 @@
2635 #define OV5645_CHIP_ID_HIGH_BYTE 0x56
2636 #define OV5645_CHIP_ID_LOW 0x300b
2637 #define OV5645_CHIP_ID_LOW_BYTE 0x45
2638 +#define OV5645_IO_MIPI_CTRL00 0x300e
2639 +#define OV5645_PAD_OUTPUT00 0x3019
2640 #define OV5645_AWB_MANUAL_CONTROL 0x3406
2641 #define OV5645_AWB_MANUAL_ENABLE BIT(0)
2642 #define OV5645_AEC_PK_MANUAL 0x3503
2643 @@ -63,6 +65,7 @@
2644 #define OV5645_ISP_VFLIP BIT(2)
2645 #define OV5645_TIMING_TC_REG21 0x3821
2646 #define OV5645_SENSOR_MIRROR BIT(1)
2647 +#define OV5645_MIPI_CTRL00 0x4800
2648 #define OV5645_PRE_ISP_TEST_SETTING_1 0x503d
2649 #define OV5645_TEST_PATTERN_MASK 0x3
2650 #define OV5645_SET_TEST_PATTERN(x) ((x) & OV5645_TEST_PATTERN_MASK)
2651 @@ -129,7 +132,6 @@ static const struct reg_value ov5645_global_init_setting[] = {
2652 { 0x3503, 0x07 },
2653 { 0x3002, 0x1c },
2654 { 0x3006, 0xc3 },
2655 - { 0x300e, 0x45 },
2656 { 0x3017, 0x00 },
2657 { 0x3018, 0x00 },
2658 { 0x302e, 0x0b },
2659 @@ -358,7 +360,10 @@ static const struct reg_value ov5645_global_init_setting[] = {
2660 { 0x3a1f, 0x14 },
2661 { 0x0601, 0x02 },
2662 { 0x3008, 0x42 },
2663 - { 0x3008, 0x02 }
2664 + { 0x3008, 0x02 },
2665 + { OV5645_IO_MIPI_CTRL00, 0x40 },
2666 + { OV5645_MIPI_CTRL00, 0x24 },
2667 + { OV5645_PAD_OUTPUT00, 0x70 }
2668 };
2669
2670 static const struct reg_value ov5645_setting_sxga[] = {
2671 @@ -745,13 +750,9 @@ static int ov5645_s_power(struct v4l2_subdev *sd, int on)
2672 goto exit;
2673 }
2674
2675 - ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
2676 - OV5645_SYSTEM_CTRL0_STOP);
2677 - if (ret < 0) {
2678 - ov5645_set_power_off(ov5645);
2679 - goto exit;
2680 - }
2681 + usleep_range(500, 1000);
2682 } else {
2683 + ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x58);
2684 ov5645_set_power_off(ov5645);
2685 }
2686 }
2687 @@ -1057,11 +1058,20 @@ static int ov5645_s_stream(struct v4l2_subdev *subdev, int enable)
2688 dev_err(ov5645->dev, "could not sync v4l2 controls\n");
2689 return ret;
2690 }
2691 +
2692 + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x45);
2693 + if (ret < 0)
2694 + return ret;
2695 +
2696 ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
2697 OV5645_SYSTEM_CTRL0_START);
2698 if (ret < 0)
2699 return ret;
2700 } else {
2701 + ret = ov5645_write_reg(ov5645, OV5645_IO_MIPI_CTRL00, 0x40);
2702 + if (ret < 0)
2703 + return ret;
2704 +
2705 ret = ov5645_write_reg(ov5645, OV5645_SYSTEM_CTRL0,
2706 OV5645_SYSTEM_CTRL0_STOP);
2707 if (ret < 0)
2708 diff --git a/drivers/media/i2c/ov9650.c b/drivers/media/i2c/ov9650.c
2709 index 5bea31cd41aa..33a21d585dc9 100644
2710 --- a/drivers/media/i2c/ov9650.c
2711 +++ b/drivers/media/i2c/ov9650.c
2712 @@ -716,6 +716,11 @@ static int ov965x_set_gain(struct ov965x *ov965x, int auto_gain)
2713 for (m = 6; m >= 0; m--)
2714 if (gain >= (1 << m) * 16)
2715 break;
2716 +
2717 + /* Sanity check: don't adjust the gain with a negative value */
2718 + if (m < 0)
2719 + return -EINVAL;
2720 +
2721 rgain = (gain - ((1 << m) * 16)) / (1 << m);
2722 rgain |= (((1 << m) - 1) << 4);
2723
2724 diff --git a/drivers/media/pci/saa7134/saa7134-i2c.c b/drivers/media/pci/saa7134/saa7134-i2c.c
2725 index cf1e526de56a..8a1128c60680 100644
2726 --- a/drivers/media/pci/saa7134/saa7134-i2c.c
2727 +++ b/drivers/media/pci/saa7134/saa7134-i2c.c
2728 @@ -351,7 +351,11 @@ static const struct i2c_client saa7134_client_template = {
2729
2730 /* ----------------------------------------------------------- */
2731
2732 -/* On Medion 7134 reading EEPROM needs DVB-T demod i2c gate open */
2733 +/*
2734 + * On Medion 7134 reading the SAA7134 chip config EEPROM needs DVB-T
2735 + * demod i2c gate closed due to an address clash between this EEPROM
2736 + * and the demod one.
2737 + */
2738 static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
2739 {
2740 u8 subaddr = 0x7, dmdregval;
2741 @@ -368,14 +372,14 @@ static void saa7134_i2c_eeprom_md7134_gate(struct saa7134_dev *dev)
2742
2743 ret = i2c_transfer(&dev->i2c_adap, i2cgatemsg_r, 2);
2744 if ((ret == 2) && (dmdregval & 0x2)) {
2745 - pr_debug("%s: DVB-T demod i2c gate was left closed\n",
2746 + pr_debug("%s: DVB-T demod i2c gate was left open\n",
2747 dev->name);
2748
2749 data[0] = subaddr;
2750 data[1] = (dmdregval & ~0x2);
2751 if (i2c_transfer(&dev->i2c_adap, i2cgatemsg_w, 1) != 1)
2752 - pr_err("%s: EEPROM i2c gate open failure\n",
2753 - dev->name);
2754 + pr_err("%s: EEPROM i2c gate close failure\n",
2755 + dev->name);
2756 }
2757 }
2758
2759 diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
2760 index 6d8e4afe9673..8c56d4c37a52 100644
2761 --- a/drivers/media/pci/saa7146/hexium_gemini.c
2762 +++ b/drivers/media/pci/saa7146/hexium_gemini.c
2763 @@ -304,6 +304,9 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
2764 ret = saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER);
2765 if (ret < 0) {
2766 pr_err("cannot register capture v4l2 device. skipping.\n");
2767 + saa7146_vv_release(dev);
2768 + i2c_del_adapter(&hexium->i2c_adapter);
2769 + kfree(hexium);
2770 return ret;
2771 }
2772
2773 diff --git a/drivers/media/platform/exynos4-is/fimc-is.c b/drivers/media/platform/exynos4-is/fimc-is.c
2774 index 5ddb2321e9e4..0fe9be93fabe 100644
2775 --- a/drivers/media/platform/exynos4-is/fimc-is.c
2776 +++ b/drivers/media/platform/exynos4-is/fimc-is.c
2777 @@ -819,6 +819,7 @@ static int fimc_is_probe(struct platform_device *pdev)
2778 return -ENODEV;
2779
2780 is->pmu_regs = of_iomap(node, 0);
2781 + of_node_put(node);
2782 if (!is->pmu_regs)
2783 return -ENOMEM;
2784
2785 diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
2786 index deb499f76412..b5993532831d 100644
2787 --- a/drivers/media/platform/exynos4-is/media-dev.c
2788 +++ b/drivers/media/platform/exynos4-is/media-dev.c
2789 @@ -498,6 +498,7 @@ static int fimc_md_register_sensor_entities(struct fimc_md *fmd)
2790 continue;
2791
2792 ret = fimc_md_parse_port_node(fmd, port, index);
2793 + of_node_put(port);
2794 if (ret < 0) {
2795 of_node_put(node);
2796 goto rpm_put;
2797 @@ -531,6 +532,7 @@ static int __of_get_csis_id(struct device_node *np)
2798 if (!np)
2799 return -EINVAL;
2800 of_property_read_u32(np, "reg", &reg);
2801 + of_node_put(np);
2802 return reg - FIMC_INPUT_MIPI_CSI2_0;
2803 }
2804
2805 diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
2806 index 0273302aa741..83086eea1450 100644
2807 --- a/drivers/media/platform/fsl-viu.c
2808 +++ b/drivers/media/platform/fsl-viu.c
2809 @@ -37,7 +37,7 @@
2810 #define VIU_VERSION "0.5.1"
2811
2812 /* Allow building this driver with COMPILE_TEST */
2813 -#ifndef CONFIG_PPC
2814 +#if !defined(CONFIG_PPC) && !defined(CONFIG_MICROBLAZE)
2815 #define out_be32(v, a) iowrite32be(a, (void __iomem *)v)
2816 #define in_be32(a) ioread32be((void __iomem *)a)
2817 #endif
2818 diff --git a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
2819 index bbb24fb95b95..3deb0549b1a1 100644
2820 --- a/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
2821 +++ b/drivers/media/platform/mtk-mdp/mtk_mdp_core.c
2822 @@ -118,7 +118,9 @@ static int mtk_mdp_probe(struct platform_device *pdev)
2823 mutex_init(&mdp->vpulock);
2824
2825 /* Old dts had the components as child nodes */
2826 - if (of_get_next_child(dev->of_node, NULL)) {
2827 + node = of_get_next_child(dev->of_node, NULL);
2828 + if (node) {
2829 + of_node_put(node);
2830 parent = dev->of_node;
2831 dev_warn(dev, "device tree is out of date\n");
2832 } else {
2833 diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
2834 index 432bc7fbedc9..addd03b51748 100644
2835 --- a/drivers/media/platform/omap3isp/isp.c
2836 +++ b/drivers/media/platform/omap3isp/isp.c
2837 @@ -722,6 +722,10 @@ static int isp_pipeline_enable(struct isp_pipeline *pipe,
2838 s_stream, mode);
2839 pipe->do_propagation = true;
2840 }
2841 +
2842 + /* Stop at the first external sub-device. */
2843 + if (subdev->dev != isp->dev)
2844 + break;
2845 }
2846
2847 return 0;
2848 @@ -836,6 +840,10 @@ static int isp_pipeline_disable(struct isp_pipeline *pipe)
2849 &subdev->entity);
2850 failure = -ETIMEDOUT;
2851 }
2852 +
2853 + /* Stop at the first external sub-device. */
2854 + if (subdev->dev != isp->dev)
2855 + break;
2856 }
2857
2858 return failure;
2859 diff --git a/drivers/media/platform/omap3isp/ispccdc.c b/drivers/media/platform/omap3isp/ispccdc.c
2860 index 77b73e27a274..412438dce285 100644
2861 --- a/drivers/media/platform/omap3isp/ispccdc.c
2862 +++ b/drivers/media/platform/omap3isp/ispccdc.c
2863 @@ -2605,6 +2605,7 @@ int omap3isp_ccdc_register_entities(struct isp_ccdc_device *ccdc,
2864 int ret;
2865
2866 /* Register the subdev and video node. */
2867 + ccdc->subdev.dev = vdev->mdev->dev;
2868 ret = v4l2_device_register_subdev(vdev, &ccdc->subdev);
2869 if (ret < 0)
2870 goto error;
2871 diff --git a/drivers/media/platform/omap3isp/ispccp2.c b/drivers/media/platform/omap3isp/ispccp2.c
2872 index e062939d0d05..47b0d3fe87d8 100644
2873 --- a/drivers/media/platform/omap3isp/ispccp2.c
2874 +++ b/drivers/media/platform/omap3isp/ispccp2.c
2875 @@ -1034,6 +1034,7 @@ int omap3isp_ccp2_register_entities(struct isp_ccp2_device *ccp2,
2876 int ret;
2877
2878 /* Register the subdev and video nodes. */
2879 + ccp2->subdev.dev = vdev->mdev->dev;
2880 ret = v4l2_device_register_subdev(vdev, &ccp2->subdev);
2881 if (ret < 0)
2882 goto error;
2883 diff --git a/drivers/media/platform/omap3isp/ispcsi2.c b/drivers/media/platform/omap3isp/ispcsi2.c
2884 index a4d3d030e81e..e45292a1bf6c 100644
2885 --- a/drivers/media/platform/omap3isp/ispcsi2.c
2886 +++ b/drivers/media/platform/omap3isp/ispcsi2.c
2887 @@ -1201,6 +1201,7 @@ int omap3isp_csi2_register_entities(struct isp_csi2_device *csi2,
2888 int ret;
2889
2890 /* Register the subdev and video nodes. */
2891 + csi2->subdev.dev = vdev->mdev->dev;
2892 ret = v4l2_device_register_subdev(vdev, &csi2->subdev);
2893 if (ret < 0)
2894 goto error;
2895 diff --git a/drivers/media/platform/omap3isp/isppreview.c b/drivers/media/platform/omap3isp/isppreview.c
2896 index 3195f7c8b8b7..591c6de498f8 100644
2897 --- a/drivers/media/platform/omap3isp/isppreview.c
2898 +++ b/drivers/media/platform/omap3isp/isppreview.c
2899 @@ -2228,6 +2228,7 @@ int omap3isp_preview_register_entities(struct isp_prev_device *prev,
2900 int ret;
2901
2902 /* Register the subdev and video nodes. */
2903 + prev->subdev.dev = vdev->mdev->dev;
2904 ret = v4l2_device_register_subdev(vdev, &prev->subdev);
2905 if (ret < 0)
2906 goto error;
2907 diff --git a/drivers/media/platform/omap3isp/ispresizer.c b/drivers/media/platform/omap3isp/ispresizer.c
2908 index 0b6a87508584..2035e3c6a9de 100644
2909 --- a/drivers/media/platform/omap3isp/ispresizer.c
2910 +++ b/drivers/media/platform/omap3isp/ispresizer.c
2911 @@ -1684,6 +1684,7 @@ int omap3isp_resizer_register_entities(struct isp_res_device *res,
2912 int ret;
2913
2914 /* Register the subdev and video nodes. */
2915 + res->subdev.dev = vdev->mdev->dev;
2916 ret = v4l2_device_register_subdev(vdev, &res->subdev);
2917 if (ret < 0)
2918 goto error;
2919 diff --git a/drivers/media/platform/omap3isp/ispstat.c b/drivers/media/platform/omap3isp/ispstat.c
2920 index 47353fee26c3..bfa2d0504646 100644
2921 --- a/drivers/media/platform/omap3isp/ispstat.c
2922 +++ b/drivers/media/platform/omap3isp/ispstat.c
2923 @@ -1029,6 +1029,8 @@ void omap3isp_stat_unregister_entities(struct ispstat *stat)
2924 int omap3isp_stat_register_entities(struct ispstat *stat,
2925 struct v4l2_device *vdev)
2926 {
2927 + stat->subdev.dev = vdev->mdev->dev;
2928 +
2929 return v4l2_device_register_subdev(vdev, &stat->subdev);
2930 }
2931
2932 diff --git a/drivers/media/platform/rcar_fdp1.c b/drivers/media/platform/rcar_fdp1.c
2933 index 0d1467028811..5a30f1d84fe1 100644
2934 --- a/drivers/media/platform/rcar_fdp1.c
2935 +++ b/drivers/media/platform/rcar_fdp1.c
2936 @@ -2306,7 +2306,7 @@ static int fdp1_probe(struct platform_device *pdev)
2937 fdp1->fcp = rcar_fcp_get(fcp_node);
2938 of_node_put(fcp_node);
2939 if (IS_ERR(fdp1->fcp)) {
2940 - dev_err(&pdev->dev, "FCP not found (%ld)\n",
2941 + dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
2942 PTR_ERR(fdp1->fcp));
2943 return PTR_ERR(fdp1->fcp);
2944 }
2945 diff --git a/drivers/media/platform/vsp1/vsp1_dl.c b/drivers/media/platform/vsp1/vsp1_dl.c
2946 index 26289adaf658..a5634ca85a31 100644
2947 --- a/drivers/media/platform/vsp1/vsp1_dl.c
2948 +++ b/drivers/media/platform/vsp1/vsp1_dl.c
2949 @@ -557,8 +557,10 @@ static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
2950
2951 /* Get a default body for our list. */
2952 dl->body0 = vsp1_dl_body_get(dlm->pool);
2953 - if (!dl->body0)
2954 + if (!dl->body0) {
2955 + kfree(dl);
2956 return NULL;
2957 + }
2958
2959 header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);
2960
2961 diff --git a/drivers/media/radio/si470x/radio-si470x-usb.c b/drivers/media/radio/si470x/radio-si470x-usb.c
2962 index 313a95f195a2..19e381dd5808 100644
2963 --- a/drivers/media/radio/si470x/radio-si470x-usb.c
2964 +++ b/drivers/media/radio/si470x/radio-si470x-usb.c
2965 @@ -743,7 +743,7 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
2966 /* start radio */
2967 retval = si470x_start_usb(radio);
2968 if (retval < 0)
2969 - goto err_all;
2970 + goto err_buf;
2971
2972 /* set initial frequency */
2973 si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
2974 @@ -758,6 +758,8 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
2975
2976 return 0;
2977 err_all:
2978 + usb_kill_urb(radio->int_in_urb);
2979 +err_buf:
2980 kfree(radio->buffer);
2981 err_ctrl:
2982 v4l2_ctrl_handler_free(&radio->hdl);
2983 @@ -831,6 +833,7 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
2984 mutex_lock(&radio->lock);
2985 v4l2_device_disconnect(&radio->v4l2_dev);
2986 video_unregister_device(&radio->videodev);
2987 + usb_kill_urb(radio->int_in_urb);
2988 usb_set_intfdata(intf, NULL);
2989 mutex_unlock(&radio->lock);
2990 v4l2_device_put(&radio->v4l2_dev);
2991 diff --git a/drivers/media/rc/iguanair.c b/drivers/media/rc/iguanair.c
2992 index 7daac8bab83b..6f3030b2054d 100644
2993 --- a/drivers/media/rc/iguanair.c
2994 +++ b/drivers/media/rc/iguanair.c
2995 @@ -424,6 +424,10 @@ static int iguanair_probe(struct usb_interface *intf,
2996 int ret, pipein, pipeout;
2997 struct usb_host_interface *idesc;
2998
2999 + idesc = intf->altsetting;
3000 + if (idesc->desc.bNumEndpoints < 2)
3001 + return -ENODEV;
3002 +
3003 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
3004 rc = rc_allocate_device(RC_DRIVER_IR_RAW);
3005 if (!ir || !rc) {
3006 @@ -438,18 +442,13 @@ static int iguanair_probe(struct usb_interface *intf,
3007 ir->urb_in = usb_alloc_urb(0, GFP_KERNEL);
3008 ir->urb_out = usb_alloc_urb(0, GFP_KERNEL);
3009
3010 - if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out) {
3011 + if (!ir->buf_in || !ir->packet || !ir->urb_in || !ir->urb_out ||
3012 + !usb_endpoint_is_int_in(&idesc->endpoint[0].desc) ||
3013 + !usb_endpoint_is_int_out(&idesc->endpoint[1].desc)) {
3014 ret = -ENOMEM;
3015 goto out;
3016 }
3017
3018 - idesc = intf->altsetting;
3019 -
3020 - if (idesc->desc.bNumEndpoints < 2) {
3021 - ret = -ENODEV;
3022 - goto out;
3023 - }
3024 -
3025 ir->rc = rc;
3026 ir->dev = &intf->dev;
3027 ir->udev = udev;
3028 diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
3029 index 1041c056854d..f23a220352f7 100644
3030 --- a/drivers/media/rc/imon.c
3031 +++ b/drivers/media/rc/imon.c
3032 @@ -1835,12 +1835,17 @@ static void imon_get_ffdc_type(struct imon_context *ictx)
3033 break;
3034 /* iMON VFD, MCE IR */
3035 case 0x46:
3036 - case 0x7e:
3037 case 0x9e:
3038 dev_info(ictx->dev, "0xffdc iMON VFD, MCE IR");
3039 detected_display_type = IMON_DISPLAY_TYPE_VFD;
3040 allowed_protos = RC_PROTO_BIT_RC6_MCE;
3041 break;
3042 + /* iMON VFD, iMON or MCE IR */
3043 + case 0x7e:
3044 + dev_info(ictx->dev, "0xffdc iMON VFD, iMON or MCE IR");
3045 + detected_display_type = IMON_DISPLAY_TYPE_VFD;
3046 + allowed_protos |= RC_PROTO_BIT_RC6_MCE;
3047 + break;
3048 /* iMON LCD, MCE IR */
3049 case 0x9f:
3050 dev_info(ictx->dev, "0xffdc iMON LCD, MCE IR");
3051 diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c
3052 index 4c0c8008872a..f1dfb8409432 100644
3053 --- a/drivers/media/rc/mceusb.c
3054 +++ b/drivers/media/rc/mceusb.c
3055 @@ -42,21 +42,22 @@
3056 #include <linux/pm_wakeup.h>
3057 #include <media/rc-core.h>
3058
3059 -#define DRIVER_VERSION "1.94"
3060 +#define DRIVER_VERSION "1.95"
3061 #define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>"
3062 #define DRIVER_DESC "Windows Media Center Ed. eHome Infrared Transceiver " \
3063 "device driver"
3064 #define DRIVER_NAME "mceusb"
3065
3066 +#define USB_TX_TIMEOUT 1000 /* in milliseconds */
3067 #define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
3068 #define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
3069
3070 /* MCE constants */
3071 -#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
3072 +#define MCE_IRBUF_SIZE 128 /* TX IR buffer length */
3073 #define MCE_TIME_UNIT 50 /* Approx 50us resolution */
3074 -#define MCE_CODE_LENGTH 5 /* Normal length of packet (with header) */
3075 -#define MCE_PACKET_SIZE 4 /* Normal length of packet (without header) */
3076 -#define MCE_IRDATA_HEADER 0x84 /* Actual header format is 0x80 + num_bytes */
3077 +#define MCE_PACKET_SIZE 31 /* Max length of packet (with header) */
3078 +#define MCE_IRDATA_HEADER (0x80 + MCE_PACKET_SIZE - 1)
3079 + /* Actual format is 0x80 + num_bytes */
3080 #define MCE_IRDATA_TRAILER 0x80 /* End of IR data */
3081 #define MCE_MAX_CHANNELS 2 /* Two transmitters, hardware dependent? */
3082 #define MCE_DEFAULT_TX_MASK 0x03 /* Vals: TX1=0x01, TX2=0x02, ALL=0x03 */
3083 @@ -609,9 +610,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
3084 if (len <= skip)
3085 return;
3086
3087 - dev_dbg(dev, "%cx data: %*ph (length=%d)",
3088 - (out ? 't' : 'r'),
3089 - min(len, buf_len - offset), buf + offset, len);
3090 + dev_dbg(dev, "%cx data[%d]: %*ph (len=%d sz=%d)",
3091 + (out ? 't' : 'r'), offset,
3092 + min(len, buf_len - offset), buf + offset, len, buf_len);
3093
3094 inout = out ? "Request" : "Got";
3095
3096 @@ -733,6 +734,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
3097 case MCE_RSP_CMD_ILLEGAL:
3098 dev_dbg(dev, "Illegal PORT_IR command");
3099 break;
3100 + case MCE_RSP_TX_TIMEOUT:
3101 + dev_dbg(dev, "IR TX timeout (TX buffer underrun)");
3102 + break;
3103 default:
3104 dev_dbg(dev, "Unknown command 0x%02x 0x%02x",
3105 cmd, subcmd);
3106 @@ -747,13 +751,14 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, u8 *buf, int buf_len,
3107 dev_dbg(dev, "End of raw IR data");
3108 else if ((cmd != MCE_CMD_PORT_IR) &&
3109 ((cmd & MCE_PORT_MASK) == MCE_COMMAND_IRDATA))
3110 - dev_dbg(dev, "Raw IR data, %d pulse/space samples", ir->rem);
3111 + dev_dbg(dev, "Raw IR data, %d pulse/space samples",
3112 + cmd & MCE_PACKET_LENGTH_MASK);
3113 #endif
3114 }
3115
3116 /*
3117 * Schedule work that can't be done in interrupt handlers
3118 - * (mceusb_dev_recv() and mce_async_callback()) nor tasklets.
3119 + * (mceusb_dev_recv() and mce_write_callback()) nor tasklets.
3120 * Invokes mceusb_deferred_kevent() for recovering from
3121 * error events specified by the kevent bit field.
3122 */
3123 @@ -766,23 +771,80 @@ static void mceusb_defer_kevent(struct mceusb_dev *ir, int kevent)
3124 dev_dbg(ir->dev, "kevent %d scheduled", kevent);
3125 }
3126
3127 -static void mce_async_callback(struct urb *urb)
3128 +static void mce_write_callback(struct urb *urb)
3129 {
3130 - struct mceusb_dev *ir;
3131 - int len;
3132 -
3133 if (!urb)
3134 return;
3135
3136 - ir = urb->context;
3137 + complete(urb->context);
3138 +}
3139 +
3140 +/*
3141 + * Write (TX/send) data to MCE device USB endpoint out.
3142 + * Used for IR blaster TX and MCE device commands.
3143 + *
3144 + * Return: The number of bytes written (> 0) or errno (< 0).
3145 + */
3146 +static int mce_write(struct mceusb_dev *ir, u8 *data, int size)
3147 +{
3148 + int ret;
3149 + struct urb *urb;
3150 + struct device *dev = ir->dev;
3151 + unsigned char *buf_out;
3152 + struct completion tx_done;
3153 + unsigned long expire;
3154 + unsigned long ret_wait;
3155 +
3156 + mceusb_dev_printdata(ir, data, size, 0, size, true);
3157 +
3158 + urb = usb_alloc_urb(0, GFP_KERNEL);
3159 + if (unlikely(!urb)) {
3160 + dev_err(dev, "Error: mce write couldn't allocate urb");
3161 + return -ENOMEM;
3162 + }
3163 +
3164 + buf_out = kmalloc(size, GFP_KERNEL);
3165 + if (!buf_out) {
3166 + usb_free_urb(urb);
3167 + return -ENOMEM;
3168 + }
3169 +
3170 + init_completion(&tx_done);
3171 +
3172 + /* outbound data */
3173 + if (usb_endpoint_xfer_int(ir->usb_ep_out))
3174 + usb_fill_int_urb(urb, ir->usbdev, ir->pipe_out,
3175 + buf_out, size, mce_write_callback, &tx_done,
3176 + ir->usb_ep_out->bInterval);
3177 + else
3178 + usb_fill_bulk_urb(urb, ir->usbdev, ir->pipe_out,
3179 + buf_out, size, mce_write_callback, &tx_done);
3180 + memcpy(buf_out, data, size);
3181 +
3182 + ret = usb_submit_urb(urb, GFP_KERNEL);
3183 + if (ret) {
3184 + dev_err(dev, "Error: mce write submit urb error = %d", ret);
3185 + kfree(buf_out);
3186 + usb_free_urb(urb);
3187 + return ret;
3188 + }
3189 +
3190 + expire = msecs_to_jiffies(USB_TX_TIMEOUT);
3191 + ret_wait = wait_for_completion_timeout(&tx_done, expire);
3192 + if (!ret_wait) {
3193 + dev_err(dev, "Error: mce write timed out (expire = %lu (%dms))",
3194 + expire, USB_TX_TIMEOUT);
3195 + usb_kill_urb(urb);
3196 + ret = (urb->status == -ENOENT ? -ETIMEDOUT : urb->status);
3197 + } else {
3198 + ret = urb->status;
3199 + }
3200 + if (ret >= 0)
3201 + ret = urb->actual_length; /* bytes written */
3202
3203 switch (urb->status) {
3204 /* success */
3205 case 0:
3206 - len = urb->actual_length;
3207 -
3208 - mceusb_dev_printdata(ir, urb->transfer_buffer, len,
3209 - 0, len, true);
3210 break;
3211
3212 case -ECONNRESET:
3213 @@ -792,140 +854,135 @@ static void mce_async_callback(struct urb *urb)
3214 break;
3215
3216 case -EPIPE:
3217 - dev_err(ir->dev, "Error: request urb status = %d (TX HALT)",
3218 + dev_err(ir->dev, "Error: mce write urb status = %d (TX HALT)",
3219 urb->status);
3220 mceusb_defer_kevent(ir, EVENT_TX_HALT);
3221 break;
3222
3223 default:
3224 - dev_err(ir->dev, "Error: request urb status = %d", urb->status);
3225 + dev_err(ir->dev, "Error: mce write urb status = %d",
3226 + urb->status);
3227 break;
3228 }
3229
3230 - /* the transfer buffer and urb were allocated in mce_request_packet */
3231 - kfree(urb->transfer_buffer);
3232 - usb_free_urb(urb);
3233 -}
3234 -
3235 -/* request outgoing (send) usb packet - used to initialize remote */
3236 -static void mce_request_packet(struct mceusb_dev *ir, unsigned char *data,
3237 - int size)
3238 -{
3239 - int res;
3240 - struct urb *async_urb;
3241 - struct device *dev = ir->dev;
3242 - unsigned char *async_buf;
3243 + dev_dbg(dev, "tx done status = %d (wait = %lu, expire = %lu (%dms), urb->actual_length = %d, urb->status = %d)",
3244 + ret, ret_wait, expire, USB_TX_TIMEOUT,
3245 + urb->actual_length, urb->status);
3246
3247 - async_urb = usb_alloc_urb(0, GFP_KERNEL);
3248 - if (unlikely(!async_urb)) {
3249 - dev_err(dev, "Error, couldn't allocate urb!");
3250 - return;
3251 - }
3252 -
3253 - async_buf = kmalloc(size, GFP_KERNEL);
3254 - if (!async_buf) {
3255 - usb_free_urb(async_urb);
3256 - return;
3257 - }
3258 -
3259 - /* outbound data */
3260 - if (usb_endpoint_xfer_int(ir->usb_ep_out))
3261 - usb_fill_int_urb(async_urb, ir->usbdev, ir->pipe_out,
3262 - async_buf, size, mce_async_callback, ir,
3263 - ir->usb_ep_out->bInterval);
3264 - else
3265 - usb_fill_bulk_urb(async_urb, ir->usbdev, ir->pipe_out,
3266 - async_buf, size, mce_async_callback, ir);
3267 -
3268 - memcpy(async_buf, data, size);
3269 -
3270 - dev_dbg(dev, "send request called (size=%#x)", size);
3271 + kfree(buf_out);
3272 + usb_free_urb(urb);
3273
3274 - res = usb_submit_urb(async_urb, GFP_ATOMIC);
3275 - if (res) {
3276 - dev_err(dev, "send request FAILED! (res=%d)", res);
3277 - kfree(async_buf);
3278 - usb_free_urb(async_urb);
3279 - return;
3280 - }
3281 - dev_dbg(dev, "send request complete (res=%d)", res);
3282 + return ret;
3283 }
3284
3285 -static void mce_async_out(struct mceusb_dev *ir, unsigned char *data, int size)
3286 +static void mce_command_out(struct mceusb_dev *ir, u8 *data, int size)
3287 {
3288 int rsize = sizeof(DEVICE_RESUME);
3289
3290 if (ir->need_reset) {
3291 ir->need_reset = false;
3292 - mce_request_packet(ir, DEVICE_RESUME, rsize);
3293 + mce_write(ir, DEVICE_RESUME, rsize);
3294 msleep(10);
3295 }
3296
3297 - mce_request_packet(ir, data, size);
3298 + mce_write(ir, data, size);
3299 msleep(10);
3300 }
3301
3302 -/* Send data out the IR blaster port(s) */
3303 +/*
3304 + * Transmit IR out the MCE device IR blaster port(s).
3305 + *
3306 + * Convert IR pulse/space sequence from LIRC to MCE format.
3307 + * Break up a long IR sequence into multiple parts (MCE IR data packets).
3308 + *
3309 + * u32 txbuf[] consists of IR pulse, space, ..., and pulse times in usec.
3310 + * Pulses and spaces are implicit by their position.
3311 + * The first IR sample, txbuf[0], is always a pulse.
3312 + *
3313 + * u8 irbuf[] consists of multiple IR data packets for the MCE device.
3314 + * A packet is 1 u8 MCE_IRDATA_HEADER and up to 30 u8 IR samples.
3315 + * An IR sample is 1-bit pulse/space flag with 7-bit time
3316 + * in MCE time units (50usec).
3317 + *
3318 + * Return: The number of IR samples sent (> 0) or errno (< 0).
3319 + */
3320 static int mceusb_tx_ir(struct rc_dev *dev, unsigned *txbuf, unsigned count)
3321 {
3322 struct mceusb_dev *ir = dev->priv;
3323 - int i, length, ret = 0;
3324 - int cmdcount = 0;
3325 - unsigned char cmdbuf[MCE_CMDBUF_SIZE];
3326 -
3327 - /* MCE tx init header */
3328 - cmdbuf[cmdcount++] = MCE_CMD_PORT_IR;
3329 - cmdbuf[cmdcount++] = MCE_CMD_SETIRTXPORTS;
3330 - cmdbuf[cmdcount++] = ir->tx_mask;
3331 + u8 cmdbuf[3] = { MCE_CMD_PORT_IR, MCE_CMD_SETIRTXPORTS, 0x00 };
3332 + u8 irbuf[MCE_IRBUF_SIZE];
3333 + int ircount = 0;
3334 + unsigned int irsample;
3335 + int i, length, ret;
3336
3337 /* Send the set TX ports command */
3338 - mce_async_out(ir, cmdbuf, cmdcount);
3339 - cmdcount = 0;
3340 -
3341 - /* Generate mce packet data */
3342 - for (i = 0; (i < count) && (cmdcount < MCE_CMDBUF_SIZE); i++) {
3343 - txbuf[i] = txbuf[i] / MCE_TIME_UNIT;
3344 -
3345 - do { /* loop to support long pulses/spaces > 127*50us=6.35ms */
3346 -
3347 - /* Insert mce packet header every 4th entry */
3348 - if ((cmdcount < MCE_CMDBUF_SIZE) &&
3349 - (cmdcount % MCE_CODE_LENGTH) == 0)
3350 - cmdbuf[cmdcount++] = MCE_IRDATA_HEADER;
3351 -
3352 - /* Insert mce packet data */
3353 - if (cmdcount < MCE_CMDBUF_SIZE)
3354 - cmdbuf[cmdcount++] =
3355 - (txbuf[i] < MCE_PULSE_BIT ?
3356 - txbuf[i] : MCE_MAX_PULSE_LENGTH) |
3357 - (i & 1 ? 0x00 : MCE_PULSE_BIT);
3358 - else {
3359 - ret = -EINVAL;
3360 - goto out;
3361 + cmdbuf[2] = ir->tx_mask;
3362 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3363 +
3364 + /* Generate mce IR data packet */
3365 + for (i = 0; i < count; i++) {
3366 + irsample = txbuf[i] / MCE_TIME_UNIT;
3367 +
3368 + /* loop to support long pulses/spaces > 6350us (127*50us) */
3369 + while (irsample > 0) {
3370 + /* Insert IR header every 30th entry */
3371 + if (ircount % MCE_PACKET_SIZE == 0) {
3372 + /* Room for IR header and one IR sample? */
3373 + if (ircount >= MCE_IRBUF_SIZE - 1) {
3374 + /* Send near full buffer */
3375 + ret = mce_write(ir, irbuf, ircount);
3376 + if (ret < 0)
3377 + return ret;
3378 + ircount = 0;
3379 + }
3380 + irbuf[ircount++] = MCE_IRDATA_HEADER;
3381 }
3382
3383 - } while ((txbuf[i] > MCE_MAX_PULSE_LENGTH) &&
3384 - (txbuf[i] -= MCE_MAX_PULSE_LENGTH));
3385 - }
3386 -
3387 - /* Check if we have room for the empty packet at the end */
3388 - if (cmdcount >= MCE_CMDBUF_SIZE) {
3389 - ret = -EINVAL;
3390 - goto out;
3391 - }
3392 + /* Insert IR sample */
3393 + if (irsample <= MCE_MAX_PULSE_LENGTH) {
3394 + irbuf[ircount] = irsample;
3395 + irsample = 0;
3396 + } else {
3397 + irbuf[ircount] = MCE_MAX_PULSE_LENGTH;
3398 + irsample -= MCE_MAX_PULSE_LENGTH;
3399 + }
3400 + /*
3401 + * Even i = IR pulse
3402 + * Odd i = IR space
3403 + */
3404 + irbuf[ircount] |= (i & 1 ? 0 : MCE_PULSE_BIT);
3405 + ircount++;
3406 +
3407 + /* IR buffer full? */
3408 + if (ircount >= MCE_IRBUF_SIZE) {
3409 + /* Fix packet length in last header */
3410 + length = ircount % MCE_PACKET_SIZE;
3411 + if (length > 0)
3412 + irbuf[ircount - length] -=
3413 + MCE_PACKET_SIZE - length;
3414 + /* Send full buffer */
3415 + ret = mce_write(ir, irbuf, ircount);
3416 + if (ret < 0)
3417 + return ret;
3418 + ircount = 0;
3419 + }
3420 + }
3421 + } /* after for loop, 0 <= ircount < MCE_IRBUF_SIZE */
3422
3423 /* Fix packet length in last header */
3424 - length = cmdcount % MCE_CODE_LENGTH;
3425 - cmdbuf[cmdcount - length] -= MCE_CODE_LENGTH - length;
3426 + length = ircount % MCE_PACKET_SIZE;
3427 + if (length > 0)
3428 + irbuf[ircount - length] -= MCE_PACKET_SIZE - length;
3429
3430 - /* All mce commands end with an empty packet (0x80) */
3431 - cmdbuf[cmdcount++] = MCE_IRDATA_TRAILER;
3432 + /* Append IR trailer (0x80) to final partial (or empty) IR buffer */
3433 + irbuf[ircount++] = MCE_IRDATA_TRAILER;
3434
3435 - /* Transmit the command to the mce device */
3436 - mce_async_out(ir, cmdbuf, cmdcount);
3437 + /* Send final buffer */
3438 + ret = mce_write(ir, irbuf, ircount);
3439 + if (ret < 0)
3440 + return ret;
3441
3442 -out:
3443 - return ret ? ret : count;
3444 + return count;
3445 }
3446
3447 /* Sets active IR outputs -- mce devices typically have two */
3448 @@ -965,7 +1022,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
3449 cmdbuf[2] = MCE_CMD_SIG_END;
3450 cmdbuf[3] = MCE_IRDATA_TRAILER;
3451 dev_dbg(ir->dev, "disabling carrier modulation");
3452 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3453 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3454 return 0;
3455 }
3456
3457 @@ -979,7 +1036,7 @@ static int mceusb_set_tx_carrier(struct rc_dev *dev, u32 carrier)
3458 carrier);
3459
3460 /* Transmit new carrier to mce device */
3461 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3462 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3463 return 0;
3464 }
3465 }
3466 @@ -1002,10 +1059,10 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout)
3467 cmdbuf[2] = units >> 8;
3468 cmdbuf[3] = units;
3469
3470 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3471 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3472
3473 /* get receiver timeout value */
3474 - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
3475 + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
3476
3477 return 0;
3478 }
3479 @@ -1030,7 +1087,7 @@ static int mceusb_set_rx_wideband(struct rc_dev *dev, int enable)
3480 ir->wideband_rx_enabled = false;
3481 cmdbuf[2] = 1; /* port 1 is long range receiver */
3482 }
3483 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3484 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3485 /* response from device sets ir->learning_active */
3486
3487 return 0;
3488 @@ -1053,7 +1110,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
3489 ir->carrier_report_enabled = true;
3490 if (!ir->learning_active) {
3491 cmdbuf[2] = 2; /* port 2 is short range receiver */
3492 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3493 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3494 }
3495 } else {
3496 ir->carrier_report_enabled = false;
3497 @@ -1064,7 +1121,7 @@ static int mceusb_set_rx_carrier_report(struct rc_dev *dev, int enable)
3498 */
3499 if (ir->learning_active && !ir->wideband_rx_enabled) {
3500 cmdbuf[2] = 1; /* port 1 is long range receiver */
3501 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3502 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3503 }
3504 }
3505
3506 @@ -1143,6 +1200,7 @@ static void mceusb_handle_command(struct mceusb_dev *ir, int index)
3507 }
3508 break;
3509 case MCE_RSP_CMD_ILLEGAL:
3510 + case MCE_RSP_TX_TIMEOUT:
3511 ir->need_reset = true;
3512 break;
3513 default:
3514 @@ -1280,7 +1338,7 @@ static void mceusb_get_emulator_version(struct mceusb_dev *ir)
3515 {
3516 /* If we get no reply or an illegal command reply, its ver 1, says MS */
3517 ir->emver = 1;
3518 - mce_async_out(ir, GET_EMVER, sizeof(GET_EMVER));
3519 + mce_command_out(ir, GET_EMVER, sizeof(GET_EMVER));
3520 }
3521
3522 static void mceusb_gen1_init(struct mceusb_dev *ir)
3523 @@ -1326,10 +1384,10 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
3524 dev_dbg(dev, "set handshake - retC = %d", ret);
3525
3526 /* device resume */
3527 - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
3528 + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
3529
3530 /* get hw/sw revision? */
3531 - mce_async_out(ir, GET_REVISION, sizeof(GET_REVISION));
3532 + mce_command_out(ir, GET_REVISION, sizeof(GET_REVISION));
3533
3534 kfree(data);
3535 }
3536 @@ -1337,13 +1395,13 @@ static void mceusb_gen1_init(struct mceusb_dev *ir)
3537 static void mceusb_gen2_init(struct mceusb_dev *ir)
3538 {
3539 /* device resume */
3540 - mce_async_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
3541 + mce_command_out(ir, DEVICE_RESUME, sizeof(DEVICE_RESUME));
3542
3543 /* get wake version (protocol, key, address) */
3544 - mce_async_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
3545 + mce_command_out(ir, GET_WAKEVERSION, sizeof(GET_WAKEVERSION));
3546
3547 /* unknown what this one actually returns... */
3548 - mce_async_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
3549 + mce_command_out(ir, GET_UNKNOWN2, sizeof(GET_UNKNOWN2));
3550 }
3551
3552 static void mceusb_get_parameters(struct mceusb_dev *ir)
3553 @@ -1357,24 +1415,24 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
3554 ir->num_rxports = 2;
3555
3556 /* get number of tx and rx ports */
3557 - mce_async_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
3558 + mce_command_out(ir, GET_NUM_PORTS, sizeof(GET_NUM_PORTS));
3559
3560 /* get the carrier and frequency */
3561 - mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
3562 + mce_command_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
3563
3564 if (ir->num_txports && !ir->flags.no_tx)
3565 /* get the transmitter bitmask */
3566 - mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
3567 + mce_command_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
3568
3569 /* get receiver timeout value */
3570 - mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
3571 + mce_command_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
3572
3573 /* get receiver sensor setting */
3574 - mce_async_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
3575 + mce_command_out(ir, GET_RX_SENSOR, sizeof(GET_RX_SENSOR));
3576
3577 for (i = 0; i < ir->num_txports; i++) {
3578 cmdbuf[2] = i;
3579 - mce_async_out(ir, cmdbuf, sizeof(cmdbuf));
3580 + mce_command_out(ir, cmdbuf, sizeof(cmdbuf));
3581 }
3582 }
3583
3584 @@ -1383,7 +1441,7 @@ static void mceusb_flash_led(struct mceusb_dev *ir)
3585 if (ir->emver < 2)
3586 return;
3587
3588 - mce_async_out(ir, FLASH_LED, sizeof(FLASH_LED));
3589 + mce_command_out(ir, FLASH_LED, sizeof(FLASH_LED));
3590 }
3591
3592 /*
3593 diff --git a/drivers/media/rc/mtk-cir.c b/drivers/media/rc/mtk-cir.c
3594 index e42efd9d382e..d37b85d2bc75 100644
3595 --- a/drivers/media/rc/mtk-cir.c
3596 +++ b/drivers/media/rc/mtk-cir.c
3597 @@ -44,6 +44,11 @@
3598 /* Fields containing pulse width data */
3599 #define MTK_WIDTH_MASK (GENMASK(7, 0))
3600
3601 +/* IR threshold */
3602 +#define MTK_IRTHD 0x14
3603 +#define MTK_DG_CNT_MASK (GENMASK(12, 8))
3604 +#define MTK_DG_CNT(x) ((x) << 8)
3605 +
3606 /* Bit to enable interrupt */
3607 #define MTK_IRINT_EN BIT(0)
3608
3609 @@ -409,6 +414,9 @@ static int mtk_ir_probe(struct platform_device *pdev)
3610 mtk_w32_mask(ir, val, ir->data->fields[MTK_HW_PERIOD].mask,
3611 ir->data->fields[MTK_HW_PERIOD].reg);
3612
3613 + /* Set de-glitch counter */
3614 + mtk_w32_mask(ir, MTK_DG_CNT(1), MTK_DG_CNT_MASK, MTK_IRTHD);
3615 +
3616 /* Enable IR and PWM */
3617 val = mtk_r32(ir, MTK_CONFIG_HIGH_REG);
3618 val |= MTK_OK_COUNT(ir->data->ok_count) | MTK_PWM_EN | MTK_IR_EN;
3619 diff --git a/drivers/media/usb/cpia2/cpia2_usb.c b/drivers/media/usb/cpia2/cpia2_usb.c
3620 index f5b04594e209..4c191fcd3a7f 100644
3621 --- a/drivers/media/usb/cpia2/cpia2_usb.c
3622 +++ b/drivers/media/usb/cpia2/cpia2_usb.c
3623 @@ -685,6 +685,10 @@ static int submit_urbs(struct camera_data *cam)
3624 if (!urb) {
3625 for (j = 0; j < i; j++)
3626 usb_free_urb(cam->sbuf[j].urb);
3627 + for (j = 0; j < NUM_SBUF; j++) {
3628 + kfree(cam->sbuf[j].data);
3629 + cam->sbuf[j].data = NULL;
3630 + }
3631 return -ENOMEM;
3632 }
3633
3634 diff --git a/drivers/media/usb/dvb-usb/dib0700_devices.c b/drivers/media/usb/dvb-usb/dib0700_devices.c
3635 index 091389fdf89e..c8d79502827b 100644
3636 --- a/drivers/media/usb/dvb-usb/dib0700_devices.c
3637 +++ b/drivers/media/usb/dvb-usb/dib0700_devices.c
3638 @@ -2442,9 +2442,13 @@ static int dib9090_tuner_attach(struct dvb_usb_adapter *adap)
3639 8, 0x0486,
3640 };
3641
3642 + if (!IS_ENABLED(CONFIG_DVB_DIB9000))
3643 + return -ENODEV;
3644 if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &dib9090_dib0090_config) == NULL)
3645 return -ENODEV;
3646 i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
3647 + if (!i2c)
3648 + return -ENODEV;
3649 if (dib01x0_pmu_update(i2c, data_dib190, 10) != 0)
3650 return -ENODEV;
3651 dib0700_set_i2c_speed(adap->dev, 1500);
3652 @@ -2520,10 +2524,14 @@ static int nim9090md_tuner_attach(struct dvb_usb_adapter *adap)
3653 0, 0x00ef,
3654 8, 0x0406,
3655 };
3656 + if (!IS_ENABLED(CONFIG_DVB_DIB9000))
3657 + return -ENODEV;
3658 i2c = dib9000_get_tuner_interface(adap->fe_adap[0].fe);
3659 if (dvb_attach(dib0090_fw_register, adap->fe_adap[0].fe, i2c, &nim9090md_dib0090_config[0]) == NULL)
3660 return -ENODEV;
3661 i2c = dib9000_get_i2c_master(adap->fe_adap[0].fe, DIBX000_I2C_INTERFACE_GPIO_1_2, 0);
3662 + if (!i2c)
3663 + return -ENODEV;
3664 if (dib01x0_pmu_update(i2c, data_dib190, 10) < 0)
3665 return -ENODEV;
3666
3667 diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
3668 index 0af74383083d..ae793dac4964 100644
3669 --- a/drivers/media/usb/dvb-usb/pctv452e.c
3670 +++ b/drivers/media/usb/dvb-usb/pctv452e.c
3671 @@ -913,14 +913,6 @@ static int pctv452e_frontend_attach(struct dvb_usb_adapter *a)
3672 &a->dev->i2c_adap);
3673 if (!a->fe_adap[0].fe)
3674 return -ENODEV;
3675 -
3676 - /*
3677 - * dvb_frontend will call dvb_detach for both stb0899_detach
3678 - * and stb0899_release but we only do dvb_attach(stb0899_attach).
3679 - * Increment the module refcount instead.
3680 - */
3681 - symbol_get(stb0899_attach);
3682 -
3683 if ((dvb_attach(lnbp22_attach, a->fe_adap[0].fe,
3684 &a->dev->i2c_adap)) == NULL)
3685 err("Cannot attach lnbp22\n");
3686 diff --git a/drivers/media/usb/em28xx/em28xx-cards.c b/drivers/media/usb/em28xx/em28xx-cards.c
3687 index 87b887b7604e..3f59a98dbf9a 100644
3688 --- a/drivers/media/usb/em28xx/em28xx-cards.c
3689 +++ b/drivers/media/usb/em28xx/em28xx-cards.c
3690 @@ -4020,7 +4020,6 @@ static void em28xx_usb_disconnect(struct usb_interface *intf)
3691 dev->dev_next->disconnected = 1;
3692 dev_info(&dev->intf->dev, "Disconnecting %s\n",
3693 dev->dev_next->name);
3694 - flush_request_modules(dev->dev_next);
3695 }
3696
3697 dev->disconnected = 1;
3698 diff --git a/drivers/media/usb/gspca/konica.c b/drivers/media/usb/gspca/konica.c
3699 index 989ae997f66d..89b9293b31be 100644
3700 --- a/drivers/media/usb/gspca/konica.c
3701 +++ b/drivers/media/usb/gspca/konica.c
3702 @@ -123,6 +123,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 value, u16 index)
3703 if (ret < 0) {
3704 pr_err("reg_r err %d\n", ret);
3705 gspca_dev->usb_err = ret;
3706 + /*
3707 + * Make sure the buffer is zeroed to avoid uninitialized
3708 + * values.
3709 + */
3710 + memset(gspca_dev->usb_buf, 0, 2);
3711 }
3712 }
3713
3714 diff --git a/drivers/media/usb/gspca/nw80x.c b/drivers/media/usb/gspca/nw80x.c
3715 index bedc04a72e97..bde4441f935e 100644
3716 --- a/drivers/media/usb/gspca/nw80x.c
3717 +++ b/drivers/media/usb/gspca/nw80x.c
3718 @@ -1581,6 +1581,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
3719 if (ret < 0) {
3720 pr_err("reg_r err %d\n", ret);
3721 gspca_dev->usb_err = ret;
3722 + /*
3723 + * Make sure the buffer is zeroed to avoid uninitialized
3724 + * values.
3725 + */
3726 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3727 return;
3728 }
3729 if (len == 1)
3730 diff --git a/drivers/media/usb/gspca/ov519.c b/drivers/media/usb/gspca/ov519.c
3731 index 10fcbe9e8614..cb41e61d50dd 100644
3732 --- a/drivers/media/usb/gspca/ov519.c
3733 +++ b/drivers/media/usb/gspca/ov519.c
3734 @@ -2083,6 +2083,11 @@ static int reg_r(struct sd *sd, u16 index)
3735 } else {
3736 gspca_err(gspca_dev, "reg_r %02x failed %d\n", index, ret);
3737 sd->gspca_dev.usb_err = ret;
3738 + /*
3739 + * Make sure the result is zeroed to avoid uninitialized
3740 + * values.
3741 + */
3742 + gspca_dev->usb_buf[0] = 0;
3743 }
3744
3745 return ret;
3746 @@ -2111,6 +2116,11 @@ static int reg_r8(struct sd *sd,
3747 } else {
3748 gspca_err(gspca_dev, "reg_r8 %02x failed %d\n", index, ret);
3749 sd->gspca_dev.usb_err = ret;
3750 + /*
3751 + * Make sure the buffer is zeroed to avoid uninitialized
3752 + * values.
3753 + */
3754 + memset(gspca_dev->usb_buf, 0, 8);
3755 }
3756
3757 return ret;
3758 diff --git a/drivers/media/usb/gspca/ov534.c b/drivers/media/usb/gspca/ov534.c
3759 index d06dc0755b9a..9e3326b66c79 100644
3760 --- a/drivers/media/usb/gspca/ov534.c
3761 +++ b/drivers/media/usb/gspca/ov534.c
3762 @@ -642,6 +642,11 @@ static u8 ov534_reg_read(struct gspca_dev *gspca_dev, u16 reg)
3763 if (ret < 0) {
3764 pr_err("read failed %d\n", ret);
3765 gspca_dev->usb_err = ret;
3766 + /*
3767 + * Make sure the result is zeroed to avoid uninitialized
3768 + * values.
3769 + */
3770 + gspca_dev->usb_buf[0] = 0;
3771 }
3772 return gspca_dev->usb_buf[0];
3773 }
3774 diff --git a/drivers/media/usb/gspca/ov534_9.c b/drivers/media/usb/gspca/ov534_9.c
3775 index 3d1364d2f83e..4d4ae22e9640 100644
3776 --- a/drivers/media/usb/gspca/ov534_9.c
3777 +++ b/drivers/media/usb/gspca/ov534_9.c
3778 @@ -1154,6 +1154,7 @@ static u8 reg_r(struct gspca_dev *gspca_dev, u16 reg)
3779 if (ret < 0) {
3780 pr_err("reg_r err %d\n", ret);
3781 gspca_dev->usb_err = ret;
3782 + return 0;
3783 }
3784 return gspca_dev->usb_buf[0];
3785 }
3786 diff --git a/drivers/media/usb/gspca/se401.c b/drivers/media/usb/gspca/se401.c
3787 index 477da0664b7d..40b87717bb5c 100644
3788 --- a/drivers/media/usb/gspca/se401.c
3789 +++ b/drivers/media/usb/gspca/se401.c
3790 @@ -111,6 +111,11 @@ static void se401_read_req(struct gspca_dev *gspca_dev, u16 req, int silent)
3791 pr_err("read req failed req %#04x error %d\n",
3792 req, err);
3793 gspca_dev->usb_err = err;
3794 + /*
3795 + * Make sure the buffer is zeroed to avoid uninitialized
3796 + * values.
3797 + */
3798 + memset(gspca_dev->usb_buf, 0, READ_REQ_SIZE);
3799 }
3800 }
3801
3802 diff --git a/drivers/media/usb/gspca/sn9c20x.c b/drivers/media/usb/gspca/sn9c20x.c
3803 index cfa2a04d9f3f..efca54ee0f35 100644
3804 --- a/drivers/media/usb/gspca/sn9c20x.c
3805 +++ b/drivers/media/usb/gspca/sn9c20x.c
3806 @@ -132,6 +132,13 @@ static const struct dmi_system_id flip_dmi_table[] = {
3807 DMI_MATCH(DMI_PRODUCT_VERSION, "0341")
3808 }
3809 },
3810 + {
3811 + .ident = "MSI MS-1039",
3812 + .matches = {
3813 + DMI_MATCH(DMI_SYS_VENDOR, "MICRO-STAR INT'L CO.,LTD."),
3814 + DMI_MATCH(DMI_PRODUCT_NAME, "MS-1039"),
3815 + }
3816 + },
3817 {
3818 .ident = "MSI MS-1632",
3819 .matches = {
3820 @@ -918,6 +925,11 @@ static void reg_r(struct gspca_dev *gspca_dev, u16 reg, u16 length)
3821 if (unlikely(result < 0 || result != length)) {
3822 pr_err("Read register %02x failed %d\n", reg, result);
3823 gspca_dev->usb_err = result;
3824 + /*
3825 + * Make sure the buffer is zeroed to avoid uninitialized
3826 + * values.
3827 + */
3828 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3829 }
3830 }
3831
3832 diff --git a/drivers/media/usb/gspca/sonixb.c b/drivers/media/usb/gspca/sonixb.c
3833 index 5f3f2979540a..22de65d840dd 100644
3834 --- a/drivers/media/usb/gspca/sonixb.c
3835 +++ b/drivers/media/usb/gspca/sonixb.c
3836 @@ -462,6 +462,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
3837 dev_err(gspca_dev->v4l2_dev.dev,
3838 "Error reading register %02x: %d\n", value, res);
3839 gspca_dev->usb_err = res;
3840 + /*
3841 + * Make sure the result is zeroed to avoid uninitialized
3842 + * values.
3843 + */
3844 + gspca_dev->usb_buf[0] = 0;
3845 }
3846 }
3847
3848 diff --git a/drivers/media/usb/gspca/sonixj.c b/drivers/media/usb/gspca/sonixj.c
3849 index df8d8482b795..fa108ce000ad 100644
3850 --- a/drivers/media/usb/gspca/sonixj.c
3851 +++ b/drivers/media/usb/gspca/sonixj.c
3852 @@ -1171,6 +1171,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
3853 if (ret < 0) {
3854 pr_err("reg_r err %d\n", ret);
3855 gspca_dev->usb_err = ret;
3856 + /*
3857 + * Make sure the buffer is zeroed to avoid uninitialized
3858 + * values.
3859 + */
3860 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3861 }
3862 }
3863
3864 diff --git a/drivers/media/usb/gspca/spca1528.c b/drivers/media/usb/gspca/spca1528.c
3865 index d25924e430f3..a20eb8580db2 100644
3866 --- a/drivers/media/usb/gspca/spca1528.c
3867 +++ b/drivers/media/usb/gspca/spca1528.c
3868 @@ -80,6 +80,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
3869 if (ret < 0) {
3870 pr_err("reg_r err %d\n", ret);
3871 gspca_dev->usb_err = ret;
3872 + /*
3873 + * Make sure the buffer is zeroed to avoid uninitialized
3874 + * values.
3875 + */
3876 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3877 }
3878 }
3879
3880 diff --git a/drivers/media/usb/gspca/sq930x.c b/drivers/media/usb/gspca/sq930x.c
3881 index d7cbcf2b3947..3521f5ff428e 100644
3882 --- a/drivers/media/usb/gspca/sq930x.c
3883 +++ b/drivers/media/usb/gspca/sq930x.c
3884 @@ -434,6 +434,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
3885 if (ret < 0) {
3886 pr_err("reg_r %04x failed %d\n", value, ret);
3887 gspca_dev->usb_err = ret;
3888 + /*
3889 + * Make sure the buffer is zeroed to avoid uninitialized
3890 + * values.
3891 + */
3892 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3893 }
3894 }
3895
3896 diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
3897 index 437a3367ab97..26eae69a2562 100644
3898 --- a/drivers/media/usb/gspca/sunplus.c
3899 +++ b/drivers/media/usb/gspca/sunplus.c
3900 @@ -264,6 +264,11 @@ static void reg_r(struct gspca_dev *gspca_dev,
3901 if (ret < 0) {
3902 pr_err("reg_r err %d\n", ret);
3903 gspca_dev->usb_err = ret;
3904 + /*
3905 + * Make sure the buffer is zeroed to avoid uninitialized
3906 + * values.
3907 + */
3908 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3909 }
3910 }
3911
3912 diff --git a/drivers/media/usb/gspca/vc032x.c b/drivers/media/usb/gspca/vc032x.c
3913 index 52d071659634..6e32264d3825 100644
3914 --- a/drivers/media/usb/gspca/vc032x.c
3915 +++ b/drivers/media/usb/gspca/vc032x.c
3916 @@ -2915,6 +2915,11 @@ static void reg_r_i(struct gspca_dev *gspca_dev,
3917 if (ret < 0) {
3918 pr_err("reg_r err %d\n", ret);
3919 gspca_dev->usb_err = ret;
3920 + /*
3921 + * Make sure the buffer is zeroed to avoid uninitialized
3922 + * values.
3923 + */
3924 + memset(gspca_dev->usb_buf, 0, USB_BUF_SZ);
3925 }
3926 }
3927 static void reg_r(struct gspca_dev *gspca_dev,
3928 diff --git a/drivers/media/usb/gspca/w996Xcf.c b/drivers/media/usb/gspca/w996Xcf.c
3929 index abfab3de1866..ef0a839f9b8a 100644
3930 --- a/drivers/media/usb/gspca/w996Xcf.c
3931 +++ b/drivers/media/usb/gspca/w996Xcf.c
3932 @@ -143,6 +143,11 @@ static int w9968cf_read_sb(struct sd *sd)
3933 } else {
3934 pr_err("Read SB reg [01] failed\n");
3935 sd->gspca_dev.usb_err = ret;
3936 + /*
3937 + * Make sure the buffer is zeroed to avoid uninitialized
3938 + * values.
3939 + */
3940 + memset(sd->gspca_dev.usb_buf, 0, 2);
3941 }
3942
3943 udelay(W9968CF_I2C_BUS_DELAY);
3944 diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
3945 index 29ac7fc5b039..3316a17c141b 100644
3946 --- a/drivers/media/usb/hdpvr/hdpvr-core.c
3947 +++ b/drivers/media/usb/hdpvr/hdpvr-core.c
3948 @@ -141,6 +141,7 @@ static int device_authorization(struct hdpvr_device *dev)
3949
3950 dev->fw_ver = dev->usbc_buf[1];
3951
3952 + dev->usbc_buf[46] = '\0';
3953 v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n",
3954 dev->fw_ver, &dev->usbc_buf[2]);
3955
3956 @@ -275,6 +276,7 @@ static int hdpvr_probe(struct usb_interface *interface,
3957 #endif
3958 size_t buffer_size;
3959 int i;
3960 + int dev_num;
3961 int retval = -ENOMEM;
3962
3963 /* allocate memory for our device state and initialize it */
3964 @@ -372,8 +374,17 @@ static int hdpvr_probe(struct usb_interface *interface,
3965 }
3966 #endif
3967
3968 + dev_num = atomic_inc_return(&dev_nr);
3969 + if (dev_num >= HDPVR_MAX) {
3970 + v4l2_err(&dev->v4l2_dev,
3971 + "max device number reached, device register failed\n");
3972 + atomic_dec(&dev_nr);
3973 + retval = -ENODEV;
3974 + goto reg_fail;
3975 + }
3976 +
3977 retval = hdpvr_register_videodev(dev, &interface->dev,
3978 - video_nr[atomic_inc_return(&dev_nr)]);
3979 + video_nr[dev_num]);
3980 if (retval < 0) {
3981 v4l2_err(&dev->v4l2_dev, "registering videodev failed\n");
3982 goto reg_fail;
3983 diff --git a/drivers/media/usb/ttusb-dec/ttusb_dec.c b/drivers/media/usb/ttusb-dec/ttusb_dec.c
3984 index 44ca66cb9b8f..f34efa7c61b4 100644
3985 --- a/drivers/media/usb/ttusb-dec/ttusb_dec.c
3986 +++ b/drivers/media/usb/ttusb-dec/ttusb_dec.c
3987 @@ -329,7 +329,7 @@ static int ttusb_dec_send_command(struct ttusb_dec *dec, const u8 command,
3988
3989 dprintk("%s\n", __func__);
3990
3991 - b = kmalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
3992 + b = kzalloc(COMMAND_PACKET_SIZE + 4, GFP_KERNEL);
3993 if (!b)
3994 return -ENOMEM;
3995
3996 diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
3997 index b299a24d33f9..d206f2de80d2 100644
3998 --- a/drivers/mmc/core/sdio_irq.c
3999 +++ b/drivers/mmc/core/sdio_irq.c
4000 @@ -35,6 +35,7 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
4001 {
4002 struct mmc_card *card = host->card;
4003 int i, ret, count;
4004 + bool sdio_irq_pending = host->sdio_irq_pending;
4005 unsigned char pending;
4006 struct sdio_func *func;
4007
4008 @@ -42,13 +43,16 @@ static int process_sdio_pending_irqs(struct mmc_host *host)
4009 if (mmc_card_suspended(card))
4010 return 0;
4011
4012 + /* Clear the flag to indicate that we have processed the IRQ. */
4013 + host->sdio_irq_pending = false;
4014 +
4015 /*
4016 * Optimization, if there is only 1 function interrupt registered
4017 * and we know an IRQ was signaled then call irq handler directly.
4018 * Otherwise do the full probe.
4019 */
4020 func = card->sdio_single_irq;
4021 - if (func && host->sdio_irq_pending) {
4022 + if (func && sdio_irq_pending) {
4023 func->irq_handler(func);
4024 return 1;
4025 }
4026 @@ -100,7 +104,6 @@ void sdio_run_irqs(struct mmc_host *host)
4027 {
4028 mmc_claim_host(host);
4029 if (host->sdio_irqs) {
4030 - host->sdio_irq_pending = true;
4031 process_sdio_pending_irqs(host);
4032 if (host->ops->ack_sdio_irq)
4033 host->ops->ack_sdio_irq(host);
4034 @@ -119,6 +122,7 @@ void sdio_irq_work(struct work_struct *work)
4035
4036 void sdio_signal_irq(struct mmc_host *host)
4037 {
4038 + host->sdio_irq_pending = true;
4039 queue_delayed_work(system_wq, &host->sdio_irq_work, 0);
4040 }
4041 EXPORT_SYMBOL_GPL(sdio_signal_irq);
4042 @@ -164,7 +168,6 @@ static int sdio_irq_thread(void *_host)
4043 if (ret)
4044 break;
4045 ret = process_sdio_pending_irqs(host);
4046 - host->sdio_irq_pending = false;
4047 mmc_release_host(host);
4048
4049 /*
4050 diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
4051 index 942da07c9eb8..22c454c7aaca 100644
4052 --- a/drivers/mmc/host/dw_mmc.c
4053 +++ b/drivers/mmc/host/dw_mmc.c
4054 @@ -3486,6 +3486,10 @@ int dw_mci_runtime_resume(struct device *dev)
4055 /* Force setup bus to guarantee available clock output */
4056 dw_mci_setup_bus(host->slot, true);
4057
4058 + /* Re-enable SDIO interrupts. */
4059 + if (sdio_irq_claimed(host->slot->mmc))
4060 + __dw_mci_enable_sdio_irq(host->slot, 1);
4061 +
4062 /* Now that slots are all setup, we can enable card detect */
4063 dw_mci_enable_cd(host);
4064
4065 diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
4066 index c749d3dc1d36..eb33b892b484 100644
4067 --- a/drivers/mmc/host/sdhci.c
4068 +++ b/drivers/mmc/host/sdhci.c
4069 @@ -1713,7 +1713,9 @@ void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
4070 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
4071 else if (timing == MMC_TIMING_UHS_SDR12)
4072 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
4073 - else if (timing == MMC_TIMING_UHS_SDR25)
4074 + else if (timing == MMC_TIMING_SD_HS ||
4075 + timing == MMC_TIMING_MMC_HS ||
4076 + timing == MMC_TIMING_UHS_SDR25)
4077 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
4078 else if (timing == MMC_TIMING_UHS_SDR50)
4079 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
4080 diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c
4081 index 8459115d9d4e..553776cc1d29 100644
4082 --- a/drivers/net/arcnet/arcnet.c
4083 +++ b/drivers/net/arcnet/arcnet.c
4084 @@ -1063,31 +1063,34 @@ EXPORT_SYMBOL(arcnet_interrupt);
4085 static void arcnet_rx(struct net_device *dev, int bufnum)
4086 {
4087 struct arcnet_local *lp = netdev_priv(dev);
4088 - struct archdr pkt;
4089 + union {
4090 + struct archdr pkt;
4091 + char buf[512];
4092 + } rxdata;
4093 struct arc_rfc1201 *soft;
4094 int length, ofs;
4095
4096 - soft = &pkt.soft.rfc1201;
4097 + soft = &rxdata.pkt.soft.rfc1201;
4098
4099 - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE);
4100 - if (pkt.hard.offset[0]) {
4101 - ofs = pkt.hard.offset[0];
4102 + lp->hw.copy_from_card(dev, bufnum, 0, &rxdata.pkt, ARC_HDR_SIZE);
4103 + if (rxdata.pkt.hard.offset[0]) {
4104 + ofs = rxdata.pkt.hard.offset[0];
4105 length = 256 - ofs;
4106 } else {
4107 - ofs = pkt.hard.offset[1];
4108 + ofs = rxdata.pkt.hard.offset[1];
4109 length = 512 - ofs;
4110 }
4111
4112 /* get the full header, if possible */
4113 - if (sizeof(pkt.soft) <= length) {
4114 - lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(pkt.soft));
4115 + if (sizeof(rxdata.pkt.soft) <= length) {
4116 + lp->hw.copy_from_card(dev, bufnum, ofs, soft, sizeof(rxdata.pkt.soft));
4117 } else {
4118 - memset(&pkt.soft, 0, sizeof(pkt.soft));
4119 + memset(&rxdata.pkt.soft, 0, sizeof(rxdata.pkt.soft));
4120 lp->hw.copy_from_card(dev, bufnum, ofs, soft, length);
4121 }
4122
4123 arc_printk(D_DURING, dev, "Buffer #%d: received packet from %02Xh to %02Xh (%d+4 bytes)\n",
4124 - bufnum, pkt.hard.source, pkt.hard.dest, length);
4125 + bufnum, rxdata.pkt.hard.source, rxdata.pkt.hard.dest, length);
4126
4127 dev->stats.rx_packets++;
4128 dev->stats.rx_bytes += length + ARC_HDR_SIZE;
4129 @@ -1096,13 +1099,13 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
4130 if (arc_proto_map[soft->proto]->is_ip) {
4131 if (BUGLVL(D_PROTO)) {
4132 struct ArcProto
4133 - *oldp = arc_proto_map[lp->default_proto[pkt.hard.source]],
4134 + *oldp = arc_proto_map[lp->default_proto[rxdata.pkt.hard.source]],
4135 *newp = arc_proto_map[soft->proto];
4136
4137 if (oldp != newp) {
4138 arc_printk(D_PROTO, dev,
4139 "got protocol %02Xh; encap for host %02Xh is now '%c' (was '%c')\n",
4140 - soft->proto, pkt.hard.source,
4141 + soft->proto, rxdata.pkt.hard.source,
4142 newp->suffix, oldp->suffix);
4143 }
4144 }
4145 @@ -1111,10 +1114,10 @@ static void arcnet_rx(struct net_device *dev, int bufnum)
4146 lp->default_proto[0] = soft->proto;
4147
4148 /* in striking contrast, the following isn't a hack. */
4149 - lp->default_proto[pkt.hard.source] = soft->proto;
4150 + lp->default_proto[rxdata.pkt.hard.source] = soft->proto;
4151 }
4152 /* call the protocol-specific receiver. */
4153 - arc_proto_map[soft->proto]->rx(dev, bufnum, &pkt, length);
4154 + arc_proto_map[soft->proto]->rx(dev, bufnum, &rxdata.pkt, length);
4155 }
4156
4157 static void null_rx(struct net_device *dev, int bufnum,
4158 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
4159 index cdae0efde8e6..7998a73b6a0f 100644
4160 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
4161 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
4162 @@ -1429,6 +1429,16 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
4163 else
4164 phy_reg |= 0xFA;
4165 e1e_wphy_locked(hw, I217_PLL_CLOCK_GATE_REG, phy_reg);
4166 +
4167 + if (speed == SPEED_1000) {
4168 + hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
4169 + &phy_reg);
4170 +
4171 + phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
4172 +
4173 + hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
4174 + phy_reg);
4175 + }
4176 }
4177 hw->phy.ops.release(hw);
4178
4179 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
4180 index eb09c755fa17..1502895eb45d 100644
4181 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
4182 +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
4183 @@ -210,7 +210,7 @@
4184
4185 /* PHY Power Management Control */
4186 #define HV_PM_CTRL PHY_REG(770, 17)
4187 -#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
4188 +#define HV_PM_CTRL_K1_CLK_REQ 0x200
4189 #define HV_PM_CTRL_K1_ENABLE 0x4000
4190
4191 #define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
4192 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
4193 index 4e04985fb430..055562c930fb 100644
4194 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
4195 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
4196 @@ -2566,6 +2566,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
4197 return;
4198 if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
4199 return;
4200 + if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
4201 + set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
4202 + return;
4203 + }
4204
4205 for (v = 0; v < pf->num_alloc_vsi; v++) {
4206 if (pf->vsi[v] &&
4207 @@ -2580,6 +2584,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
4208 }
4209 }
4210 }
4211 + clear_bit(__I40E_VF_DISABLE, pf->state);
4212 }
4213
4214 /**
4215 diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
4216 index 15dea48e0195..d6f8a41c3e35 100644
4217 --- a/drivers/net/ethernet/marvell/skge.c
4218 +++ b/drivers/net/ethernet/marvell/skge.c
4219 @@ -3122,7 +3122,7 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
4220 skb_put(skb, len);
4221
4222 if (dev->features & NETIF_F_RXCSUM) {
4223 - skb->csum = csum;
4224 + skb->csum = le16_to_cpu(csum);
4225 skb->ip_summed = CHECKSUM_COMPLETE;
4226 }
4227
4228 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4229 index 0e820cf92f8a..231ed508c240 100644
4230 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
4231 +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
4232 @@ -1642,6 +1642,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
4233 { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF}, /* ConnectX-6 VF */
4234 { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */
4235 { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */
4236 + { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */
4237 { 0, }
4238 };
4239
4240 diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
4241 index e57d23746585..22c572a09b32 100644
4242 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c
4243 +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
4244 @@ -259,6 +259,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
4245 repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
4246 if (!repr_priv) {
4247 err = -ENOMEM;
4248 + nfp_repr_free(repr);
4249 goto err_reprs_clean;
4250 }
4251
4252 @@ -291,6 +292,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
4253 err = nfp_repr_init(app, repr,
4254 port_id, port, priv->nn->dp.netdev);
4255 if (err) {
4256 + kfree(repr_priv);
4257 nfp_port_free(port);
4258 nfp_repr_free(repr);
4259 goto err_reprs_clean;
4260 @@ -373,6 +375,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
4261 repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
4262 if (!repr_priv) {
4263 err = -ENOMEM;
4264 + nfp_repr_free(repr);
4265 goto err_reprs_clean;
4266 }
4267
4268 @@ -382,11 +385,13 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
4269 port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
4270 if (IS_ERR(port)) {
4271 err = PTR_ERR(port);
4272 + kfree(repr_priv);
4273 nfp_repr_free(repr);
4274 goto err_reprs_clean;
4275 }
4276 err = nfp_port_init_phy_port(app->pf, app, port, i);
4277 if (err) {
4278 + kfree(repr_priv);
4279 nfp_port_free(port);
4280 nfp_repr_free(repr);
4281 goto err_reprs_clean;
4282 @@ -399,6 +404,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
4283 err = nfp_repr_init(app, repr,
4284 cmsg_port_id, port, priv->nn->dp.netdev);
4285 if (err) {
4286 + kfree(repr_priv);
4287 nfp_port_free(port);
4288 nfp_repr_free(repr);
4289 goto err_reprs_clean;
4290 diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
4291 index 08381ef8bdb4..41d30f55c946 100644
4292 --- a/drivers/net/ethernet/nxp/lpc_eth.c
4293 +++ b/drivers/net/ethernet/nxp/lpc_eth.c
4294 @@ -1371,13 +1371,14 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
4295 pldat->dma_buff_base_p = dma_handle;
4296
4297 netdev_dbg(ndev, "IO address space :%pR\n", res);
4298 - netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
4299 + netdev_dbg(ndev, "IO address size :%zd\n",
4300 + (size_t)resource_size(res));
4301 netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
4302 pldat->net_base);
4303 netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
4304 - netdev_dbg(ndev, "DMA buffer size :%d\n", pldat->dma_buff_size);
4305 - netdev_dbg(ndev, "DMA buffer P address :0x%08x\n",
4306 - pldat->dma_buff_base_p);
4307 + netdev_dbg(ndev, "DMA buffer size :%zd\n", pldat->dma_buff_size);
4308 + netdev_dbg(ndev, "DMA buffer P address :%pad\n",
4309 + &pldat->dma_buff_base_p);
4310 netdev_dbg(ndev, "DMA buffer V address :0x%p\n",
4311 pldat->dma_buff_base_v);
4312
4313 @@ -1424,8 +1425,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
4314 if (ret)
4315 goto err_out_unregister_netdev;
4316
4317 - netdev_info(ndev, "LPC mac at 0x%08x irq %d\n",
4318 - res->start, ndev->irq);
4319 + netdev_info(ndev, "LPC mac at 0x%08lx irq %d\n",
4320 + (unsigned long)res->start, ndev->irq);
4321
4322 phydev = ndev->phydev;
4323
4324 diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
4325 index 2c971357e66c..0dc92d2faa64 100644
4326 --- a/drivers/net/macsec.c
4327 +++ b/drivers/net/macsec.c
4328 @@ -1238,6 +1238,7 @@ deliver:
4329 macsec_rxsa_put(rx_sa);
4330 macsec_rxsc_put(rx_sc);
4331
4332 + skb_orphan(skb);
4333 ret = gro_cells_receive(&macsec->gro_cells, skb);
4334 if (ret == NET_RX_SUCCESS)
4335 count_rx(dev, skb->len);
4336 diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c
4337 index 2b1e336961f9..bf4070ef6b84 100644
4338 --- a/drivers/net/phy/national.c
4339 +++ b/drivers/net/phy/national.c
4340 @@ -110,14 +110,17 @@ static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
4341
4342 static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
4343 {
4344 + u16 lb_dis = BIT(1);
4345 +
4346 if (disable)
4347 - ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
4348 + ns_exp_write(phydev, 0x1c0,
4349 + ns_exp_read(phydev, 0x1c0) | lb_dis);
4350 else
4351 ns_exp_write(phydev, 0x1c0,
4352 - ns_exp_read(phydev, 0x1c0) & 0xfffe);
4353 + ns_exp_read(phydev, 0x1c0) & ~lb_dis);
4354
4355 pr_debug("10BASE-T HDX loopback %s\n",
4356 - (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
4357 + (ns_exp_read(phydev, 0x1c0) & lb_dis) ? "off" : "on");
4358 }
4359
4360 static int ns_config_init(struct phy_device *phydev)
4361 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
4362 index 02ad03a2fab7..3e014ecffef8 100644
4363 --- a/drivers/net/ppp/ppp_generic.c
4364 +++ b/drivers/net/ppp/ppp_generic.c
4365 @@ -1419,6 +1419,8 @@ static void __ppp_xmit_process(struct ppp *ppp, struct sk_buff *skb)
4366 netif_wake_queue(ppp->dev);
4367 else
4368 netif_stop_queue(ppp->dev);
4369 + } else {
4370 + kfree_skb(skb);
4371 }
4372 ppp_xmit_unlock(ppp);
4373 }
4374 diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
4375 index 1eaec648bd1f..f53e3e4e25f3 100644
4376 --- a/drivers/net/usb/cdc_ncm.c
4377 +++ b/drivers/net/usb/cdc_ncm.c
4378 @@ -681,8 +681,12 @@ cdc_ncm_find_endpoints(struct usbnet *dev, struct usb_interface *intf)
4379 u8 ep;
4380
4381 for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
4382 -
4383 e = intf->cur_altsetting->endpoint + ep;
4384 +
4385 + /* ignore endpoints which cannot transfer data */
4386 + if (!usb_endpoint_maxp(&e->desc))
4387 + continue;
4388 +
4389 switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
4390 case USB_ENDPOINT_XFER_INT:
4391 if (usb_endpoint_dir_in(&e->desc)) {
4392 diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
4393 index 10854977c55f..84b354f76dea 100644
4394 --- a/drivers/net/usb/usbnet.c
4395 +++ b/drivers/net/usb/usbnet.c
4396 @@ -112,6 +112,11 @@ int usbnet_get_endpoints(struct usbnet *dev, struct usb_interface *intf)
4397 int intr = 0;
4398
4399 e = alt->endpoint + ep;
4400 +
4401 + /* ignore endpoints which cannot transfer data */
4402 + if (!usb_endpoint_maxp(&e->desc))
4403 + continue;
4404 +
4405 switch (e->desc.bmAttributes) {
4406 case USB_ENDPOINT_XFER_INT:
4407 if (!usb_endpoint_dir_in(&e->desc))
4408 @@ -351,6 +356,8 @@ void usbnet_update_max_qlen(struct usbnet *dev)
4409 {
4410 enum usb_device_speed speed = dev->udev->speed;
4411
4412 + if (!dev->rx_urb_size || !dev->hard_mtu)
4413 + goto insanity;
4414 switch (speed) {
4415 case USB_SPEED_HIGH:
4416 dev->rx_qlen = MAX_QUEUE_MEMORY / dev->rx_urb_size;
4417 @@ -367,6 +374,7 @@ void usbnet_update_max_qlen(struct usbnet *dev)
4418 dev->tx_qlen = 5 * MAX_QUEUE_MEMORY / dev->hard_mtu;
4419 break;
4420 default:
4421 +insanity:
4422 dev->rx_qlen = dev->tx_qlen = 4;
4423 }
4424 }
4425 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
4426 index 3fe7605a2cca..9cb9f0544c9b 100644
4427 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
4428 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
4429 @@ -843,11 +843,13 @@ static bool iwl_mvm_sar_geo_support(struct iwl_mvm *mvm)
4430 * firmware versions. Unfortunately, we don't have a TLV API
4431 * flag to rely on, so rely on the major version which is in
4432 * the first byte of ucode_ver. This was implemented
4433 - * initially on version 38 and then backported to 36, 29 and
4434 - * 17.
4435 + * initially on version 38 and then backported to 29 and 17.
4436 + * The intention was to have it in 36 as well, but not all
4437 + * 8000 family got this feature enabled. The 8000 family is
4438 + * the only one using version 36, so skip this version
4439 + * entirely.
4440 */
4441 return IWL_UCODE_SERIAL(mvm->fw->ucode_ver) >= 38 ||
4442 - IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 36 ||
4443 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 29 ||
4444 IWL_UCODE_SERIAL(mvm->fw->ucode_ver) == 17;
4445 }
4446 diff --git a/drivers/net/wireless/marvell/libertas/if_usb.c b/drivers/net/wireless/marvell/libertas/if_usb.c
4447 index 3dbfce972c56..9e82ec12564b 100644
4448 --- a/drivers/net/wireless/marvell/libertas/if_usb.c
4449 +++ b/drivers/net/wireless/marvell/libertas/if_usb.c
4450 @@ -49,7 +49,8 @@ static const struct lbs_fw_table fw_table[] = {
4451 { MODEL_8388, "libertas/usb8388_v5.bin", NULL },
4452 { MODEL_8388, "libertas/usb8388.bin", NULL },
4453 { MODEL_8388, "usb8388.bin", NULL },
4454 - { MODEL_8682, "libertas/usb8682.bin", NULL }
4455 + { MODEL_8682, "libertas/usb8682.bin", NULL },
4456 + { 0, NULL, NULL }
4457 };
4458
4459 static const struct usb_device_id if_usb_table[] = {
4460 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
4461 index f57feb8fdea4..892ef5212232 100644
4462 --- a/drivers/nvme/host/multipath.c
4463 +++ b/drivers/nvme/host/multipath.c
4464 @@ -404,14 +404,16 @@ static int nvme_update_ana_state(struct nvme_ctrl *ctrl,
4465
4466 down_write(&ctrl->namespaces_rwsem);
4467 list_for_each_entry(ns, &ctrl->namespaces, list) {
4468 - if (ns->head->ns_id != le32_to_cpu(desc->nsids[n]))
4469 + unsigned nsid = le32_to_cpu(desc->nsids[n]);
4470 +
4471 + if (ns->head->ns_id < nsid)
4472 continue;
4473 - nvme_update_ns_ana_state(desc, ns);
4474 + if (ns->head->ns_id == nsid)
4475 + nvme_update_ns_ana_state(desc, ns);
4476 if (++n == nr_nsids)
4477 break;
4478 }
4479 up_write(&ctrl->namespaces_rwsem);
4480 - WARN_ON_ONCE(n < nr_nsids);
4481 return 0;
4482 }
4483
4484 diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
4485 index 2008fa62a373..a8eb8784e151 100644
4486 --- a/drivers/nvme/target/admin-cmd.c
4487 +++ b/drivers/nvme/target/admin-cmd.c
4488 @@ -68,9 +68,11 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
4489 goto out;
4490
4491 host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
4492 - data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]);
4493 + data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
4494 + sectors[READ]), 1000);
4495 host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
4496 - data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
4497 + data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
4498 + sectors[WRITE]), 1000);
4499
4500 put_unaligned_le64(host_reads, &slog->host_reads[0]);
4501 put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
4502 @@ -98,11 +100,11 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
4503 if (!ns->bdev)
4504 continue;
4505 host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
4506 - data_units_read +=
4507 - part_stat_read(ns->bdev->bd_part, sectors[READ]);
4508 + data_units_read += DIV_ROUND_UP(
4509 + part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
4510 host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
4511 - data_units_written +=
4512 - part_stat_read(ns->bdev->bd_part, sectors[WRITE]);
4513 + data_units_written += DIV_ROUND_UP(
4514 + part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
4515
4516 }
4517 rcu_read_unlock();
4518 diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
4519 index 7390fb8ca9d1..29df6ab29e95 100644
4520 --- a/drivers/parisc/dino.c
4521 +++ b/drivers/parisc/dino.c
4522 @@ -160,6 +160,15 @@ struct dino_device
4523 (struct dino_device *)__pdata; })
4524
4525
4526 +/* Check if PCI device is behind a Card-mode Dino. */
4527 +static int pci_dev_is_behind_card_dino(struct pci_dev *dev)
4528 +{
4529 + struct dino_device *dino_dev;
4530 +
4531 + dino_dev = DINO_DEV(parisc_walk_tree(dev->bus->bridge));
4532 + return is_card_dino(&dino_dev->hba.dev->id);
4533 +}
4534 +
4535 /*
4536 * Dino Configuration Space Accessor Functions
4537 */
4538 @@ -442,6 +451,21 @@ static void quirk_cirrus_cardbus(struct pci_dev *dev)
4539 }
4540 DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_6832, quirk_cirrus_cardbus );
4541
4542 +#ifdef CONFIG_TULIP
4543 +static void pci_fixup_tulip(struct pci_dev *dev)
4544 +{
4545 + if (!pci_dev_is_behind_card_dino(dev))
4546 + return;
4547 + if (!(pci_resource_flags(dev, 1) & IORESOURCE_MEM))
4548 + return;
4549 + pr_warn("%s: HP HSC-PCI Cards with card-mode Dino not yet supported.\n",
4550 + pci_name(dev));
4551 + /* Disable this card by zeroing the PCI resources */
4552 + memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
4553 + memset(&dev->resource[1], 0, sizeof(dev->resource[1]));
4554 +}
4555 +DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_DEC, PCI_ANY_ID, pci_fixup_tulip);
4556 +#endif /* CONFIG_TULIP */
4557
4558 static void __init
4559 dino_bios_init(void)
4560 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
4561 index 088d1c2047e6..36bd2545afb6 100644
4562 --- a/drivers/platform/x86/intel_pmc_core.c
4563 +++ b/drivers/platform/x86/intel_pmc_core.c
4564 @@ -685,10 +685,14 @@ static int __init pmc_core_probe(void)
4565 if (pmcdev->map == &spt_reg_map && !pci_dev_present(pmc_pci_ids))
4566 pmcdev->map = &cnp_reg_map;
4567
4568 - if (lpit_read_residency_count_address(&slp_s0_addr))
4569 + if (lpit_read_residency_count_address(&slp_s0_addr)) {
4570 pmcdev->base_addr = PMC_BASE_ADDR_DEFAULT;
4571 - else
4572 +
4573 + if (page_is_ram(PHYS_PFN(pmcdev->base_addr)))
4574 + return -ENODEV;
4575 + } else {
4576 pmcdev->base_addr = slp_s0_addr - pmcdev->map->slp_s0_offset;
4577 + }
4578
4579 pmcdev->regbase = ioremap(pmcdev->base_addr,
4580 pmcdev->map->regmap_length);
4581 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
4582 index 9577d8941846..f312764660e6 100644
4583 --- a/drivers/regulator/core.c
4584 +++ b/drivers/regulator/core.c
4585 @@ -4789,7 +4789,7 @@ static int __init regulator_init(void)
4586 /* init early to allow our consumers to complete system booting */
4587 core_initcall(regulator_init);
4588
4589 -static int __init regulator_late_cleanup(struct device *dev, void *data)
4590 +static int regulator_late_cleanup(struct device *dev, void *data)
4591 {
4592 struct regulator_dev *rdev = dev_to_rdev(dev);
4593 const struct regulator_ops *ops = rdev->desc->ops;
4594 @@ -4838,17 +4838,8 @@ unlock:
4595 return 0;
4596 }
4597
4598 -static int __init regulator_init_complete(void)
4599 +static void regulator_init_complete_work_function(struct work_struct *work)
4600 {
4601 - /*
4602 - * Since DT doesn't provide an idiomatic mechanism for
4603 - * enabling full constraints and since it's much more natural
4604 - * with DT to provide them just assume that a DT enabled
4605 - * system has full constraints.
4606 - */
4607 - if (of_have_populated_dt())
4608 - has_full_constraints = true;
4609 -
4610 /*
4611 * Regulators may had failed to resolve their input supplies
4612 * when were registered, either because the input supply was
4613 @@ -4866,6 +4857,35 @@ static int __init regulator_init_complete(void)
4614 */
4615 class_for_each_device(&regulator_class, NULL, NULL,
4616 regulator_late_cleanup);
4617 +}
4618 +
4619 +static DECLARE_DELAYED_WORK(regulator_init_complete_work,
4620 + regulator_init_complete_work_function);
4621 +
4622 +static int __init regulator_init_complete(void)
4623 +{
4624 + /*
4625 + * Since DT doesn't provide an idiomatic mechanism for
4626 + * enabling full constraints and since it's much more natural
4627 + * with DT to provide them just assume that a DT enabled
4628 + * system has full constraints.
4629 + */
4630 + if (of_have_populated_dt())
4631 + has_full_constraints = true;
4632 +
4633 + /*
4634 + * We punt completion for an arbitrary amount of time since
4635 + * systems like distros will load many drivers from userspace
4636 + * so consumers might not always be ready yet, this is
4637 + * particularly an issue with laptops where this might bounce
4638 + * the display off then on. Ideally we'd get a notification
4639 + * from userspace when this happens but we don't so just wait
4640 + * a bit and hope we waited long enough. It'd be better if
4641 + * we'd only do this on systems that need it, and a kernel
4642 + * command line option might be useful.
4643 + */
4644 + schedule_delayed_work(&regulator_init_complete_work,
4645 + msecs_to_jiffies(30000));
4646
4647 class_for_each_device(&regulator_class, NULL, NULL,
4648 regulator_register_fill_coupling_array);
4649 diff --git a/drivers/regulator/lm363x-regulator.c b/drivers/regulator/lm363x-regulator.c
4650 index b615a413ca9f..27c0a67cfd0e 100644
4651 --- a/drivers/regulator/lm363x-regulator.c
4652 +++ b/drivers/regulator/lm363x-regulator.c
4653 @@ -33,7 +33,7 @@
4654
4655 /* LM3632 */
4656 #define LM3632_BOOST_VSEL_MAX 0x26
4657 -#define LM3632_LDO_VSEL_MAX 0x29
4658 +#define LM3632_LDO_VSEL_MAX 0x28
4659 #define LM3632_VBOOST_MIN 4500000
4660 #define LM3632_VLDO_MIN 4000000
4661
4662 diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
4663 index d27fabae8ddd..6c629ef1bc4e 100644
4664 --- a/drivers/scsi/device_handler/scsi_dh_rdac.c
4665 +++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
4666 @@ -546,6 +546,8 @@ static void send_mode_select(struct work_struct *work)
4667 spin_unlock(&ctlr->ms_lock);
4668
4669 retry:
4670 + memset(cdb, 0, sizeof(cdb));
4671 +
4672 data_size = rdac_failover_get(ctlr, &list, cdb);
4673
4674 RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
4675 diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
4676 index 7c1f36b69bdc..bee9cfb29152 100644
4677 --- a/drivers/scsi/qla2xxx/qla_init.c
4678 +++ b/drivers/scsi/qla2xxx/qla_init.c
4679 @@ -216,8 +216,13 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
4680 struct srb_iocb *lio;
4681 int rval = QLA_FUNCTION_FAILED;
4682
4683 - if (!vha->flags.online)
4684 - goto done;
4685 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
4686 + fcport->loop_id == FC_NO_LOOP_ID) {
4687 + ql_log(ql_log_warn, vha, 0xffff,
4688 + "%s: %8phC - not sending command.\n",
4689 + __func__, fcport->port_name);
4690 + return rval;
4691 + }
4692
4693 sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL);
4694 if (!sp)
4695 @@ -1123,8 +1128,13 @@ int qla24xx_async_gpdb(struct scsi_qla_host *vha, fc_port_t *fcport, u8 opt)
4696 struct port_database_24xx *pd;
4697 struct qla_hw_data *ha = vha->hw;
4698
4699 - if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT))
4700 + if (!vha->flags.online || (fcport->flags & FCF_ASYNC_SENT) ||
4701 + fcport->loop_id == FC_NO_LOOP_ID) {
4702 + ql_log(ql_log_warn, vha, 0xffff,
4703 + "%s: %8phC - not sending command.\n",
4704 + __func__, fcport->port_name);
4705 return rval;
4706 + }
4707
4708 fcport->disc_state = DSC_GPDB;
4709
4710 @@ -1904,8 +1914,11 @@ qla24xx_handle_plogi_done_event(struct scsi_qla_host *vha, struct event_arg *ea)
4711 return;
4712 }
4713
4714 - if (fcport->disc_state == DSC_DELETE_PEND)
4715 + if ((fcport->disc_state == DSC_DELETE_PEND) ||
4716 + (fcport->disc_state == DSC_DELETED)) {
4717 + set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
4718 return;
4719 + }
4720
4721 if (ea->sp->gen2 != fcport->login_gen) {
4722 /* target side must have changed it. */
4723 @@ -6557,8 +6570,10 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
4724 }
4725
4726 /* Clear all async request states across all VPs. */
4727 - list_for_each_entry(fcport, &vha->vp_fcports, list)
4728 + list_for_each_entry(fcport, &vha->vp_fcports, list) {
4729 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
4730 + fcport->scan_state = 0;
4731 + }
4732 spin_lock_irqsave(&ha->vport_slock, flags);
4733 list_for_each_entry(vp, &ha->vp_list, list) {
4734 atomic_inc(&vp->vref_count);
4735 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
4736 index 02fa81f122c2..60b6019a2fca 100644
4737 --- a/drivers/scsi/qla2xxx/qla_os.c
4738 +++ b/drivers/scsi/qla2xxx/qla_os.c
4739 @@ -4864,6 +4864,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
4740 if (fcport) {
4741 fcport->id_changed = 1;
4742 fcport->scan_state = QLA_FCPORT_FOUND;
4743 + fcport->chip_reset = vha->hw->base_qpair->chip_reset;
4744 memcpy(fcport->node_name, e->u.new_sess.node_name, WWN_SIZE);
4745
4746 if (pla) {
4747 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
4748 index 9d7feb005acf..7a1cc0b25e59 100644
4749 --- a/drivers/scsi/qla2xxx/qla_target.c
4750 +++ b/drivers/scsi/qla2xxx/qla_target.c
4751 @@ -1216,7 +1216,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
4752 sess->logout_on_delete = 0;
4753 sess->logo_ack_needed = 0;
4754 sess->fw_login_state = DSC_LS_PORT_UNAVAIL;
4755 - sess->scan_state = 0;
4756 }
4757 }
4758
4759 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
4760 index 75b926e70076..abfcc2f924ce 100644
4761 --- a/drivers/scsi/scsi_lib.c
4762 +++ b/drivers/scsi/scsi_lib.c
4763 @@ -1252,6 +1252,18 @@ static void scsi_initialize_rq(struct request *rq)
4764 cmd->retries = 0;
4765 }
4766
4767 +/*
4768 + * Only called when the request isn't completed by SCSI, and not freed by
4769 + * SCSI
4770 + */
4771 +static void scsi_cleanup_rq(struct request *rq)
4772 +{
4773 + if (rq->rq_flags & RQF_DONTPREP) {
4774 + scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
4775 + rq->rq_flags &= ~RQF_DONTPREP;
4776 + }
4777 +}
4778 +
4779 /* Add a command to the list used by the aacraid and dpt_i2o drivers */
4780 void scsi_add_cmd_to_list(struct scsi_cmnd *cmd)
4781 {
4782 @@ -2339,6 +2351,7 @@ static const struct blk_mq_ops scsi_mq_ops = {
4783 .init_request = scsi_mq_init_request,
4784 .exit_request = scsi_mq_exit_request,
4785 .initialize_rq_fn = scsi_initialize_rq,
4786 + .cleanup_rq = scsi_cleanup_rq,
4787 .map_queues = scsi_map_queues,
4788 };
4789
4790 diff --git a/drivers/staging/media/imx/imx6-mipi-csi2.c b/drivers/staging/media/imx/imx6-mipi-csi2.c
4791 index ceeeb3069a02..212fa06f7c57 100644
4792 --- a/drivers/staging/media/imx/imx6-mipi-csi2.c
4793 +++ b/drivers/staging/media/imx/imx6-mipi-csi2.c
4794 @@ -247,7 +247,7 @@ static int __maybe_unused csi2_dphy_wait_ulp(struct csi2_dev *csi2)
4795 }
4796
4797 /* Waits for low-power LP-11 state on data and clock lanes. */
4798 -static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
4799 +static void csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
4800 {
4801 u32 mask, reg;
4802 int ret;
4803 @@ -258,11 +258,9 @@ static int csi2_dphy_wait_stopstate(struct csi2_dev *csi2)
4804 ret = readl_poll_timeout(csi2->base + CSI2_PHY_STATE, reg,
4805 (reg & mask) == mask, 0, 500000);
4806 if (ret) {
4807 - v4l2_err(&csi2->sd, "LP-11 timeout, phy_state = 0x%08x\n", reg);
4808 - return ret;
4809 + v4l2_warn(&csi2->sd, "LP-11 wait timeout, likely a sensor driver bug, expect capture failures.\n");
4810 + v4l2_warn(&csi2->sd, "phy_state = 0x%08x\n", reg);
4811 }
4812 -
4813 - return 0;
4814 }
4815
4816 /* Wait for active clock on the clock lane. */
4817 @@ -320,9 +318,7 @@ static int csi2_start(struct csi2_dev *csi2)
4818 csi2_enable(csi2, true);
4819
4820 /* Step 5 */
4821 - ret = csi2_dphy_wait_stopstate(csi2);
4822 - if (ret)
4823 - goto err_assert_reset;
4824 + csi2_dphy_wait_stopstate(csi2);
4825
4826 /* Step 6 */
4827 ret = v4l2_subdev_call(csi2->src_sd, video, s_stream, 1);
4828 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
4829 index 9f39f0c360e0..cc1006375cac 100644
4830 --- a/drivers/video/fbdev/efifb.c
4831 +++ b/drivers/video/fbdev/efifb.c
4832 @@ -122,28 +122,13 @@ static void efifb_copy_bmp(u8 *src, u32 *dst, int width, struct screen_info *si)
4833 */
4834 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
4835 {
4836 - static const int default_resolutions[][2] = {
4837 - { 800, 600 },
4838 - { 1024, 768 },
4839 - { 1280, 1024 },
4840 - };
4841 - u32 i, right_margin;
4842 -
4843 - for (i = 0; i < ARRAY_SIZE(default_resolutions); i++) {
4844 - if (default_resolutions[i][0] == si->lfb_width &&
4845 - default_resolutions[i][1] == si->lfb_height)
4846 - break;
4847 - }
4848 - /* If not a default resolution used for textmode, this should be fine */
4849 - if (i >= ARRAY_SIZE(default_resolutions))
4850 - return true;
4851 -
4852 - /* If the right margin is 5 times smaller then the left one, reject */
4853 - right_margin = si->lfb_width - (bgrt_tab.image_offset_x + bmp_width);
4854 - if (right_margin < (bgrt_tab.image_offset_x / 5))
4855 - return false;
4856 + /*
4857 + * All x86 firmwares horizontally center the image (the yoffset
4858 + * calculations differ between boards, but xoffset is predictable).
4859 + */
4860 + u32 expected_xoffset = (si->lfb_width - bmp_width) / 2;
4861
4862 - return true;
4863 + return bgrt_tab.image_offset_x == expected_xoffset;
4864 }
4865 #else
4866 static bool efifb_bgrt_sanity_check(struct screen_info *si, u32 bmp_width)
4867 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
4868 index 9a47e4e5dea0..e7fd0b5b9234 100644
4869 --- a/fs/binfmt_elf.c
4870 +++ b/fs/binfmt_elf.c
4871 @@ -1144,7 +1144,8 @@ static int load_elf_binary(struct linux_binprm *bprm)
4872 * (since it grows up, and may collide early with the stack
4873 * growing down), and into the unused ELF_ET_DYN_BASE region.
4874 */
4875 - if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) && !interpreter)
4876 + if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
4877 + loc->elf_ex.e_type == ET_DYN && !interpreter)
4878 current->mm->brk = current->mm->start_brk =
4879 ELF_ET_DYN_BASE;
4880
4881 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
4882 index 79ac1ebabaf7..9fd383285f0e 100644
4883 --- a/fs/btrfs/ctree.c
4884 +++ b/fs/btrfs/ctree.c
4885 @@ -1374,6 +1374,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
4886 struct tree_mod_elem *tm;
4887 struct extent_buffer *eb = NULL;
4888 struct extent_buffer *eb_root;
4889 + u64 eb_root_owner = 0;
4890 struct extent_buffer *old;
4891 struct tree_mod_root *old_root = NULL;
4892 u64 old_generation = 0;
4893 @@ -1411,6 +1412,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
4894 free_extent_buffer(old);
4895 }
4896 } else if (old_root) {
4897 + eb_root_owner = btrfs_header_owner(eb_root);
4898 btrfs_tree_read_unlock(eb_root);
4899 free_extent_buffer(eb_root);
4900 eb = alloc_dummy_extent_buffer(fs_info, logical);
4901 @@ -1428,7 +1430,7 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
4902 if (old_root) {
4903 btrfs_set_header_bytenr(eb, eb->start);
4904 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
4905 - btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
4906 + btrfs_set_header_owner(eb, eb_root_owner);
4907 btrfs_set_header_level(eb, old_root->level);
4908 btrfs_set_header_generation(eb, old_generation);
4909 }
4910 @@ -5514,6 +5516,7 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
4911 advance_left = advance_right = 0;
4912
4913 while (1) {
4914 + cond_resched();
4915 if (advance_left && !left_end_reached) {
4916 ret = tree_advance(fs_info, left_path, &left_level,
4917 left_root_level,
4918 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
4919 index 4644f9b629a5..faca485ccd8f 100644
4920 --- a/fs/btrfs/ctree.h
4921 +++ b/fs/btrfs/ctree.h
4922 @@ -39,6 +39,7 @@ extern struct kmem_cache *btrfs_trans_handle_cachep;
4923 extern struct kmem_cache *btrfs_bit_radix_cachep;
4924 extern struct kmem_cache *btrfs_path_cachep;
4925 extern struct kmem_cache *btrfs_free_space_cachep;
4926 +extern struct kmem_cache *btrfs_free_space_bitmap_cachep;
4927 struct btrfs_ordered_sum;
4928
4929 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
4930 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4931 index 88c939f7aad9..e49e29288049 100644
4932 --- a/fs/btrfs/extent-tree.c
4933 +++ b/fs/btrfs/extent-tree.c
4934 @@ -7367,6 +7367,14 @@ search:
4935 */
4936 if ((flags & extra) && !(block_group->flags & extra))
4937 goto loop;
4938 +
4939 + /*
4940 + * This block group has different flags than we want.
4941 + * It's possible that we have MIXED_GROUP flag but no
4942 + * block group is mixed. Just skip such block group.
4943 + */
4944 + btrfs_release_block_group(block_group, delalloc);
4945 + continue;
4946 }
4947
4948 have_block_group:
4949 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
4950 index 8ecf8c0e5fe6..4381e0aba8c0 100644
4951 --- a/fs/btrfs/free-space-cache.c
4952 +++ b/fs/btrfs/free-space-cache.c
4953 @@ -763,7 +763,8 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
4954 } else {
4955 ASSERT(num_bitmaps);
4956 num_bitmaps--;
4957 - e->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
4958 + e->bitmap = kmem_cache_zalloc(
4959 + btrfs_free_space_bitmap_cachep, GFP_NOFS);
4960 if (!e->bitmap) {
4961 kmem_cache_free(
4962 btrfs_free_space_cachep, e);
4963 @@ -1864,7 +1865,7 @@ static void free_bitmap(struct btrfs_free_space_ctl *ctl,
4964 struct btrfs_free_space *bitmap_info)
4965 {
4966 unlink_free_space(ctl, bitmap_info);
4967 - kfree(bitmap_info->bitmap);
4968 + kmem_cache_free(btrfs_free_space_bitmap_cachep, bitmap_info->bitmap);
4969 kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
4970 ctl->total_bitmaps--;
4971 ctl->op->recalc_thresholds(ctl);
4972 @@ -2118,7 +2119,8 @@ new_bitmap:
4973 }
4974
4975 /* allocate the bitmap */
4976 - info->bitmap = kzalloc(PAGE_SIZE, GFP_NOFS);
4977 + info->bitmap = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep,
4978 + GFP_NOFS);
4979 spin_lock(&ctl->tree_lock);
4980 if (!info->bitmap) {
4981 ret = -ENOMEM;
4982 @@ -2130,7 +2132,8 @@ new_bitmap:
4983 out:
4984 if (info) {
4985 if (info->bitmap)
4986 - kfree(info->bitmap);
4987 + kmem_cache_free(btrfs_free_space_bitmap_cachep,
4988 + info->bitmap);
4989 kmem_cache_free(btrfs_free_space_cachep, info);
4990 }
4991
4992 @@ -2786,7 +2789,8 @@ out:
4993 if (entry->bytes == 0) {
4994 ctl->free_extents--;
4995 if (entry->bitmap) {
4996 - kfree(entry->bitmap);
4997 + kmem_cache_free(btrfs_free_space_bitmap_cachep,
4998 + entry->bitmap);
4999 ctl->total_bitmaps--;
5000 ctl->op->recalc_thresholds(ctl);
5001 }
5002 @@ -3594,7 +3598,7 @@ again:
5003 }
5004
5005 if (!map) {
5006 - map = kzalloc(PAGE_SIZE, GFP_NOFS);
5007 + map = kmem_cache_zalloc(btrfs_free_space_bitmap_cachep, GFP_NOFS);
5008 if (!map) {
5009 kmem_cache_free(btrfs_free_space_cachep, info);
5010 return -ENOMEM;
5011 @@ -3624,7 +3628,7 @@ again:
5012 if (info)
5013 kmem_cache_free(btrfs_free_space_cachep, info);
5014 if (map)
5015 - kfree(map);
5016 + kmem_cache_free(btrfs_free_space_bitmap_cachep, map);
5017 return 0;
5018 }
5019
5020 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
5021 index 98c535ae038d..37332f83a3a9 100644
5022 --- a/fs/btrfs/inode.c
5023 +++ b/fs/btrfs/inode.c
5024 @@ -72,6 +72,7 @@ static struct kmem_cache *btrfs_inode_cachep;
5025 struct kmem_cache *btrfs_trans_handle_cachep;
5026 struct kmem_cache *btrfs_path_cachep;
5027 struct kmem_cache *btrfs_free_space_cachep;
5028 +struct kmem_cache *btrfs_free_space_bitmap_cachep;
5029
5030 #define S_SHIFT 12
5031 static const unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
5032 @@ -9361,6 +9362,7 @@ void __cold btrfs_destroy_cachep(void)
5033 kmem_cache_destroy(btrfs_trans_handle_cachep);
5034 kmem_cache_destroy(btrfs_path_cachep);
5035 kmem_cache_destroy(btrfs_free_space_cachep);
5036 + kmem_cache_destroy(btrfs_free_space_bitmap_cachep);
5037 }
5038
5039 int __init btrfs_init_cachep(void)
5040 @@ -9390,6 +9392,12 @@ int __init btrfs_init_cachep(void)
5041 if (!btrfs_free_space_cachep)
5042 goto fail;
5043
5044 + btrfs_free_space_bitmap_cachep = kmem_cache_create("btrfs_free_space_bitmap",
5045 + PAGE_SIZE, PAGE_SIZE,
5046 + SLAB_RED_ZONE, NULL);
5047 + if (!btrfs_free_space_bitmap_cachep)
5048 + goto fail;
5049 +
5050 return 0;
5051 fail:
5052 btrfs_destroy_cachep();
5053 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
5054 index 734866ab5194..3ea2008dcde3 100644
5055 --- a/fs/btrfs/qgroup.c
5056 +++ b/fs/btrfs/qgroup.c
5057 @@ -2796,9 +2796,6 @@ out:
5058 btrfs_free_path(path);
5059
5060 mutex_lock(&fs_info->qgroup_rescan_lock);
5061 - if (!btrfs_fs_closing(fs_info))
5062 - fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
5063 -
5064 if (err > 0 &&
5065 fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT) {
5066 fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_INCONSISTENT;
5067 @@ -2814,16 +2811,30 @@ out:
5068 trans = btrfs_start_transaction(fs_info->quota_root, 1);
5069 if (IS_ERR(trans)) {
5070 err = PTR_ERR(trans);
5071 + trans = NULL;
5072 btrfs_err(fs_info,
5073 "fail to start transaction for status update: %d",
5074 err);
5075 - goto done;
5076 }
5077 - ret = update_qgroup_status_item(trans);
5078 - if (ret < 0) {
5079 - err = ret;
5080 - btrfs_err(fs_info, "fail to update qgroup status: %d", err);
5081 +
5082 + mutex_lock(&fs_info->qgroup_rescan_lock);
5083 + if (!btrfs_fs_closing(fs_info))
5084 + fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
5085 + if (trans) {
5086 + ret = update_qgroup_status_item(trans);
5087 + if (ret < 0) {
5088 + err = ret;
5089 + btrfs_err(fs_info, "fail to update qgroup status: %d",
5090 + err);
5091 + }
5092 }
5093 + fs_info->qgroup_rescan_running = false;
5094 + complete_all(&fs_info->qgroup_rescan_completion);
5095 + mutex_unlock(&fs_info->qgroup_rescan_lock);
5096 +
5097 + if (!trans)
5098 + return;
5099 +
5100 btrfs_end_transaction(trans);
5101
5102 if (btrfs_fs_closing(fs_info)) {
5103 @@ -2834,12 +2845,6 @@ out:
5104 } else {
5105 btrfs_err(fs_info, "qgroup scan failed with %d", err);
5106 }
5107 -
5108 -done:
5109 - mutex_lock(&fs_info->qgroup_rescan_lock);
5110 - fs_info->qgroup_rescan_running = false;
5111 - mutex_unlock(&fs_info->qgroup_rescan_lock);
5112 - complete_all(&fs_info->qgroup_rescan_completion);
5113 }
5114
5115 /*
5116 @@ -3067,6 +3072,9 @@ cleanup:
5117 while ((unode = ulist_next(&reserved->range_changed, &uiter)))
5118 clear_extent_bit(&BTRFS_I(inode)->io_tree, unode->val,
5119 unode->aux, EXTENT_QGROUP_RESERVED, 0, 0, NULL);
5120 + /* Also free data bytes of already reserved one */
5121 + btrfs_qgroup_free_refroot(root->fs_info, root->root_key.objectid,
5122 + orig_reserved, BTRFS_QGROUP_RSV_DATA);
5123 extent_changeset_release(reserved);
5124 return ret;
5125 }
5126 @@ -3111,7 +3119,7 @@ static int qgroup_free_reserved_data(struct inode *inode,
5127 * EXTENT_QGROUP_RESERVED, we won't double free.
5128 * So not need to rush.
5129 */
5130 - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_failure_tree,
5131 + ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree,
5132 free_start, free_start + free_len - 1,
5133 EXTENT_QGROUP_RESERVED, &changeset);
5134 if (ret < 0)
5135 diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
5136 index 665a86f83f4b..c06845237cba 100644
5137 --- a/fs/ceph/inode.c
5138 +++ b/fs/ceph/inode.c
5139 @@ -579,7 +579,10 @@ void ceph_evict_inode(struct inode *inode)
5140 ceph_buffer_put(ci->i_xattrs.prealloc_blob);
5141
5142 ceph_put_string(rcu_dereference_raw(ci->i_layout.pool_ns));
5143 +}
5144
5145 +void ceph_destroy_inode(struct inode *inode)
5146 +{
5147 call_rcu(&inode->i_rcu, ceph_i_callback);
5148 }
5149
5150 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
5151 index 02528e11bf33..ccab249a37f6 100644
5152 --- a/fs/ceph/super.c
5153 +++ b/fs/ceph/super.c
5154 @@ -827,6 +827,7 @@ static int ceph_remount(struct super_block *sb, int *flags, char *data)
5155
5156 static const struct super_operations ceph_super_ops = {
5157 .alloc_inode = ceph_alloc_inode,
5158 + .destroy_inode = ceph_destroy_inode,
5159 .write_inode = ceph_write_inode,
5160 .drop_inode = ceph_drop_inode,
5161 .evict_inode = ceph_evict_inode,
5162 diff --git a/fs/ceph/super.h b/fs/ceph/super.h
5163 index 6e968e48e5e4..8d3eabf06d66 100644
5164 --- a/fs/ceph/super.h
5165 +++ b/fs/ceph/super.h
5166 @@ -855,6 +855,7 @@ extern const struct inode_operations ceph_file_iops;
5167
5168 extern struct inode *ceph_alloc_inode(struct super_block *sb);
5169 extern void ceph_evict_inode(struct inode *inode);
5170 +extern void ceph_destroy_inode(struct inode *inode);
5171 extern int ceph_drop_inode(struct inode *inode);
5172
5173 extern struct inode *ceph_get_inode(struct super_block *sb,
5174 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
5175 index 64e3888f30e6..d5457015801d 100644
5176 --- a/fs/cifs/cifsfs.c
5177 +++ b/fs/cifs/cifsfs.c
5178 @@ -428,6 +428,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
5179 cifs_show_security(s, tcon->ses);
5180 cifs_show_cache_flavor(s, cifs_sb);
5181
5182 + if (tcon->no_lease)
5183 + seq_puts(s, ",nolease");
5184 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER)
5185 seq_puts(s, ",multiuser");
5186 else if (tcon->ses->user_name)
5187 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
5188 index 57af9bac0045..4dbae6e268d6 100644
5189 --- a/fs/cifs/cifsglob.h
5190 +++ b/fs/cifs/cifsglob.h
5191 @@ -543,6 +543,7 @@ struct smb_vol {
5192 bool noblocksnd:1;
5193 bool noautotune:1;
5194 bool nostrictsync:1; /* do not force expensive SMBflush on every sync */
5195 + bool no_lease:1; /* disable requesting leases */
5196 bool fsc:1; /* enable fscache */
5197 bool mfsymlinks:1; /* use Minshall+French Symlinks */
5198 bool multiuser:1;
5199 @@ -1004,6 +1005,7 @@ struct cifs_tcon {
5200 bool need_reopen_files:1; /* need to reopen tcon file handles */
5201 bool use_resilient:1; /* use resilient instead of durable handles */
5202 bool use_persistent:1; /* use persistent instead of durable handles */
5203 + bool no_lease:1; /* Do not request leases on files or directories */
5204 __le32 capabilities;
5205 __u32 share_flags;
5206 __u32 maximal_access;
5207 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5208 index c290e231f918..966e493c82e5 100644
5209 --- a/fs/cifs/connect.c
5210 +++ b/fs/cifs/connect.c
5211 @@ -70,7 +70,7 @@ enum {
5212 Opt_user_xattr, Opt_nouser_xattr,
5213 Opt_forceuid, Opt_noforceuid,
5214 Opt_forcegid, Opt_noforcegid,
5215 - Opt_noblocksend, Opt_noautotune,
5216 + Opt_noblocksend, Opt_noautotune, Opt_nolease,
5217 Opt_hard, Opt_soft, Opt_perm, Opt_noperm,
5218 Opt_mapposix, Opt_nomapposix,
5219 Opt_mapchars, Opt_nomapchars, Opt_sfu,
5220 @@ -129,6 +129,7 @@ static const match_table_t cifs_mount_option_tokens = {
5221 { Opt_noforcegid, "noforcegid" },
5222 { Opt_noblocksend, "noblocksend" },
5223 { Opt_noautotune, "noautotune" },
5224 + { Opt_nolease, "nolease" },
5225 { Opt_hard, "hard" },
5226 { Opt_soft, "soft" },
5227 { Opt_perm, "perm" },
5228 @@ -1542,6 +1543,9 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
5229 case Opt_noautotune:
5230 vol->noautotune = 1;
5231 break;
5232 + case Opt_nolease:
5233 + vol->no_lease = 1;
5234 + break;
5235 case Opt_hard:
5236 vol->retry = 1;
5237 break;
5238 @@ -3023,6 +3027,8 @@ static int match_tcon(struct cifs_tcon *tcon, struct smb_vol *volume_info)
5239 return 0;
5240 if (tcon->snapshot_time != volume_info->snapshot_time)
5241 return 0;
5242 + if (tcon->no_lease != volume_info->no_lease)
5243 + return 0;
5244 return 1;
5245 }
5246
5247 @@ -3231,6 +3237,7 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
5248 tcon->nocase = volume_info->nocase;
5249 tcon->nohandlecache = volume_info->nohandlecache;
5250 tcon->local_lease = volume_info->local_lease;
5251 + tcon->no_lease = volume_info->no_lease;
5252 INIT_LIST_HEAD(&tcon->pending_opens);
5253
5254 spin_lock(&cifs_tcp_ses_lock);
5255 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
5256 index 094be406cde4..f0d966da7f37 100644
5257 --- a/fs/cifs/smb2ops.c
5258 +++ b/fs/cifs/smb2ops.c
5259 @@ -2398,6 +2398,11 @@ smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
5260 if (oplock == SMB2_OPLOCK_LEVEL_NOCHANGE)
5261 return;
5262
5263 + /* Check if the server granted an oplock rather than a lease */
5264 + if (oplock & SMB2_OPLOCK_LEVEL_EXCLUSIVE)
5265 + return smb2_set_oplock_level(cinode, oplock, epoch,
5266 + purge_cache);
5267 +
5268 if (oplock & SMB2_LEASE_READ_CACHING_HE) {
5269 new_oplock |= CIFS_CACHE_READ_FLG;
5270 strcat(message, "R");
5271 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5272 index cbe633f1840a..b1f5d0d28335 100644
5273 --- a/fs/cifs/smb2pdu.c
5274 +++ b/fs/cifs/smb2pdu.c
5275 @@ -2192,7 +2192,7 @@ SMB2_open_init(struct cifs_tcon *tcon, struct smb_rqst *rqst, __u8 *oplock,
5276 iov[1].iov_len = uni_path_len;
5277 iov[1].iov_base = path;
5278
5279 - if (!server->oplocks)
5280 + if ((!server->oplocks) || (tcon->no_lease))
5281 *oplock = SMB2_OPLOCK_LEVEL_NONE;
5282
5283 if (!(server->capabilities & SMB2_GLOBAL_CAP_LEASING) ||
5284 diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
5285 index 50ddb795aaeb..a2db401a58ed 100644
5286 --- a/fs/cifs/xattr.c
5287 +++ b/fs/cifs/xattr.c
5288 @@ -31,7 +31,7 @@
5289 #include "cifs_fs_sb.h"
5290 #include "cifs_unicode.h"
5291
5292 -#define MAX_EA_VALUE_SIZE 65535
5293 +#define MAX_EA_VALUE_SIZE CIFSMaxBufSize
5294 #define CIFS_XATTR_CIFS_ACL "system.cifs_acl"
5295 #define CIFS_XATTR_ATTRIB "cifs.dosattrib" /* full name: user.cifs.dosattrib */
5296 #define CIFS_XATTR_CREATETIME "cifs.creationtime" /* user.cifs.creationtime */
5297 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
5298 index 00bf0b67aae8..f81eb1785af2 100644
5299 --- a/fs/ext4/extents.c
5300 +++ b/fs/ext4/extents.c
5301 @@ -3748,8 +3748,8 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
5302 * illegal.
5303 */
5304 if (ee_block != map->m_lblk || ee_len > map->m_len) {
5305 -#ifdef EXT4_DEBUG
5306 - ext4_warning("Inode (%ld) finished: extent logical block %llu,"
5307 +#ifdef CONFIG_EXT4_DEBUG
5308 + ext4_warning(inode->i_sb, "Inode (%ld) finished: extent logical block %llu,"
5309 " len %u; IO logical block %llu, len %u",
5310 inode->i_ino, (unsigned long long)ee_block, ee_len,
5311 (unsigned long long)map->m_lblk, map->m_len);
5312 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
5313 index cff6277f7a9f..a0c94c365a4c 100644
5314 --- a/fs/ext4/inode.c
5315 +++ b/fs/ext4/inode.c
5316 @@ -4265,6 +4265,15 @@ int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
5317
5318 trace_ext4_punch_hole(inode, offset, length, 0);
5319
5320 + ext4_clear_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA);
5321 + if (ext4_has_inline_data(inode)) {
5322 + down_write(&EXT4_I(inode)->i_mmap_sem);
5323 + ret = ext4_convert_inline_data(inode);
5324 + up_write(&EXT4_I(inode)->i_mmap_sem);
5325 + if (ret)
5326 + return ret;
5327 + }
5328 +
5329 /*
5330 * Write out all dirty pages to avoid race conditions
5331 * Then release them.
5332 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
5333 index 6ee471b72a34..6d39143cfa09 100644
5334 --- a/fs/fuse/dev.c
5335 +++ b/fs/fuse/dev.c
5336 @@ -331,7 +331,7 @@ static void queue_request(struct fuse_iqueue *fiq, struct fuse_req *req)
5337 req->in.h.len = sizeof(struct fuse_in_header) +
5338 len_args(req->in.numargs, (struct fuse_arg *) req->in.args);
5339 list_add_tail(&req->list, &fiq->pending);
5340 - wake_up_locked(&fiq->waitq);
5341 + wake_up(&fiq->waitq);
5342 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
5343 }
5344
5345 @@ -343,16 +343,16 @@ void fuse_queue_forget(struct fuse_conn *fc, struct fuse_forget_link *forget,
5346 forget->forget_one.nodeid = nodeid;
5347 forget->forget_one.nlookup = nlookup;
5348
5349 - spin_lock(&fiq->waitq.lock);
5350 + spin_lock(&fiq->lock);
5351 if (fiq->connected) {
5352 fiq->forget_list_tail->next = forget;
5353 fiq->forget_list_tail = forget;
5354 - wake_up_locked(&fiq->waitq);
5355 + wake_up(&fiq->waitq);
5356 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
5357 } else {
5358 kfree(forget);
5359 }
5360 - spin_unlock(&fiq->waitq.lock);
5361 + spin_unlock(&fiq->lock);
5362 }
5363
5364 static void flush_bg_queue(struct fuse_conn *fc)
5365 @@ -365,10 +365,10 @@ static void flush_bg_queue(struct fuse_conn *fc)
5366 req = list_entry(fc->bg_queue.next, struct fuse_req, list);
5367 list_del(&req->list);
5368 fc->active_background++;
5369 - spin_lock(&fiq->waitq.lock);
5370 + spin_lock(&fiq->lock);
5371 req->in.h.unique = fuse_get_unique(fiq);
5372 queue_request(fiq, req);
5373 - spin_unlock(&fiq->waitq.lock);
5374 + spin_unlock(&fiq->lock);
5375 }
5376 }
5377
5378 @@ -387,9 +387,9 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
5379 if (test_and_set_bit(FR_FINISHED, &req->flags))
5380 goto put_request;
5381
5382 - spin_lock(&fiq->waitq.lock);
5383 + spin_lock(&fiq->lock);
5384 list_del_init(&req->intr_entry);
5385 - spin_unlock(&fiq->waitq.lock);
5386 + spin_unlock(&fiq->lock);
5387 WARN_ON(test_bit(FR_PENDING, &req->flags));
5388 WARN_ON(test_bit(FR_SENT, &req->flags));
5389 if (test_bit(FR_BACKGROUND, &req->flags)) {
5390 @@ -427,16 +427,16 @@ put_request:
5391
5392 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
5393 {
5394 - spin_lock(&fiq->waitq.lock);
5395 + spin_lock(&fiq->lock);
5396 if (test_bit(FR_FINISHED, &req->flags)) {
5397 - spin_unlock(&fiq->waitq.lock);
5398 + spin_unlock(&fiq->lock);
5399 return;
5400 }
5401 if (list_empty(&req->intr_entry)) {
5402 list_add_tail(&req->intr_entry, &fiq->interrupts);
5403 - wake_up_locked(&fiq->waitq);
5404 + wake_up(&fiq->waitq);
5405 }
5406 - spin_unlock(&fiq->waitq.lock);
5407 + spin_unlock(&fiq->lock);
5408 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
5409 }
5410
5411 @@ -466,16 +466,16 @@ static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
5412 if (!err)
5413 return;
5414
5415 - spin_lock(&fiq->waitq.lock);
5416 + spin_lock(&fiq->lock);
5417 /* Request is not yet in userspace, bail out */
5418 if (test_bit(FR_PENDING, &req->flags)) {
5419 list_del(&req->list);
5420 - spin_unlock(&fiq->waitq.lock);
5421 + spin_unlock(&fiq->lock);
5422 __fuse_put_request(req);
5423 req->out.h.error = -EINTR;
5424 return;
5425 }
5426 - spin_unlock(&fiq->waitq.lock);
5427 + spin_unlock(&fiq->lock);
5428 }
5429
5430 /*
5431 @@ -490,9 +490,9 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
5432 struct fuse_iqueue *fiq = &fc->iq;
5433
5434 BUG_ON(test_bit(FR_BACKGROUND, &req->flags));
5435 - spin_lock(&fiq->waitq.lock);
5436 + spin_lock(&fiq->lock);
5437 if (!fiq->connected) {
5438 - spin_unlock(&fiq->waitq.lock);
5439 + spin_unlock(&fiq->lock);
5440 req->out.h.error = -ENOTCONN;
5441 } else {
5442 req->in.h.unique = fuse_get_unique(fiq);
5443 @@ -500,7 +500,7 @@ static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
5444 /* acquire extra reference, since request is still needed
5445 after request_end() */
5446 __fuse_get_request(req);
5447 - spin_unlock(&fiq->waitq.lock);
5448 + spin_unlock(&fiq->lock);
5449
5450 request_wait_answer(fc, req);
5451 /* Pairs with smp_wmb() in request_end() */
5452 @@ -633,12 +633,12 @@ static int fuse_request_send_notify_reply(struct fuse_conn *fc,
5453
5454 __clear_bit(FR_ISREPLY, &req->flags);
5455 req->in.h.unique = unique;
5456 - spin_lock(&fiq->waitq.lock);
5457 + spin_lock(&fiq->lock);
5458 if (fiq->connected) {
5459 queue_request(fiq, req);
5460 err = 0;
5461 }
5462 - spin_unlock(&fiq->waitq.lock);
5463 + spin_unlock(&fiq->lock);
5464
5465 return err;
5466 }
5467 @@ -1082,12 +1082,12 @@ static int request_pending(struct fuse_iqueue *fiq)
5468 * Unlike other requests this is assembled on demand, without a need
5469 * to allocate a separate fuse_req structure.
5470 *
5471 - * Called with fiq->waitq.lock held, releases it
5472 + * Called with fiq->lock held, releases it
5473 */
5474 static int fuse_read_interrupt(struct fuse_iqueue *fiq,
5475 struct fuse_copy_state *cs,
5476 size_t nbytes, struct fuse_req *req)
5477 -__releases(fiq->waitq.lock)
5478 +__releases(fiq->lock)
5479 {
5480 struct fuse_in_header ih;
5481 struct fuse_interrupt_in arg;
5482 @@ -1103,7 +1103,7 @@ __releases(fiq->waitq.lock)
5483 ih.unique = req->intr_unique;
5484 arg.unique = req->in.h.unique;
5485
5486 - spin_unlock(&fiq->waitq.lock);
5487 + spin_unlock(&fiq->lock);
5488 if (nbytes < reqsize)
5489 return -EINVAL;
5490
5491 @@ -1140,7 +1140,7 @@ static struct fuse_forget_link *dequeue_forget(struct fuse_iqueue *fiq,
5492 static int fuse_read_single_forget(struct fuse_iqueue *fiq,
5493 struct fuse_copy_state *cs,
5494 size_t nbytes)
5495 -__releases(fiq->waitq.lock)
5496 +__releases(fiq->lock)
5497 {
5498 int err;
5499 struct fuse_forget_link *forget = dequeue_forget(fiq, 1, NULL);
5500 @@ -1154,7 +1154,7 @@ __releases(fiq->waitq.lock)
5501 .len = sizeof(ih) + sizeof(arg),
5502 };
5503
5504 - spin_unlock(&fiq->waitq.lock);
5505 + spin_unlock(&fiq->lock);
5506 kfree(forget);
5507 if (nbytes < ih.len)
5508 return -EINVAL;
5509 @@ -1172,7 +1172,7 @@ __releases(fiq->waitq.lock)
5510
5511 static int fuse_read_batch_forget(struct fuse_iqueue *fiq,
5512 struct fuse_copy_state *cs, size_t nbytes)
5513 -__releases(fiq->waitq.lock)
5514 +__releases(fiq->lock)
5515 {
5516 int err;
5517 unsigned max_forgets;
5518 @@ -1186,13 +1186,13 @@ __releases(fiq->waitq.lock)
5519 };
5520
5521 if (nbytes < ih.len) {
5522 - spin_unlock(&fiq->waitq.lock);
5523 + spin_unlock(&fiq->lock);
5524 return -EINVAL;
5525 }
5526
5527 max_forgets = (nbytes - ih.len) / sizeof(struct fuse_forget_one);
5528 head = dequeue_forget(fiq, max_forgets, &count);
5529 - spin_unlock(&fiq->waitq.lock);
5530 + spin_unlock(&fiq->lock);
5531
5532 arg.count = count;
5533 ih.len += count * sizeof(struct fuse_forget_one);
5534 @@ -1222,7 +1222,7 @@ __releases(fiq->waitq.lock)
5535 static int fuse_read_forget(struct fuse_conn *fc, struct fuse_iqueue *fiq,
5536 struct fuse_copy_state *cs,
5537 size_t nbytes)
5538 -__releases(fiq->waitq.lock)
5539 +__releases(fiq->lock)
5540 {
5541 if (fc->minor < 16 || fiq->forget_list_head.next->next == NULL)
5542 return fuse_read_single_forget(fiq, cs, nbytes);
5543 @@ -1251,16 +1251,19 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
5544 unsigned reqsize;
5545
5546 restart:
5547 - spin_lock(&fiq->waitq.lock);
5548 - err = -EAGAIN;
5549 - if ((file->f_flags & O_NONBLOCK) && fiq->connected &&
5550 - !request_pending(fiq))
5551 - goto err_unlock;
5552 + for (;;) {
5553 + spin_lock(&fiq->lock);
5554 + if (!fiq->connected || request_pending(fiq))
5555 + break;
5556 + spin_unlock(&fiq->lock);
5557
5558 - err = wait_event_interruptible_exclusive_locked(fiq->waitq,
5559 + if (file->f_flags & O_NONBLOCK)
5560 + return -EAGAIN;
5561 + err = wait_event_interruptible_exclusive(fiq->waitq,
5562 !fiq->connected || request_pending(fiq));
5563 - if (err)
5564 - goto err_unlock;
5565 + if (err)
5566 + return err;
5567 + }
5568
5569 if (!fiq->connected) {
5570 err = (fc->aborted && fc->abort_err) ? -ECONNABORTED : -ENODEV;
5571 @@ -1284,7 +1287,7 @@ static ssize_t fuse_dev_do_read(struct fuse_dev *fud, struct file *file,
5572 req = list_entry(fiq->pending.next, struct fuse_req, list);
5573 clear_bit(FR_PENDING, &req->flags);
5574 list_del_init(&req->list);
5575 - spin_unlock(&fiq->waitq.lock);
5576 + spin_unlock(&fiq->lock);
5577
5578 in = &req->in;
5579 reqsize = in->h.len;
5580 @@ -1341,7 +1344,7 @@ out_end:
5581 return err;
5582
5583 err_unlock:
5584 - spin_unlock(&fiq->waitq.lock);
5585 + spin_unlock(&fiq->lock);
5586 return err;
5587 }
5588
5589 @@ -2054,12 +2057,12 @@ static __poll_t fuse_dev_poll(struct file *file, poll_table *wait)
5590 fiq = &fud->fc->iq;
5591 poll_wait(file, &fiq->waitq, wait);
5592
5593 - spin_lock(&fiq->waitq.lock);
5594 + spin_lock(&fiq->lock);
5595 if (!fiq->connected)
5596 mask = EPOLLERR;
5597 else if (request_pending(fiq))
5598 mask |= EPOLLIN | EPOLLRDNORM;
5599 - spin_unlock(&fiq->waitq.lock);
5600 + spin_unlock(&fiq->lock);
5601
5602 return mask;
5603 }
5604 @@ -2150,15 +2153,15 @@ void fuse_abort_conn(struct fuse_conn *fc, bool is_abort)
5605 fc->max_background = UINT_MAX;
5606 flush_bg_queue(fc);
5607
5608 - spin_lock(&fiq->waitq.lock);
5609 + spin_lock(&fiq->lock);
5610 fiq->connected = 0;
5611 list_for_each_entry(req, &fiq->pending, list)
5612 clear_bit(FR_PENDING, &req->flags);
5613 list_splice_tail_init(&fiq->pending, &to_end);
5614 while (forget_pending(fiq))
5615 kfree(dequeue_forget(fiq, 1, NULL));
5616 - wake_up_all_locked(&fiq->waitq);
5617 - spin_unlock(&fiq->waitq.lock);
5618 + wake_up_all(&fiq->waitq);
5619 + spin_unlock(&fiq->lock);
5620 kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
5621 end_polls(fc);
5622 wake_up_all(&fc->blocked_waitq);
5623 diff --git a/fs/fuse/file.c b/fs/fuse/file.c
5624 index 9a22aa580fe7..96d46b3ad235 100644
5625 --- a/fs/fuse/file.c
5626 +++ b/fs/fuse/file.c
5627 @@ -1700,6 +1700,7 @@ static int fuse_writepage(struct page *page, struct writeback_control *wbc)
5628 WARN_ON(wbc->sync_mode == WB_SYNC_ALL);
5629
5630 redirty_page_for_writepage(wbc, page);
5631 + unlock_page(page);
5632 return 0;
5633 }
5634
5635 diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
5636 index cec8b8e74969..900bdcf79bfc 100644
5637 --- a/fs/fuse/fuse_i.h
5638 +++ b/fs/fuse/fuse_i.h
5639 @@ -388,6 +388,9 @@ struct fuse_iqueue {
5640 /** Connection established */
5641 unsigned connected;
5642
5643 + /** Lock protecting accesses to members of this structure */
5644 + spinlock_t lock;
5645 +
5646 /** Readers of the connection are waiting on this */
5647 wait_queue_head_t waitq;
5648
5649 diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
5650 index db9e60b7eb69..cb018315ecaf 100644
5651 --- a/fs/fuse/inode.c
5652 +++ b/fs/fuse/inode.c
5653 @@ -585,6 +585,7 @@ static int fuse_show_options(struct seq_file *m, struct dentry *root)
5654 static void fuse_iqueue_init(struct fuse_iqueue *fiq)
5655 {
5656 memset(fiq, 0, sizeof(struct fuse_iqueue));
5657 + spin_lock_init(&fiq->lock);
5658 init_waitqueue_head(&fiq->waitq);
5659 INIT_LIST_HEAD(&fiq->pending);
5660 INIT_LIST_HEAD(&fiq->interrupts);
5661 diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
5662 index d14d71d8d7ee..52feccedd7a4 100644
5663 --- a/fs/gfs2/bmap.c
5664 +++ b/fs/gfs2/bmap.c
5665 @@ -1630,6 +1630,7 @@ out_unlock:
5666 brelse(dibh);
5667 up_write(&ip->i_rw_mutex);
5668 gfs2_trans_end(sdp);
5669 + buf_in_tr = false;
5670 }
5671 gfs2_glock_dq_uninit(rd_gh);
5672 cond_resched();
5673 diff --git a/fs/overlayfs/export.c b/fs/overlayfs/export.c
5674 index 54e5d17d7f3e..6fe303850c9e 100644
5675 --- a/fs/overlayfs/export.c
5676 +++ b/fs/overlayfs/export.c
5677 @@ -230,9 +230,8 @@ static int ovl_d_to_fh(struct dentry *dentry, char *buf, int buflen)
5678 /* Encode an upper or lower file handle */
5679 fh = ovl_encode_real_fh(enc_lower ? ovl_dentry_lower(dentry) :
5680 ovl_dentry_upper(dentry), !enc_lower);
5681 - err = PTR_ERR(fh);
5682 if (IS_ERR(fh))
5683 - goto fail;
5684 + return PTR_ERR(fh);
5685
5686 err = -EOVERFLOW;
5687 if (fh->len > buflen)
5688 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
5689 index f0389849fd80..4f4964eeb086 100644
5690 --- a/fs/overlayfs/inode.c
5691 +++ b/fs/overlayfs/inode.c
5692 @@ -386,7 +386,8 @@ static bool ovl_can_list(const char *s)
5693 return true;
5694
5695 /* Never list trusted.overlay, list other trusted for superuser only */
5696 - return !ovl_is_private_xattr(s) && capable(CAP_SYS_ADMIN);
5697 + return !ovl_is_private_xattr(s) &&
5698 + ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN);
5699 }
5700
5701 ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
5702 diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
5703 index 1da59c16f637..2885dce1ad49 100644
5704 --- a/include/linux/blk-mq.h
5705 +++ b/include/linux/blk-mq.h
5706 @@ -114,6 +114,7 @@ typedef void (busy_iter_fn)(struct blk_mq_hw_ctx *, struct request *, void *,
5707 typedef void (busy_tag_iter_fn)(struct request *, void *, bool);
5708 typedef int (poll_fn)(struct blk_mq_hw_ctx *, unsigned int);
5709 typedef int (map_queues_fn)(struct blk_mq_tag_set *set);
5710 +typedef void (cleanup_rq_fn)(struct request *);
5711
5712
5713 struct blk_mq_ops {
5714 @@ -165,6 +166,12 @@ struct blk_mq_ops {
5715 /* Called from inside blk_get_request() */
5716 void (*initialize_rq_fn)(struct request *rq);
5717
5718 + /*
5719 + * Called before freeing one request which isn't completed yet,
5720 + * and usually for freeing the driver private data
5721 + */
5722 + cleanup_rq_fn *cleanup_rq;
5723 +
5724 map_queues_fn *map_queues;
5725
5726 #ifdef CONFIG_BLK_DEBUG_FS
5727 @@ -324,4 +331,10 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
5728 for ((i) = 0; (i) < (hctx)->nr_ctx && \
5729 ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
5730
5731 +static inline void blk_mq_cleanup_rq(struct request *rq)
5732 +{
5733 + if (rq->q->mq_ops->cleanup_rq)
5734 + rq->q->mq_ops->cleanup_rq(rq);
5735 +}
5736 +
5737 #endif
5738 diff --git a/include/linux/bug.h b/include/linux/bug.h
5739 index fe5916550da8..f639bd0122f3 100644
5740 --- a/include/linux/bug.h
5741 +++ b/include/linux/bug.h
5742 @@ -47,6 +47,11 @@ void generic_bug_clear_once(void);
5743
5744 #else /* !CONFIG_GENERIC_BUG */
5745
5746 +static inline void *find_bug(unsigned long bugaddr)
5747 +{
5748 + return NULL;
5749 +}
5750 +
5751 static inline enum bug_trap_type report_bug(unsigned long bug_addr,
5752 struct pt_regs *regs)
5753 {
5754 diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
5755 index 2ff52de1c2b8..840462ed1ec7 100644
5756 --- a/include/linux/mmc/host.h
5757 +++ b/include/linux/mmc/host.h
5758 @@ -488,6 +488,15 @@ void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
5759
5760 void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq);
5761
5762 +/*
5763 + * May be called from host driver's system/runtime suspend/resume callbacks,
5764 + * to know if SDIO IRQs has been claimed.
5765 + */
5766 +static inline bool sdio_irq_claimed(struct mmc_host *host)
5767 +{
5768 + return host->sdio_irqs > 0;
5769 +}
5770 +
5771 static inline void mmc_signal_sdio_irq(struct mmc_host *host)
5772 {
5773 host->ops->enable_sdio_irq(host, 0);
5774 diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
5775 index dc905a4ff8d7..185d94829701 100644
5776 --- a/include/linux/quotaops.h
5777 +++ b/include/linux/quotaops.h
5778 @@ -22,7 +22,7 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
5779 /* i_mutex must being held */
5780 static inline bool is_quota_modification(struct inode *inode, struct iattr *ia)
5781 {
5782 - return (ia->ia_valid & ATTR_SIZE && ia->ia_size != inode->i_size) ||
5783 + return (ia->ia_valid & ATTR_SIZE) ||
5784 (ia->ia_valid & ATTR_UID && !uid_eq(ia->ia_uid, inode->i_uid)) ||
5785 (ia->ia_valid & ATTR_GID && !gid_eq(ia->ia_gid, inode->i_gid));
5786 }
5787 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
5788 index 714d63f60460..b8efca9dc2cb 100644
5789 --- a/kernel/kprobes.c
5790 +++ b/kernel/kprobes.c
5791 @@ -1505,7 +1505,8 @@ static int check_kprobe_address_safe(struct kprobe *p,
5792 /* Ensure it is not in reserved area nor out of text */
5793 if (!kernel_text_address((unsigned long) p->addr) ||
5794 within_kprobe_blacklist((unsigned long) p->addr) ||
5795 - jump_label_text_reserved(p->addr, p->addr)) {
5796 + jump_label_text_reserved(p->addr, p->addr) ||
5797 + find_bug((unsigned long)p->addr)) {
5798 ret = -EINVAL;
5799 goto out;
5800 }
5801 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
5802 index 06045abd1887..d0d03223b45b 100644
5803 --- a/kernel/printk/printk.c
5804 +++ b/kernel/printk/printk.c
5805 @@ -3210,7 +3210,7 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
5806 /* move first record forward until length fits into the buffer */
5807 seq = dumper->cur_seq;
5808 idx = dumper->cur_idx;
5809 - while (l > size && seq < dumper->next_seq) {
5810 + while (l >= size && seq < dumper->next_seq) {
5811 struct printk_log *msg = log_from_idx(idx);
5812
5813 l -= msg_print_text(msg, true, NULL, 0);
5814 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
5815 index 795c63ca44a9..f4e050681ba1 100644
5816 --- a/kernel/sched/core.c
5817 +++ b/kernel/sched/core.c
5818 @@ -3066,8 +3066,36 @@ void scheduler_tick(void)
5819
5820 struct tick_work {
5821 int cpu;
5822 + atomic_t state;
5823 struct delayed_work work;
5824 };
5825 +/* Values for ->state, see diagram below. */
5826 +#define TICK_SCHED_REMOTE_OFFLINE 0
5827 +#define TICK_SCHED_REMOTE_OFFLINING 1
5828 +#define TICK_SCHED_REMOTE_RUNNING 2
5829 +
5830 +/*
5831 + * State diagram for ->state:
5832 + *
5833 + *
5834 + * TICK_SCHED_REMOTE_OFFLINE
5835 + * | ^
5836 + * | |
5837 + * | | sched_tick_remote()
5838 + * | |
5839 + * | |
5840 + * +--TICK_SCHED_REMOTE_OFFLINING
5841 + * | ^
5842 + * | |
5843 + * sched_tick_start() | | sched_tick_stop()
5844 + * | |
5845 + * V |
5846 + * TICK_SCHED_REMOTE_RUNNING
5847 + *
5848 + *
5849 + * Other transitions get WARN_ON_ONCE(), except that sched_tick_remote()
5850 + * and sched_tick_start() are happy to leave the state in RUNNING.
5851 + */
5852
5853 static struct tick_work __percpu *tick_work_cpu;
5854
5855 @@ -3080,6 +3108,7 @@ static void sched_tick_remote(struct work_struct *work)
5856 struct task_struct *curr;
5857 struct rq_flags rf;
5858 u64 delta;
5859 + int os;
5860
5861 /*
5862 * Handle the tick only if it appears the remote CPU is running in full
5863 @@ -3093,7 +3122,7 @@ static void sched_tick_remote(struct work_struct *work)
5864
5865 rq_lock_irq(rq, &rf);
5866 curr = rq->curr;
5867 - if (is_idle_task(curr))
5868 + if (is_idle_task(curr) || cpu_is_offline(cpu))
5869 goto out_unlock;
5870
5871 update_rq_clock(rq);
5872 @@ -3113,13 +3142,18 @@ out_requeue:
5873 /*
5874 * Run the remote tick once per second (1Hz). This arbitrary
5875 * frequency is large enough to avoid overload but short enough
5876 - * to keep scheduler internal stats reasonably up to date.
5877 + * to keep scheduler internal stats reasonably up to date. But
5878 + * first update state to reflect hotplug activity if required.
5879 */
5880 - queue_delayed_work(system_unbound_wq, dwork, HZ);
5881 + os = atomic_fetch_add_unless(&twork->state, -1, TICK_SCHED_REMOTE_RUNNING);
5882 + WARN_ON_ONCE(os == TICK_SCHED_REMOTE_OFFLINE);
5883 + if (os == TICK_SCHED_REMOTE_RUNNING)
5884 + queue_delayed_work(system_unbound_wq, dwork, HZ);
5885 }
5886
5887 static void sched_tick_start(int cpu)
5888 {
5889 + int os;
5890 struct tick_work *twork;
5891
5892 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
5893 @@ -3128,15 +3162,20 @@ static void sched_tick_start(int cpu)
5894 WARN_ON_ONCE(!tick_work_cpu);
5895
5896 twork = per_cpu_ptr(tick_work_cpu, cpu);
5897 - twork->cpu = cpu;
5898 - INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5899 - queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5900 + os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_RUNNING);
5901 + WARN_ON_ONCE(os == TICK_SCHED_REMOTE_RUNNING);
5902 + if (os == TICK_SCHED_REMOTE_OFFLINE) {
5903 + twork->cpu = cpu;
5904 + INIT_DELAYED_WORK(&twork->work, sched_tick_remote);
5905 + queue_delayed_work(system_unbound_wq, &twork->work, HZ);
5906 + }
5907 }
5908
5909 #ifdef CONFIG_HOTPLUG_CPU
5910 static void sched_tick_stop(int cpu)
5911 {
5912 struct tick_work *twork;
5913 + int os;
5914
5915 if (housekeeping_cpu(cpu, HK_FLAG_TICK))
5916 return;
5917 @@ -3144,7 +3183,10 @@ static void sched_tick_stop(int cpu)
5918 WARN_ON_ONCE(!tick_work_cpu);
5919
5920 twork = per_cpu_ptr(tick_work_cpu, cpu);
5921 - cancel_delayed_work_sync(&twork->work);
5922 + /* There cannot be competing actions, but don't rely on stop-machine. */
5923 + os = atomic_xchg(&twork->state, TICK_SCHED_REMOTE_OFFLINING);
5924 + WARN_ON_ONCE(os != TICK_SCHED_REMOTE_RUNNING);
5925 + /* Don't cancel, as this would mess up the state machine. */
5926 }
5927 #endif /* CONFIG_HOTPLUG_CPU */
5928
5929 @@ -3152,7 +3194,6 @@ int __init sched_tick_offload_init(void)
5930 {
5931 tick_work_cpu = alloc_percpu(struct tick_work);
5932 BUG_ON(!tick_work_cpu);
5933 -
5934 return 0;
5935 }
5936
5937 @@ -6453,10 +6494,6 @@ static int cpu_cgroup_can_attach(struct cgroup_taskset *tset)
5938 #ifdef CONFIG_RT_GROUP_SCHED
5939 if (!sched_rt_can_attach(css_tg(css), task))
5940 return -EINVAL;
5941 -#else
5942 - /* We don't support RT-tasks being in separate groups */
5943 - if (task->sched_class != &fair_sched_class)
5944 - return -EINVAL;
5945 #endif
5946 /*
5947 * Serialize against wake_up_new_task() such that if its
5948 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
5949 index 64d54acc9928..54fcff656ecd 100644
5950 --- a/kernel/sched/cpufreq_schedutil.c
5951 +++ b/kernel/sched/cpufreq_schedutil.c
5952 @@ -118,6 +118,7 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
5953 unsigned int next_freq)
5954 {
5955 struct cpufreq_policy *policy = sg_policy->policy;
5956 + int cpu;
5957
5958 if (!sugov_update_next_freq(sg_policy, time, next_freq))
5959 return;
5960 @@ -127,7 +128,11 @@ static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
5961 return;
5962
5963 policy->cur = next_freq;
5964 - trace_cpu_frequency(next_freq, smp_processor_id());
5965 +
5966 + if (trace_cpu_frequency_enabled()) {
5967 + for_each_cpu(cpu, policy->cpus)
5968 + trace_cpu_frequency(next_freq, cpu);
5969 + }
5970 }
5971
5972 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
5973 diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
5974 index 72c07059ef37..ebec37cb3be9 100644
5975 --- a/kernel/sched/deadline.c
5976 +++ b/kernel/sched/deadline.c
5977 @@ -529,6 +529,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq);
5978 static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p)
5979 {
5980 struct rq *later_rq = NULL;
5981 + struct dl_bw *dl_b;
5982
5983 later_rq = find_lock_later_rq(p, rq);
5984 if (!later_rq) {
5985 @@ -557,6 +558,38 @@ static struct rq *dl_task_offline_migration(struct rq *rq, struct task_struct *p
5986 double_lock_balance(rq, later_rq);
5987 }
5988
5989 + if (p->dl.dl_non_contending || p->dl.dl_throttled) {
5990 + /*
5991 + * Inactive timer is armed (or callback is running, but
5992 + * waiting for us to release rq locks). In any case, when it
5993 + * will fire (or continue), it will see running_bw of this
5994 + * task migrated to later_rq (and correctly handle it).
5995 + */
5996 + sub_running_bw(&p->dl, &rq->dl);
5997 + sub_rq_bw(&p->dl, &rq->dl);
5998 +
5999 + add_rq_bw(&p->dl, &later_rq->dl);
6000 + add_running_bw(&p->dl, &later_rq->dl);
6001 + } else {
6002 + sub_rq_bw(&p->dl, &rq->dl);
6003 + add_rq_bw(&p->dl, &later_rq->dl);
6004 + }
6005 +
6006 + /*
6007 + * And we finally need to fixup root_domain(s) bandwidth accounting,
6008 + * since p is still hanging out in the old (now moved to default) root
6009 + * domain.
6010 + */
6011 + dl_b = &rq->rd->dl_bw;
6012 + raw_spin_lock(&dl_b->lock);
6013 + __dl_sub(dl_b, p->dl.dl_bw, cpumask_weight(rq->rd->span));
6014 + raw_spin_unlock(&dl_b->lock);
6015 +
6016 + dl_b = &later_rq->rd->dl_bw;
6017 + raw_spin_lock(&dl_b->lock);
6018 + __dl_add(dl_b, p->dl.dl_bw, cpumask_weight(later_rq->rd->span));
6019 + raw_spin_unlock(&dl_b->lock);
6020 +
6021 set_task_cpu(p, later_rq->cpu);
6022 double_unlock_balance(later_rq, rq);
6023
6024 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
6025 index 49ed38914669..32d2dac680a7 100644
6026 --- a/kernel/sched/fair.c
6027 +++ b/kernel/sched/fair.c
6028 @@ -8863,9 +8863,10 @@ more_balance:
6029 out_balanced:
6030 /*
6031 * We reach balance although we may have faced some affinity
6032 - * constraints. Clear the imbalance flag if it was set.
6033 + * constraints. Clear the imbalance flag only if other tasks got
6034 + * a chance to move and fix the imbalance.
6035 */
6036 - if (sd_parent) {
6037 + if (sd_parent && !(env.flags & LBF_ALL_PINNED)) {
6038 int *group_imbalance = &sd_parent->groups->sgc->imbalance;
6039
6040 if (*group_imbalance)
6041 @@ -10078,18 +10079,18 @@ err:
6042 void online_fair_sched_group(struct task_group *tg)
6043 {
6044 struct sched_entity *se;
6045 + struct rq_flags rf;
6046 struct rq *rq;
6047 int i;
6048
6049 for_each_possible_cpu(i) {
6050 rq = cpu_rq(i);
6051 se = tg->se[i];
6052 -
6053 - raw_spin_lock_irq(&rq->lock);
6054 + rq_lock_irq(rq, &rf);
6055 update_rq_clock(rq);
6056 attach_entity_cfs_rq(se);
6057 sync_throttle(tg, i);
6058 - raw_spin_unlock_irq(&rq->lock);
6059 + rq_unlock_irq(rq, &rf);
6060 }
6061 }
6062
6063 diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
6064 index 16f84142f2f4..44a17366c8ec 100644
6065 --- a/kernel/sched/idle.c
6066 +++ b/kernel/sched/idle.c
6067 @@ -240,13 +240,14 @@ static void do_idle(void)
6068 check_pgt_cache();
6069 rmb();
6070
6071 + local_irq_disable();
6072 +
6073 if (cpu_is_offline(cpu)) {
6074 - tick_nohz_idle_stop_tick_protected();
6075 + tick_nohz_idle_stop_tick();
6076 cpuhp_report_idle_dead();
6077 arch_cpu_idle_dead();
6078 }
6079
6080 - local_irq_disable();
6081 arch_cpu_idle_enter();
6082
6083 /*
6084 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
6085 index fdeb9bc6affb..f4255a65c44b 100644
6086 --- a/kernel/time/alarmtimer.c
6087 +++ b/kernel/time/alarmtimer.c
6088 @@ -676,7 +676,7 @@ static int alarm_timer_create(struct k_itimer *new_timer)
6089 enum alarmtimer_type type;
6090
6091 if (!alarmtimer_get_rtcdev())
6092 - return -ENOTSUPP;
6093 + return -EOPNOTSUPP;
6094
6095 if (!capable(CAP_WAKE_ALARM))
6096 return -EPERM;
6097 @@ -794,7 +794,7 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
6098 int ret = 0;
6099
6100 if (!alarmtimer_get_rtcdev())
6101 - return -ENOTSUPP;
6102 + return -EOPNOTSUPP;
6103
6104 if (flags & ~TIMER_ABSTIME)
6105 return -EINVAL;
6106 diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
6107 index 76801b9b481e..d62d7ae5201c 100644
6108 --- a/kernel/time/posix-cpu-timers.c
6109 +++ b/kernel/time/posix-cpu-timers.c
6110 @@ -375,7 +375,8 @@ static int posix_cpu_timer_del(struct k_itimer *timer)
6111 struct sighand_struct *sighand;
6112 struct task_struct *p = timer->it.cpu.task;
6113
6114 - WARN_ON_ONCE(p == NULL);
6115 + if (WARN_ON_ONCE(!p))
6116 + return -EINVAL;
6117
6118 /*
6119 * Protect against sighand release/switch in exit/exec and process/
6120 @@ -580,7 +581,8 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
6121 u64 old_expires, new_expires, old_incr, val;
6122 int ret;
6123
6124 - WARN_ON_ONCE(p == NULL);
6125 + if (WARN_ON_ONCE(!p))
6126 + return -EINVAL;
6127
6128 /*
6129 * Use the to_ktime conversion because that clamps the maximum
6130 @@ -716,10 +718,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
6131
6132 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec64 *itp)
6133 {
6134 - u64 now;
6135 struct task_struct *p = timer->it.cpu.task;
6136 + u64 now;
6137
6138 - WARN_ON_ONCE(p == NULL);
6139 + if (WARN_ON_ONCE(!p))
6140 + return;
6141
6142 /*
6143 * Easy part: convert the reload time.
6144 @@ -1004,12 +1007,13 @@ static void check_process_timers(struct task_struct *tsk,
6145 */
6146 static void posix_cpu_timer_rearm(struct k_itimer *timer)
6147 {
6148 + struct task_struct *p = timer->it.cpu.task;
6149 struct sighand_struct *sighand;
6150 unsigned long flags;
6151 - struct task_struct *p = timer->it.cpu.task;
6152 u64 now;
6153
6154 - WARN_ON_ONCE(p == NULL);
6155 + if (WARN_ON_ONCE(!p))
6156 + return;
6157
6158 /*
6159 * Fetch the current sample and update the timer's expiry time.
6160 @@ -1206,7 +1210,9 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
6161 u64 now;
6162 int ret;
6163
6164 - WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
6165 + if (WARN_ON_ONCE(clock_idx >= CPUCLOCK_SCHED))
6166 + return;
6167 +
6168 ret = cpu_timer_sample_group(clock_idx, tsk, &now);
6169
6170 if (oldval && ret != -EINVAL) {
6171 diff --git a/mm/compaction.c b/mm/compaction.c
6172 index faca45ebe62d..5079ddbec8f9 100644
6173 --- a/mm/compaction.c
6174 +++ b/mm/compaction.c
6175 @@ -1540,6 +1540,17 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
6176 unsigned long end_pfn = zone_end_pfn(zone);
6177 const bool sync = cc->mode != MIGRATE_ASYNC;
6178
6179 + /*
6180 + * These counters track activities during zone compaction. Initialize
6181 + * them before compacting a new zone.
6182 + */
6183 + cc->total_migrate_scanned = 0;
6184 + cc->total_free_scanned = 0;
6185 + cc->nr_migratepages = 0;
6186 + cc->nr_freepages = 0;
6187 + INIT_LIST_HEAD(&cc->freepages);
6188 + INIT_LIST_HEAD(&cc->migratepages);
6189 +
6190 cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
6191 ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
6192 cc->classzone_idx);
6193 @@ -1703,10 +1714,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
6194 {
6195 enum compact_result ret;
6196 struct compact_control cc = {
6197 - .nr_freepages = 0,
6198 - .nr_migratepages = 0,
6199 - .total_migrate_scanned = 0,
6200 - .total_free_scanned = 0,
6201 .order = order,
6202 .gfp_mask = gfp_mask,
6203 .zone = zone,
6204 @@ -1719,8 +1726,6 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
6205 .ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
6206 .ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
6207 };
6208 - INIT_LIST_HEAD(&cc.freepages);
6209 - INIT_LIST_HEAD(&cc.migratepages);
6210
6211 ret = compact_zone(zone, &cc);
6212
6213 @@ -1819,8 +1824,6 @@ static void compact_node(int nid)
6214 struct zone *zone;
6215 struct compact_control cc = {
6216 .order = -1,
6217 - .total_migrate_scanned = 0,
6218 - .total_free_scanned = 0,
6219 .mode = MIGRATE_SYNC,
6220 .ignore_skip_hint = true,
6221 .whole_zone = true,
6222 @@ -1834,11 +1837,7 @@ static void compact_node(int nid)
6223 if (!populated_zone(zone))
6224 continue;
6225
6226 - cc.nr_freepages = 0;
6227 - cc.nr_migratepages = 0;
6228 cc.zone = zone;
6229 - INIT_LIST_HEAD(&cc.freepages);
6230 - INIT_LIST_HEAD(&cc.migratepages);
6231
6232 compact_zone(zone, &cc);
6233
6234 @@ -1947,8 +1946,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
6235 struct zone *zone;
6236 struct compact_control cc = {
6237 .order = pgdat->kcompactd_max_order,
6238 - .total_migrate_scanned = 0,
6239 - .total_free_scanned = 0,
6240 .classzone_idx = pgdat->kcompactd_classzone_idx,
6241 .mode = MIGRATE_SYNC_LIGHT,
6242 .ignore_skip_hint = false,
6243 @@ -1972,16 +1969,10 @@ static void kcompactd_do_work(pg_data_t *pgdat)
6244 COMPACT_CONTINUE)
6245 continue;
6246
6247 - cc.nr_freepages = 0;
6248 - cc.nr_migratepages = 0;
6249 - cc.total_migrate_scanned = 0;
6250 - cc.total_free_scanned = 0;
6251 - cc.zone = zone;
6252 - INIT_LIST_HEAD(&cc.freepages);
6253 - INIT_LIST_HEAD(&cc.migratepages);
6254 -
6255 if (kthread_should_stop())
6256 return;
6257 +
6258 + cc.zone = zone;
6259 status = compact_zone(zone, &cc);
6260
6261 if (status == COMPACT_SUCCESS) {
6262 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
6263 index ecde75f2189b..65da189a433b 100644
6264 --- a/mm/memcontrol.c
6265 +++ b/mm/memcontrol.c
6266 @@ -2637,6 +2637,16 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
6267
6268 if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) &&
6269 !page_counter_try_charge(&memcg->kmem, nr_pages, &counter)) {
6270 +
6271 + /*
6272 + * Enforce __GFP_NOFAIL allocation because callers are not
6273 + * prepared to see failures and likely do not have any failure
6274 + * handling code.
6275 + */
6276 + if (gfp & __GFP_NOFAIL) {
6277 + page_counter_charge(&memcg->kmem, nr_pages);
6278 + return 0;
6279 + }
6280 cancel_charge(memcg, nr_pages);
6281 return -ENOMEM;
6282 }
6283 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
6284 index dbddb7a409dd..a581fe2a2f1f 100644
6285 --- a/mm/oom_kill.c
6286 +++ b/mm/oom_kill.c
6287 @@ -1089,9 +1089,10 @@ bool out_of_memory(struct oom_control *oc)
6288 * The OOM killer does not compensate for IO-less reclaim.
6289 * pagefault_out_of_memory lost its gfp context so we have to
6290 * make sure exclude 0 mask - all other users should have at least
6291 - * ___GFP_DIRECT_RECLAIM to get here.
6292 + * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to
6293 + * invoke the OOM killer even if it is a GFP_NOFS allocation.
6294 */
6295 - if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS))
6296 + if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc))
6297 return true;
6298
6299 /*
6300 diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
6301 index 795fbc6c06aa..9abb18fffbc3 100644
6302 --- a/net/appletalk/ddp.c
6303 +++ b/net/appletalk/ddp.c
6304 @@ -1028,6 +1028,11 @@ static int atalk_create(struct net *net, struct socket *sock, int protocol,
6305 */
6306 if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM)
6307 goto out;
6308 +
6309 + rc = -EPERM;
6310 + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
6311 + goto out;
6312 +
6313 rc = -ENOMEM;
6314 sk = sk_alloc(net, PF_APPLETALK, GFP_KERNEL, &ddp_proto, kern);
6315 if (!sk)
6316 diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
6317 index 5d01edf8d819..44ec492f3dc2 100644
6318 --- a/net/ax25/af_ax25.c
6319 +++ b/net/ax25/af_ax25.c
6320 @@ -858,6 +858,8 @@ static int ax25_create(struct net *net, struct socket *sock, int protocol,
6321 break;
6322
6323 case SOCK_RAW:
6324 + if (!capable(CAP_NET_RAW))
6325 + return -EPERM;
6326 break;
6327 default:
6328 return -ESOCKTNOSUPPORT;
6329 diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
6330 index bc6b912603f1..89819745e482 100644
6331 --- a/net/ieee802154/socket.c
6332 +++ b/net/ieee802154/socket.c
6333 @@ -1018,6 +1018,9 @@ static int ieee802154_create(struct net *net, struct socket *sock,
6334
6335 switch (sock->type) {
6336 case SOCK_RAW:
6337 + rc = -EPERM;
6338 + if (!capable(CAP_NET_RAW))
6339 + goto out;
6340 proto = &ieee802154_raw_prot;
6341 ops = &ieee802154_raw_ops;
6342 break;
6343 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
6344 index 17335a370e64..9d775b8df57d 100644
6345 --- a/net/ipv4/tcp_timer.c
6346 +++ b/net/ipv4/tcp_timer.c
6347 @@ -219,7 +219,7 @@ static int tcp_write_timeout(struct sock *sk)
6348 struct inet_connection_sock *icsk = inet_csk(sk);
6349 struct tcp_sock *tp = tcp_sk(sk);
6350 struct net *net = sock_net(sk);
6351 - bool expired, do_reset;
6352 + bool expired = false, do_reset;
6353 int retry_until;
6354
6355 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
6356 @@ -251,9 +251,10 @@ static int tcp_write_timeout(struct sock *sk)
6357 if (tcp_out_of_resources(sk, do_reset))
6358 return 1;
6359 }
6360 + }
6361 + if (!expired)
6362 expired = retransmits_timed_out(sk, retry_until,
6363 icsk->icsk_user_timeout);
6364 - }
6365 tcp_fastopen_active_detect_blackhole(sk, expired);
6366
6367 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
6368 diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
6369 index ae296273ce3d..ff254e8c0c44 100644
6370 --- a/net/nfc/llcp_sock.c
6371 +++ b/net/nfc/llcp_sock.c
6372 @@ -1011,10 +1011,13 @@ static int llcp_sock_create(struct net *net, struct socket *sock,
6373 sock->type != SOCK_RAW)
6374 return -ESOCKTNOSUPPORT;
6375
6376 - if (sock->type == SOCK_RAW)
6377 + if (sock->type == SOCK_RAW) {
6378 + if (!capable(CAP_NET_RAW))
6379 + return -EPERM;
6380 sock->ops = &llcp_rawsock_ops;
6381 - else
6382 + } else {
6383 sock->ops = &llcp_sock_ops;
6384 + }
6385
6386 sk = nfc_llcp_sock_alloc(sock, sock->type, GFP_ATOMIC, kern);
6387 if (sk == NULL)
6388 diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
6389 index 0f5ce77460d4..8e396c7c8389 100644
6390 --- a/net/openvswitch/datapath.c
6391 +++ b/net/openvswitch/datapath.c
6392 @@ -2239,7 +2239,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
6393 [OVS_VPORT_ATTR_STATS] = { .len = sizeof(struct ovs_vport_stats) },
6394 [OVS_VPORT_ATTR_PORT_NO] = { .type = NLA_U32 },
6395 [OVS_VPORT_ATTR_TYPE] = { .type = NLA_U32 },
6396 - [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_U32 },
6397 + [OVS_VPORT_ATTR_UPCALL_PID] = { .type = NLA_UNSPEC },
6398 [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
6399 [OVS_VPORT_ATTR_IFINDEX] = { .type = NLA_U32 },
6400 [OVS_VPORT_ATTR_NETNSID] = { .type = NLA_S32 },
6401 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
6402 index 86e1e37eb4e8..5c75118539bb 100644
6403 --- a/net/qrtr/qrtr.c
6404 +++ b/net/qrtr/qrtr.c
6405 @@ -157,6 +157,7 @@ static void __qrtr_node_release(struct kref *kref)
6406 list_del(&node->item);
6407 mutex_unlock(&qrtr_node_lock);
6408
6409 + cancel_work_sync(&node->work);
6410 skb_queue_purge(&node->rx_queue);
6411 kfree(node);
6412 }
6413 diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
6414 index 98635311a5a0..ea0738ceb5bb 100644
6415 --- a/net/sched/act_sample.c
6416 +++ b/net/sched/act_sample.c
6417 @@ -134,6 +134,7 @@ static bool tcf_sample_dev_ok_push(struct net_device *dev)
6418 case ARPHRD_TUNNEL6:
6419 case ARPHRD_SIT:
6420 case ARPHRD_IPGRE:
6421 + case ARPHRD_IP6GRE:
6422 case ARPHRD_VOID:
6423 case ARPHRD_NONE:
6424 return false;
6425 diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
6426 index 4159bcb479c6..e217ebc693f8 100644
6427 --- a/net/sched/cls_api.c
6428 +++ b/net/sched/cls_api.c
6429 @@ -2038,8 +2038,10 @@ out:
6430 void tcf_exts_destroy(struct tcf_exts *exts)
6431 {
6432 #ifdef CONFIG_NET_CLS_ACT
6433 - tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
6434 - kfree(exts->actions);
6435 + if (exts->actions) {
6436 + tcf_action_destroy(exts->actions, TCA_ACT_UNBIND);
6437 + kfree(exts->actions);
6438 + }
6439 exts->nr_actions = 0;
6440 #endif
6441 }
6442 diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
6443 index b06cc5e50412..84fdc4857771 100644
6444 --- a/net/sched/sch_api.c
6445 +++ b/net/sched/sch_api.c
6446 @@ -1308,7 +1308,8 @@ check_loop_fn(struct Qdisc *q, unsigned long cl, struct qdisc_walker *w)
6447 }
6448
6449 const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
6450 - [TCA_KIND] = { .type = NLA_STRING },
6451 + [TCA_KIND] = { .type = NLA_NUL_STRING,
6452 + .len = IFNAMSIZ - 1 },
6453 [TCA_RATE] = { .type = NLA_BINARY,
6454 .len = sizeof(struct tc_estimator) },
6455 [TCA_STAB] = { .type = NLA_NESTED },
6456 diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
6457 index 4dfe10b9f96c..86350fe5cfc8 100644
6458 --- a/net/sched/sch_netem.c
6459 +++ b/net/sched/sch_netem.c
6460 @@ -749,7 +749,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
6461 struct disttable *d;
6462 int i;
6463
6464 - if (n > NETEM_DIST_MAX)
6465 + if (!n || n > NETEM_DIST_MAX)
6466 return -EINVAL;
6467
6468 d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
6469 diff --git a/net/wireless/util.c b/net/wireless/util.c
6470 index c14e8f6e5e19..d641d81da759 100644
6471 --- a/net/wireless/util.c
6472 +++ b/net/wireless/util.c
6473 @@ -930,6 +930,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
6474 }
6475
6476 cfg80211_process_rdev_events(rdev);
6477 + cfg80211_mlme_purge_registrations(dev->ieee80211_ptr);
6478 }
6479
6480 err = rdev_change_virtual_intf(rdev, dev, ntype, params);
6481 diff --git a/scripts/gcc-plugins/randomize_layout_plugin.c b/scripts/gcc-plugins/randomize_layout_plugin.c
6482 index 6d5bbd31db7f..bd29e4e7a524 100644
6483 --- a/scripts/gcc-plugins/randomize_layout_plugin.c
6484 +++ b/scripts/gcc-plugins/randomize_layout_plugin.c
6485 @@ -443,13 +443,13 @@ static int is_pure_ops_struct(const_tree node)
6486 if (node == fieldtype)
6487 continue;
6488
6489 - if (!is_fptr(fieldtype))
6490 - return 0;
6491 -
6492 - if (code != RECORD_TYPE && code != UNION_TYPE)
6493 + if (code == RECORD_TYPE || code == UNION_TYPE) {
6494 + if (!is_pure_ops_struct(fieldtype))
6495 + return 0;
6496 continue;
6497 + }
6498
6499 - if (!is_pure_ops_struct(fieldtype))
6500 + if (!is_fptr(fieldtype))
6501 return 0;
6502 }
6503
6504 diff --git a/sound/firewire/motu/motu.c b/sound/firewire/motu/motu.c
6505 index 743015e87a96..e240fdfcae31 100644
6506 --- a/sound/firewire/motu/motu.c
6507 +++ b/sound/firewire/motu/motu.c
6508 @@ -255,6 +255,17 @@ static const struct snd_motu_spec motu_audio_express = {
6509 .analog_out_ports = 4,
6510 };
6511
6512 +static const struct snd_motu_spec motu_4pre = {
6513 + .name = "4pre",
6514 + .protocol = &snd_motu_protocol_v3,
6515 + .flags = SND_MOTU_SPEC_SUPPORT_CLOCK_X2 |
6516 + SND_MOTU_SPEC_TX_MICINST_CHUNK |
6517 + SND_MOTU_SPEC_TX_RETURN_CHUNK |
6518 + SND_MOTU_SPEC_RX_SEPARETED_MAIN,
6519 + .analog_in_ports = 2,
6520 + .analog_out_ports = 2,
6521 +};
6522 +
6523 #define SND_MOTU_DEV_ENTRY(model, data) \
6524 { \
6525 .match_flags = IEEE1394_MATCH_VENDOR_ID | \
6526 @@ -272,6 +283,7 @@ static const struct ieee1394_device_id motu_id_table[] = {
6527 SND_MOTU_DEV_ENTRY(0x000015, &motu_828mk3), /* FireWire only. */
6528 SND_MOTU_DEV_ENTRY(0x000035, &motu_828mk3), /* Hybrid. */
6529 SND_MOTU_DEV_ENTRY(0x000033, &motu_audio_express),
6530 + SND_MOTU_DEV_ENTRY(0x000045, &motu_4pre),
6531 { }
6532 };
6533 MODULE_DEVICE_TABLE(ieee1394, motu_id_table);
6534 diff --git a/sound/firewire/tascam/tascam-pcm.c b/sound/firewire/tascam/tascam-pcm.c
6535 index e4cc8990e195..9e58633e2dea 100644
6536 --- a/sound/firewire/tascam/tascam-pcm.c
6537 +++ b/sound/firewire/tascam/tascam-pcm.c
6538 @@ -57,6 +57,9 @@ static int pcm_open(struct snd_pcm_substream *substream)
6539 goto err_locked;
6540
6541 err = snd_tscm_stream_get_clock(tscm, &clock);
6542 + if (err < 0)
6543 + goto err_locked;
6544 +
6545 if (clock != SND_TSCM_CLOCK_INTERNAL ||
6546 amdtp_stream_pcm_running(&tscm->rx_stream) ||
6547 amdtp_stream_pcm_running(&tscm->tx_stream)) {
6548 diff --git a/sound/firewire/tascam/tascam-stream.c b/sound/firewire/tascam/tascam-stream.c
6549 index f1657a4e0621..a1308f12a65b 100644
6550 --- a/sound/firewire/tascam/tascam-stream.c
6551 +++ b/sound/firewire/tascam/tascam-stream.c
6552 @@ -9,20 +9,37 @@
6553 #include <linux/delay.h>
6554 #include "tascam.h"
6555
6556 +#define CLOCK_STATUS_MASK 0xffff0000
6557 +#define CLOCK_CONFIG_MASK 0x0000ffff
6558 +
6559 #define CALLBACK_TIMEOUT 500
6560
6561 static int get_clock(struct snd_tscm *tscm, u32 *data)
6562 {
6563 + int trial = 0;
6564 __be32 reg;
6565 int err;
6566
6567 - err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
6568 - TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
6569 - &reg, sizeof(reg), 0);
6570 - if (err >= 0)
6571 + while (trial++ < 5) {
6572 + err = snd_fw_transaction(tscm->unit, TCODE_READ_QUADLET_REQUEST,
6573 + TSCM_ADDR_BASE + TSCM_OFFSET_CLOCK_STATUS,
6574 + &reg, sizeof(reg), 0);
6575 + if (err < 0)
6576 + return err;
6577 +
6578 *data = be32_to_cpu(reg);
6579 + if (*data & CLOCK_STATUS_MASK)
6580 + break;
6581
6582 - return err;
6583 + // In intermediate state after changing clock status.
6584 + msleep(50);
6585 + }
6586 +
6587 + // Still in the intermediate state.
6588 + if (trial >= 5)
6589 + return -EAGAIN;
6590 +
6591 + return 0;
6592 }
6593
6594 static int set_clock(struct snd_tscm *tscm, unsigned int rate,
6595 @@ -35,7 +52,7 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
6596 err = get_clock(tscm, &data);
6597 if (err < 0)
6598 return err;
6599 - data &= 0x0000ffff;
6600 + data &= CLOCK_CONFIG_MASK;
6601
6602 if (rate > 0) {
6603 data &= 0x000000ff;
6604 @@ -80,17 +97,14 @@ static int set_clock(struct snd_tscm *tscm, unsigned int rate,
6605
6606 int snd_tscm_stream_get_rate(struct snd_tscm *tscm, unsigned int *rate)
6607 {
6608 - u32 data = 0x0;
6609 - unsigned int trials = 0;
6610 + u32 data;
6611 int err;
6612
6613 - while (data == 0x0 || trials++ < 5) {
6614 - err = get_clock(tscm, &data);
6615 - if (err < 0)
6616 - return err;
6617 + err = get_clock(tscm, &data);
6618 + if (err < 0)
6619 + return err;
6620
6621 - data = (data & 0xff000000) >> 24;
6622 - }
6623 + data = (data & 0xff000000) >> 24;
6624
6625 /* Check base rate. */
6626 if ((data & 0x0f) == 0x01)
6627 diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
6628 index 74244d8e2909..e858b6fa0c3a 100644
6629 --- a/sound/hda/hdac_controller.c
6630 +++ b/sound/hda/hdac_controller.c
6631 @@ -443,6 +443,8 @@ static void azx_int_disable(struct hdac_bus *bus)
6632 list_for_each_entry(azx_dev, &bus->stream_list, list)
6633 snd_hdac_stream_updateb(azx_dev, SD_CTL, SD_INT_MASK, 0);
6634
6635 + synchronize_irq(bus->irq);
6636 +
6637 /* disable SIE for all streams */
6638 snd_hdac_chip_writeb(bus, INTCTL, 0);
6639
6640 diff --git a/sound/i2c/other/ak4xxx-adda.c b/sound/i2c/other/ak4xxx-adda.c
6641 index 7f2761a2e7c8..971197c34fce 100644
6642 --- a/sound/i2c/other/ak4xxx-adda.c
6643 +++ b/sound/i2c/other/ak4xxx-adda.c
6644 @@ -789,11 +789,12 @@ static int build_adc_controls(struct snd_akm4xxx *ak)
6645 return err;
6646
6647 memset(&knew, 0, sizeof(knew));
6648 - knew.name = ak->adc_info[mixer_ch].selector_name;
6649 - if (!knew.name) {
6650 + if (!ak->adc_info ||
6651 + !ak->adc_info[mixer_ch].selector_name) {
6652 knew.name = "Capture Channel";
6653 knew.index = mixer_ch + ak->idx_offset * 2;
6654 - }
6655 + } else
6656 + knew.name = ak->adc_info[mixer_ch].selector_name;
6657
6658 knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
6659 knew.info = ak4xxx_capture_source_info;
6660 diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
6661 index a41c1bec7c88..8fcb421193e0 100644
6662 --- a/sound/pci/hda/hda_controller.c
6663 +++ b/sound/pci/hda/hda_controller.c
6664 @@ -877,10 +877,13 @@ static int azx_rirb_get_response(struct hdac_bus *bus, unsigned int addr,
6665 */
6666 if (hbus->allow_bus_reset && !hbus->response_reset && !hbus->in_reset) {
6667 hbus->response_reset = 1;
6668 + dev_err(chip->card->dev,
6669 + "No response from codec, resetting bus: last cmd=0x%08x\n",
6670 + bus->last_cmd[addr]);
6671 return -EAGAIN; /* give a chance to retry */
6672 }
6673
6674 - dev_err(chip->card->dev,
6675 + dev_WARN(chip->card->dev,
6676 "azx_get_response timeout, switching to single_cmd mode: last cmd=0x%08x\n",
6677 bus->last_cmd[addr]);
6678 chip->single_cmd = 1;
6679 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6680 index 0b24c5ce2fd6..bfc45086cf79 100644
6681 --- a/sound/pci/hda/hda_intel.c
6682 +++ b/sound/pci/hda/hda_intel.c
6683 @@ -1455,9 +1455,9 @@ static int azx_free(struct azx *chip)
6684 }
6685
6686 if (bus->chip_init) {
6687 + azx_stop_chip(chip);
6688 azx_clear_irq_pending(chip);
6689 azx_stop_all_streams(chip);
6690 - azx_stop_chip(chip);
6691 }
6692
6693 if (bus->irq >= 0)
6694 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
6695 index e4fbfb5557ab..107ec7f3e221 100644
6696 --- a/sound/pci/hda/patch_hdmi.c
6697 +++ b/sound/pci/hda/patch_hdmi.c
6698 @@ -2583,6 +2583,8 @@ static void i915_pin_cvt_fixup(struct hda_codec *codec,
6699 /* precondition and allocation for Intel codecs */
6700 static int alloc_intel_hdmi(struct hda_codec *codec)
6701 {
6702 + int err;
6703 +
6704 /* requires i915 binding */
6705 if (!codec->bus->core.audio_component) {
6706 codec_info(codec, "No i915 binding for Intel HDMI/DP codec\n");
6707 @@ -2591,7 +2593,12 @@ static int alloc_intel_hdmi(struct hda_codec *codec)
6708 return -ENODEV;
6709 }
6710
6711 - return alloc_generic_hdmi(codec);
6712 + err = alloc_generic_hdmi(codec);
6713 + if (err < 0)
6714 + return err;
6715 + /* no need to handle unsol events */
6716 + codec->patch_ops.unsol_event = NULL;
6717 + return 0;
6718 }
6719
6720 /* parse and post-process for Intel codecs */
6721 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6722 index 7f74ebee8c2d..e1b08d6f2a51 100644
6723 --- a/sound/pci/hda/patch_realtek.c
6724 +++ b/sound/pci/hda/patch_realtek.c
6725 @@ -1057,6 +1057,9 @@ static const struct snd_pci_quirk beep_white_list[] = {
6726 SND_PCI_QUIRK(0x1043, 0x834a, "EeePC", 1),
6727 SND_PCI_QUIRK(0x1458, 0xa002, "GA-MA790X", 1),
6728 SND_PCI_QUIRK(0x8086, 0xd613, "Intel", 1),
6729 + /* blacklist -- no beep available */
6730 + SND_PCI_QUIRK(0x17aa, 0x309e, "Lenovo ThinkCentre M73", 0),
6731 + SND_PCI_QUIRK(0x17aa, 0x30a3, "Lenovo ThinkCentre M93", 0),
6732 {}
6733 };
6734
6735 @@ -5676,6 +5679,7 @@ enum {
6736 ALC256_FIXUP_ASUS_MIC_NO_PRESENCE,
6737 ALC299_FIXUP_PREDATOR_SPK,
6738 ALC294_FIXUP_ASUS_INTSPK_HEADSET_MIC,
6739 + ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE,
6740 };
6741
6742 static const struct hda_fixup alc269_fixups[] = {
6743 @@ -6714,6 +6718,16 @@ static const struct hda_fixup alc269_fixups[] = {
6744 .chained = true,
6745 .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
6746 },
6747 + [ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE] = {
6748 + .type = HDA_FIXUP_PINS,
6749 + .v.pins = (const struct hda_pintbl[]) {
6750 + { 0x19, 0x04a11040 },
6751 + { 0x21, 0x04211020 },
6752 + { }
6753 + },
6754 + .chained = true,
6755 + .chain_id = ALC256_FIXUP_ASUS_HEADSET_MODE
6756 + },
6757 };
6758
6759 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6760 @@ -6977,6 +6991,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6761 SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
6762 SND_PCI_QUIRK(0x19e5, 0x3204, "Huawei MBXP", ALC256_FIXUP_HUAWEI_MBXP_PINS),
6763 SND_PCI_QUIRK(0x1b7d, 0xa831, "Ordissimo EVE2 ", ALC269VB_FIXUP_ORDISSIMO_EVE2), /* Also known as Malata PC-B1303 */
6764 + SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
6765
6766 #if 0
6767 /* Below is a quirk table taken from the old code.
6768 @@ -7141,6 +7156,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
6769 {.id = ALC255_FIXUP_DELL_HEADSET_MIC, .name = "alc255-dell-headset"},
6770 {.id = ALC295_FIXUP_HP_X360, .name = "alc295-hp-x360"},
6771 {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
6772 + {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
6773 {}
6774 };
6775 #define ALC225_STANDARD_PINS \
6776 diff --git a/sound/soc/codecs/es8316.c b/sound/soc/codecs/es8316.c
6777 index e97d12d578b0..9ebe77c3784a 100644
6778 --- a/sound/soc/codecs/es8316.c
6779 +++ b/sound/soc/codecs/es8316.c
6780 @@ -46,7 +46,10 @@ static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(adc_vol_tlv, -9600, 50, 1);
6781 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_max_gain_tlv, -650, 150, 0);
6782 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_min_gain_tlv, -1200, 150, 0);
6783 static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(alc_target_tlv, -1650, 150, 0);
6784 -static const SNDRV_CTL_TLVD_DECLARE_DB_SCALE(hpmixer_gain_tlv, -1200, 150, 0);
6785 +static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(hpmixer_gain_tlv,
6786 + 0, 4, TLV_DB_SCALE_ITEM(-1200, 150, 0),
6787 + 8, 11, TLV_DB_SCALE_ITEM(-450, 150, 0),
6788 +);
6789
6790 static const SNDRV_CTL_TLVD_DECLARE_DB_RANGE(adc_pga_gain_tlv,
6791 0, 0, TLV_DB_SCALE_ITEM(-350, 0, 0),
6792 @@ -84,7 +87,7 @@ static const struct snd_kcontrol_new es8316_snd_controls[] = {
6793 SOC_DOUBLE_TLV("Headphone Playback Volume", ES8316_CPHP_ICAL_VOL,
6794 4, 0, 3, 1, hpout_vol_tlv),
6795 SOC_DOUBLE_TLV("Headphone Mixer Volume", ES8316_HPMIX_VOL,
6796 - 0, 4, 7, 0, hpmixer_gain_tlv),
6797 + 0, 4, 11, 0, hpmixer_gain_tlv),
6798
6799 SOC_ENUM("Playback Polarity", dacpol),
6800 SOC_DOUBLE_R_TLV("DAC Playback Volume", ES8316_DAC_VOLL,
6801 diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
6802 index 60764f6201b1..18cddf1729a6 100644
6803 --- a/sound/soc/codecs/sgtl5000.c
6804 +++ b/sound/soc/codecs/sgtl5000.c
6805 @@ -1165,12 +1165,17 @@ static int sgtl5000_set_power_regs(struct snd_soc_component *component)
6806 SGTL5000_INT_OSC_EN);
6807 /* Enable VDDC charge pump */
6808 ana_pwr |= SGTL5000_VDDC_CHRGPMP_POWERUP;
6809 - } else if (vddio >= 3100 && vdda >= 3100) {
6810 + } else {
6811 ana_pwr &= ~SGTL5000_VDDC_CHRGPMP_POWERUP;
6812 - /* VDDC use VDDIO rail */
6813 - lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
6814 - lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
6815 - SGTL5000_VDDC_MAN_ASSN_SHIFT;
6816 + /*
6817 + * if vddio == vdda the source of charge pump should be
6818 + * assigned manually to VDDIO
6819 + */
6820 + if (vddio == vdda) {
6821 + lreg_ctrl |= SGTL5000_VDDC_ASSN_OVRD;
6822 + lreg_ctrl |= SGTL5000_VDDC_MAN_ASSN_VDDIO <<
6823 + SGTL5000_VDDC_MAN_ASSN_SHIFT;
6824 + }
6825 }
6826
6827 snd_soc_component_write(component, SGTL5000_CHIP_LINREG_CTRL, lreg_ctrl);
6828 @@ -1280,6 +1285,7 @@ static int sgtl5000_probe(struct snd_soc_component *component)
6829 int ret;
6830 u16 reg;
6831 struct sgtl5000_priv *sgtl5000 = snd_soc_component_get_drvdata(component);
6832 + unsigned int zcd_mask = SGTL5000_HP_ZCD_EN | SGTL5000_ADC_ZCD_EN;
6833
6834 /* power up sgtl5000 */
6835 ret = sgtl5000_set_power_regs(component);
6836 @@ -1305,9 +1311,8 @@ static int sgtl5000_probe(struct snd_soc_component *component)
6837 reg = ((sgtl5000->lrclk_strength) << SGTL5000_PAD_I2S_LRCLK_SHIFT | 0x5f);
6838 snd_soc_component_write(component, SGTL5000_CHIP_PAD_STRENGTH, reg);
6839
6840 - snd_soc_component_write(component, SGTL5000_CHIP_ANA_CTRL,
6841 - SGTL5000_HP_ZCD_EN |
6842 - SGTL5000_ADC_ZCD_EN);
6843 + snd_soc_component_update_bits(component, SGTL5000_CHIP_ANA_CTRL,
6844 + zcd_mask, zcd_mask);
6845
6846 snd_soc_component_update_bits(component, SGTL5000_CHIP_MIC_CTRL,
6847 SGTL5000_BIAS_R_MASK,
6848 diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
6849 index bf92d36b8f8a..3c75dcf91741 100644
6850 --- a/sound/soc/codecs/tlv320aic31xx.c
6851 +++ b/sound/soc/codecs/tlv320aic31xx.c
6852 @@ -1441,7 +1441,8 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
6853 aic31xx->gpio_reset = devm_gpiod_get_optional(aic31xx->dev, "reset",
6854 GPIOD_OUT_LOW);
6855 if (IS_ERR(aic31xx->gpio_reset)) {
6856 - dev_err(aic31xx->dev, "not able to acquire gpio\n");
6857 + if (PTR_ERR(aic31xx->gpio_reset) != -EPROBE_DEFER)
6858 + dev_err(aic31xx->dev, "not able to acquire gpio\n");
6859 return PTR_ERR(aic31xx->gpio_reset);
6860 }
6861
6862 @@ -1452,7 +1453,9 @@ static int aic31xx_i2c_probe(struct i2c_client *i2c,
6863 ARRAY_SIZE(aic31xx->supplies),
6864 aic31xx->supplies);
6865 if (ret) {
6866 - dev_err(aic31xx->dev, "Failed to request supplies: %d\n", ret);
6867 + if (ret != -EPROBE_DEFER)
6868 + dev_err(aic31xx->dev,
6869 + "Failed to request supplies: %d\n", ret);
6870 return ret;
6871 }
6872
6873 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
6874 index 09b2967befd9..d83be26d6446 100644
6875 --- a/sound/soc/fsl/fsl_ssi.c
6876 +++ b/sound/soc/fsl/fsl_ssi.c
6877 @@ -799,15 +799,6 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
6878 u32 wl = SSI_SxCCR_WL(sample_size);
6879 int ret;
6880
6881 - /*
6882 - * SSI is properly configured if it is enabled and running in
6883 - * the synchronous mode; Note that AC97 mode is an exception
6884 - * that should set separate configurations for STCCR and SRCCR
6885 - * despite running in the synchronous mode.
6886 - */
6887 - if (ssi->streams && ssi->synchronous)
6888 - return 0;
6889 -
6890 if (fsl_ssi_is_i2s_master(ssi)) {
6891 ret = fsl_ssi_set_bclk(substream, dai, hw_params);
6892 if (ret)
6893 @@ -823,6 +814,15 @@ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream,
6894 }
6895 }
6896
6897 + /*
6898 + * SSI is properly configured if it is enabled and running in
6899 + * the synchronous mode; Note that AC97 mode is an exception
6900 + * that should set separate configurations for STCCR and SRCCR
6901 + * despite running in the synchronous mode.
6902 + */
6903 + if (ssi->streams && ssi->synchronous)
6904 + return 0;
6905 +
6906 if (!fsl_ssi_is_ac97(ssi)) {
6907 /*
6908 * Keep the ssi->i2s_net intact while having a local variable
6909 diff --git a/sound/soc/intel/common/sst-ipc.c b/sound/soc/intel/common/sst-ipc.c
6910 index dcff13802c00..771734fd7707 100644
6911 --- a/sound/soc/intel/common/sst-ipc.c
6912 +++ b/sound/soc/intel/common/sst-ipc.c
6913 @@ -231,6 +231,8 @@ struct ipc_message *sst_ipc_reply_find_msg(struct sst_generic_ipc *ipc,
6914
6915 if (ipc->ops.reply_msg_match != NULL)
6916 header = ipc->ops.reply_msg_match(header, &mask);
6917 + else
6918 + mask = (u64)-1;
6919
6920 if (list_empty(&ipc->rx_list)) {
6921 dev_err(ipc->dev, "error: rx list empty but received 0x%llx\n",
6922 diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
6923 index 5d7ac2ee7a3c..faf1cba57abb 100644
6924 --- a/sound/soc/intel/skylake/skl-debug.c
6925 +++ b/sound/soc/intel/skylake/skl-debug.c
6926 @@ -196,7 +196,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
6927 memset(d->fw_read_buff, 0, FW_REG_BUF);
6928
6929 if (w0_stat_sz > 0)
6930 - __iowrite32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
6931 + __ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
6932
6933 for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
6934 ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
6935 diff --git a/sound/soc/intel/skylake/skl-nhlt.c b/sound/soc/intel/skylake/skl-nhlt.c
6936 index 01a050cf8775..3cef2ebfd8be 100644
6937 --- a/sound/soc/intel/skylake/skl-nhlt.c
6938 +++ b/sound/soc/intel/skylake/skl-nhlt.c
6939 @@ -231,7 +231,7 @@ int skl_nhlt_update_topology_bin(struct skl *skl)
6940 struct hdac_bus *bus = skl_to_bus(skl);
6941 struct device *dev = bus->dev;
6942
6943 - dev_dbg(dev, "oem_id %.6s, oem_table_id %8s oem_revision %d\n",
6944 + dev_dbg(dev, "oem_id %.6s, oem_table_id %.8s oem_revision %d\n",
6945 nhlt->header.oem_id, nhlt->header.oem_table_id,
6946 nhlt->header.oem_revision);
6947
6948 diff --git a/sound/soc/sh/rcar/adg.c b/sound/soc/sh/rcar/adg.c
6949 index 051f96405346..549a137878a6 100644
6950 --- a/sound/soc/sh/rcar/adg.c
6951 +++ b/sound/soc/sh/rcar/adg.c
6952 @@ -30,6 +30,7 @@ struct rsnd_adg {
6953 struct clk *clkout[CLKOUTMAX];
6954 struct clk_onecell_data onecell;
6955 struct rsnd_mod mod;
6956 + int clk_rate[CLKMAX];
6957 u32 flags;
6958 u32 ckr;
6959 u32 rbga;
6960 @@ -113,9 +114,9 @@ static void __rsnd_adg_get_timesel_ratio(struct rsnd_priv *priv,
6961 unsigned int val, en;
6962 unsigned int min, diff;
6963 unsigned int sel_rate[] = {
6964 - clk_get_rate(adg->clk[CLKA]), /* 0000: CLKA */
6965 - clk_get_rate(adg->clk[CLKB]), /* 0001: CLKB */
6966 - clk_get_rate(adg->clk[CLKC]), /* 0010: CLKC */
6967 + adg->clk_rate[CLKA], /* 0000: CLKA */
6968 + adg->clk_rate[CLKB], /* 0001: CLKB */
6969 + adg->clk_rate[CLKC], /* 0010: CLKC */
6970 adg->rbga_rate_for_441khz, /* 0011: RBGA */
6971 adg->rbgb_rate_for_48khz, /* 0100: RBGB */
6972 };
6973 @@ -331,7 +332,7 @@ int rsnd_adg_clk_query(struct rsnd_priv *priv, unsigned int rate)
6974 * AUDIO_CLKA/AUDIO_CLKB/AUDIO_CLKC/AUDIO_CLKI.
6975 */
6976 for_each_rsnd_clk(clk, adg, i) {
6977 - if (rate == clk_get_rate(clk))
6978 + if (rate == adg->clk_rate[i])
6979 return sel_table[i];
6980 }
6981
6982 @@ -398,10 +399,18 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
6983
6984 for_each_rsnd_clk(clk, adg, i) {
6985 ret = 0;
6986 - if (enable)
6987 + if (enable) {
6988 ret = clk_prepare_enable(clk);
6989 - else
6990 +
6991 + /*
6992 + * We shouldn't use clk_get_rate() under
6993 + * atomic context. Let's keep it when
6994 + * rsnd_adg_clk_enable() was called
6995 + */
6996 + adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
6997 + } else {
6998 clk_disable_unprepare(clk);
6999 + }
7000
7001 if (ret < 0)
7002 dev_warn(dev, "can't use clk %d\n", i);
7003 diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
7004 index 30e791a53352..232df04ca586 100644
7005 --- a/sound/soc/soc-generic-dmaengine-pcm.c
7006 +++ b/sound/soc/soc-generic-dmaengine-pcm.c
7007 @@ -313,6 +313,12 @@ static int dmaengine_pcm_new(struct snd_soc_pcm_runtime *rtd)
7008
7009 if (!dmaengine_pcm_can_report_residue(dev, pcm->chan[i]))
7010 pcm->flags |= SND_DMAENGINE_PCM_FLAG_NO_RESIDUE;
7011 +
7012 + if (rtd->pcm->streams[i].pcm->name[0] == '\0') {
7013 + strncpy(rtd->pcm->streams[i].pcm->name,
7014 + rtd->pcm->streams[i].pcm->id,
7015 + sizeof(rtd->pcm->streams[i].pcm->name));
7016 + }
7017 }
7018
7019 return 0;
7020 diff --git a/sound/soc/sunxi/sun4i-i2s.c b/sound/soc/sunxi/sun4i-i2s.c
7021 index 6173dd86c62c..18cf8404d27c 100644
7022 --- a/sound/soc/sunxi/sun4i-i2s.c
7023 +++ b/sound/soc/sunxi/sun4i-i2s.c
7024 @@ -223,10 +223,11 @@ static const struct sun4i_i2s_clk_div sun4i_i2s_mclk_div[] = {
7025 };
7026
7027 static int sun4i_i2s_get_bclk_div(struct sun4i_i2s *i2s,
7028 - unsigned int oversample_rate,
7029 + unsigned long parent_rate,
7030 + unsigned int sampling_rate,
7031 unsigned int word_size)
7032 {
7033 - int div = oversample_rate / word_size / 2;
7034 + int div = parent_rate / sampling_rate / word_size / 2;
7035 int i;
7036
7037 for (i = 0; i < ARRAY_SIZE(sun4i_i2s_bclk_div); i++) {
7038 @@ -316,8 +317,8 @@ static int sun4i_i2s_set_clk_rate(struct snd_soc_dai *dai,
7039 return -EINVAL;
7040 }
7041
7042 - bclk_div = sun4i_i2s_get_bclk_div(i2s, oversample_rate,
7043 - word_size);
7044 + bclk_div = sun4i_i2s_get_bclk_div(i2s, i2s->mclk_freq,
7045 + rate, word_size);
7046 if (bclk_div < 0) {
7047 dev_err(dai->dev, "Unsupported BCLK divider: %d\n", bclk_div);
7048 return -EINVAL;
7049 diff --git a/sound/soc/uniphier/aio-cpu.c b/sound/soc/uniphier/aio-cpu.c
7050 index ee90e6c3937c..2ae582a99b63 100644
7051 --- a/sound/soc/uniphier/aio-cpu.c
7052 +++ b/sound/soc/uniphier/aio-cpu.c
7053 @@ -424,8 +424,11 @@ int uniphier_aio_dai_suspend(struct snd_soc_dai *dai)
7054 {
7055 struct uniphier_aio *aio = uniphier_priv(dai);
7056
7057 - reset_control_assert(aio->chip->rst);
7058 - clk_disable_unprepare(aio->chip->clk);
7059 + aio->chip->num_wup_aios--;
7060 + if (!aio->chip->num_wup_aios) {
7061 + reset_control_assert(aio->chip->rst);
7062 + clk_disable_unprepare(aio->chip->clk);
7063 + }
7064
7065 return 0;
7066 }
7067 @@ -439,13 +442,15 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
7068 if (!aio->chip->active)
7069 return 0;
7070
7071 - ret = clk_prepare_enable(aio->chip->clk);
7072 - if (ret)
7073 - return ret;
7074 + if (!aio->chip->num_wup_aios) {
7075 + ret = clk_prepare_enable(aio->chip->clk);
7076 + if (ret)
7077 + return ret;
7078
7079 - ret = reset_control_deassert(aio->chip->rst);
7080 - if (ret)
7081 - goto err_out_clock;
7082 + ret = reset_control_deassert(aio->chip->rst);
7083 + if (ret)
7084 + goto err_out_clock;
7085 + }
7086
7087 aio_iecout_set_enable(aio->chip, true);
7088 aio_chip_init(aio->chip);
7089 @@ -458,7 +463,7 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
7090
7091 ret = aio_init(sub);
7092 if (ret)
7093 - goto err_out_clock;
7094 + goto err_out_reset;
7095
7096 if (!sub->setting)
7097 continue;
7098 @@ -466,11 +471,16 @@ int uniphier_aio_dai_resume(struct snd_soc_dai *dai)
7099 aio_port_reset(sub);
7100 aio_src_reset(sub);
7101 }
7102 + aio->chip->num_wup_aios++;
7103
7104 return 0;
7105
7106 +err_out_reset:
7107 + if (!aio->chip->num_wup_aios)
7108 + reset_control_assert(aio->chip->rst);
7109 err_out_clock:
7110 - clk_disable_unprepare(aio->chip->clk);
7111 + if (!aio->chip->num_wup_aios)
7112 + clk_disable_unprepare(aio->chip->clk);
7113
7114 return ret;
7115 }
7116 @@ -619,6 +629,7 @@ int uniphier_aio_probe(struct platform_device *pdev)
7117 return PTR_ERR(chip->rst);
7118
7119 chip->num_aios = chip->chip_spec->num_dais;
7120 + chip->num_wup_aios = chip->num_aios;
7121 chip->aios = devm_kcalloc(dev,
7122 chip->num_aios, sizeof(struct uniphier_aio),
7123 GFP_KERNEL);
7124 diff --git a/sound/soc/uniphier/aio.h b/sound/soc/uniphier/aio.h
7125 index ca6ccbae0ee8..a7ff7e556429 100644
7126 --- a/sound/soc/uniphier/aio.h
7127 +++ b/sound/soc/uniphier/aio.h
7128 @@ -285,6 +285,7 @@ struct uniphier_aio_chip {
7129
7130 struct uniphier_aio *aios;
7131 int num_aios;
7132 + int num_wup_aios;
7133 struct uniphier_aio_pll *plls;
7134 int num_plls;
7135
7136 diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
7137 index 35c57a4204a8..13ea63c959d3 100644
7138 --- a/sound/usb/pcm.c
7139 +++ b/sound/usb/pcm.c
7140 @@ -464,6 +464,7 @@ static int set_sync_endpoint(struct snd_usb_substream *subs,
7141 }
7142 ep = get_endpoint(alts, 1)->bEndpointAddress;
7143 if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
7144 + get_endpoint(alts, 0)->bSynchAddress != 0 &&
7145 ((is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
7146 (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
7147 dev_err(&dev->dev,
7148 diff --git a/tools/include/uapi/asm/bitsperlong.h b/tools/include/uapi/asm/bitsperlong.h
7149 index 57aaeaf8e192..edba4d93e9e6 100644
7150 --- a/tools/include/uapi/asm/bitsperlong.h
7151 +++ b/tools/include/uapi/asm/bitsperlong.h
7152 @@ -1,22 +1,22 @@
7153 /* SPDX-License-Identifier: GPL-2.0 */
7154 #if defined(__i386__) || defined(__x86_64__)
7155 -#include "../../arch/x86/include/uapi/asm/bitsperlong.h"
7156 +#include "../../../arch/x86/include/uapi/asm/bitsperlong.h"
7157 #elif defined(__aarch64__)
7158 -#include "../../arch/arm64/include/uapi/asm/bitsperlong.h"
7159 +#include "../../../arch/arm64/include/uapi/asm/bitsperlong.h"
7160 #elif defined(__powerpc__)
7161 -#include "../../arch/powerpc/include/uapi/asm/bitsperlong.h"
7162 +#include "../../../arch/powerpc/include/uapi/asm/bitsperlong.h"
7163 #elif defined(__s390__)
7164 -#include "../../arch/s390/include/uapi/asm/bitsperlong.h"
7165 +#include "../../../arch/s390/include/uapi/asm/bitsperlong.h"
7166 #elif defined(__sparc__)
7167 -#include "../../arch/sparc/include/uapi/asm/bitsperlong.h"
7168 +#include "../../../arch/sparc/include/uapi/asm/bitsperlong.h"
7169 #elif defined(__mips__)
7170 -#include "../../arch/mips/include/uapi/asm/bitsperlong.h"
7171 +#include "../../../arch/mips/include/uapi/asm/bitsperlong.h"
7172 #elif defined(__ia64__)
7173 -#include "../../arch/ia64/include/uapi/asm/bitsperlong.h"
7174 +#include "../../../arch/ia64/include/uapi/asm/bitsperlong.h"
7175 #elif defined(__riscv)
7176 -#include "../../arch/riscv/include/uapi/asm/bitsperlong.h"
7177 +#include "../../../arch/riscv/include/uapi/asm/bitsperlong.h"
7178 #elif defined(__alpha__)
7179 -#include "../../arch/alpha/include/uapi/asm/bitsperlong.h"
7180 +#include "../../../arch/alpha/include/uapi/asm/bitsperlong.h"
7181 #else
7182 #include <asm-generic/bitsperlong.h>
7183 #endif
7184 diff --git a/tools/lib/traceevent/Makefile b/tools/lib/traceevent/Makefile
7185 index 0b4e833088a4..95a43ccb6dd0 100644
7186 --- a/tools/lib/traceevent/Makefile
7187 +++ b/tools/lib/traceevent/Makefile
7188 @@ -55,15 +55,15 @@ set_plugin_dir := 1
7189
7190 # Set plugin_dir to preffered global plugin location
7191 # If we install under $HOME directory we go under
7192 -# $(HOME)/.traceevent/plugins
7193 +# $(HOME)/.local/lib/traceevent/plugins
7194 #
7195 # We dont set PLUGIN_DIR in case we install under $HOME
7196 # directory, because by default the code looks under:
7197 -# $(HOME)/.traceevent/plugins by default.
7198 +# $(HOME)/.local/lib/traceevent/plugins by default.
7199 #
7200 ifeq ($(plugin_dir),)
7201 ifeq ($(prefix),$(HOME))
7202 -override plugin_dir = $(HOME)/.traceevent/plugins
7203 +override plugin_dir = $(HOME)/.local/lib/traceevent/plugins
7204 set_plugin_dir := 0
7205 else
7206 override plugin_dir = $(libdir)/traceevent/plugins
7207 diff --git a/tools/lib/traceevent/event-plugin.c b/tools/lib/traceevent/event-plugin.c
7208 index f17e25097e1e..52874eb94ace 100644
7209 --- a/tools/lib/traceevent/event-plugin.c
7210 +++ b/tools/lib/traceevent/event-plugin.c
7211 @@ -16,7 +16,7 @@
7212 #include "event-parse.h"
7213 #include "event-utils.h"
7214
7215 -#define LOCAL_PLUGIN_DIR ".traceevent/plugins"
7216 +#define LOCAL_PLUGIN_DIR ".local/lib/traceevent/plugins/"
7217
7218 static struct registered_plugin_options {
7219 struct registered_plugin_options *next;
7220 diff --git a/tools/perf/perf.c b/tools/perf/perf.c
7221 index a11cb006f968..80f8ae8b1366 100644
7222 --- a/tools/perf/perf.c
7223 +++ b/tools/perf/perf.c
7224 @@ -439,6 +439,9 @@ int main(int argc, const char **argv)
7225
7226 srandom(time(NULL));
7227
7228 + /* Setting $PERF_CONFIG makes perf read _only_ the given config file. */
7229 + config_exclusive_filename = getenv("PERF_CONFIG");
7230 +
7231 err = perf_config(perf_default_config, NULL);
7232 if (err)
7233 return err;
7234 diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
7235 index 4ce276efe6b4..fe223fc5c1f8 100755
7236 --- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
7237 +++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
7238 @@ -29,6 +29,10 @@ if [ $err -ne 0 ] ; then
7239 exit $err
7240 fi
7241
7242 +# Do not use whatever ~/.perfconfig file, it may change the output
7243 +# via trace.{show_timestamp,show_prefix,etc}
7244 +export PERF_CONFIG=/dev/null
7245 +
7246 trace_open_vfs_getname
7247 err=$?
7248 rm -f ${file}
7249 diff --git a/tools/perf/trace/beauty/ioctl.c b/tools/perf/trace/beauty/ioctl.c
7250 index 1be3b4cf0827..82346ca06f17 100644
7251 --- a/tools/perf/trace/beauty/ioctl.c
7252 +++ b/tools/perf/trace/beauty/ioctl.c
7253 @@ -22,7 +22,7 @@
7254 static size_t ioctl__scnprintf_tty_cmd(int nr, int dir, char *bf, size_t size)
7255 {
7256 static const char *ioctl_tty_cmd[] = {
7257 - "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
7258 + [_IOC_NR(TCGETS)] = "TCGETS", "TCSETS", "TCSETSW", "TCSETSF", "TCGETA", "TCSETA", "TCSETAW",
7259 "TCSETAF", "TCSBRK", "TCXONC", "TCFLSH", "TIOCEXCL", "TIOCNXCL", "TIOCSCTTY",
7260 "TIOCGPGRP", "TIOCSPGRP", "TIOCOUTQ", "TIOCSTI", "TIOCGWINSZ", "TIOCSWINSZ",
7261 "TIOCMGET", "TIOCMBIS", "TIOCMBIC", "TIOCMSET", "TIOCGSOFTCAR", "TIOCSSOFTCAR",
7262 diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
7263 index 54c34c107cab..0c70788593c8 100644
7264 --- a/tools/perf/util/header.c
7265 +++ b/tools/perf/util/header.c
7266 @@ -2184,8 +2184,10 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
7267 /* On s390 the socket_id number is not related to the numbers of cpus.
7268 * The socket_id number might be higher than the numbers of cpus.
7269 * This depends on the configuration.
7270 + * AArch64 is the same.
7271 */
7272 - if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
7273 + if (ph->env.arch && (!strncmp(ph->env.arch, "s390", 4)
7274 + || !strncmp(ph->env.arch, "aarch64", 7)))
7275 do_core_id_test = false;
7276
7277 for (i = 0; i < (u32)cpu_nr; i++) {
7278 diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
7279 index 7ffe562e7ae7..2627b038b6f2 100644
7280 --- a/tools/perf/util/xyarray.h
7281 +++ b/tools/perf/util/xyarray.h
7282 @@ -2,6 +2,7 @@
7283 #ifndef _PERF_XYARRAY_H_
7284 #define _PERF_XYARRAY_H_ 1
7285
7286 +#include <linux/compiler.h>
7287 #include <sys/types.h>
7288
7289 struct xyarray {
7290 @@ -10,7 +11,7 @@ struct xyarray {
7291 size_t entries;
7292 size_t max_x;
7293 size_t max_y;
7294 - char contents[];
7295 + char contents[] __aligned(8);
7296 };
7297
7298 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);