Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.14/0135-4.14.36-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 224179 bytes
-added up to patches-4.14.79
1 niro 3238 diff --git a/Makefile b/Makefile
2     index 995666d5e57b..0a1f941899f4 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,7 +1,7 @@
6     # SPDX-License-Identifier: GPL-2.0
7     VERSION = 4
8     PATCHLEVEL = 14
9     -SUBLEVEL = 35
10     +SUBLEVEL = 36
11     EXTRAVERSION =
12     NAME = Petit Gorille
13    
14     diff --git a/arch/arm/boot/dts/at91sam9g25.dtsi b/arch/arm/boot/dts/at91sam9g25.dtsi
15     index a7da0dd0c98f..0898213f3bb2 100644
16     --- a/arch/arm/boot/dts/at91sam9g25.dtsi
17     +++ b/arch/arm/boot/dts/at91sam9g25.dtsi
18     @@ -21,7 +21,7 @@
19     atmel,mux-mask = <
20     /* A B C */
21     0xffffffff 0xffe0399f 0xc000001c /* pioA */
22     - 0x0007ffff 0x8000fe3f 0x00000000 /* pioB */
23     + 0x0007ffff 0x00047e3f 0x00000000 /* pioB */
24     0x80000000 0x07c0ffff 0xb83fffff /* pioC */
25     0x003fffff 0x003f8000 0x00000000 /* pioD */
26     >;
27     diff --git a/arch/arm/boot/dts/da850-lego-ev3.dts b/arch/arm/boot/dts/da850-lego-ev3.dts
28     index 413dbd5d9f64..81942ae83e1f 100644
29     --- a/arch/arm/boot/dts/da850-lego-ev3.dts
30     +++ b/arch/arm/boot/dts/da850-lego-ev3.dts
31     @@ -178,7 +178,7 @@
32     */
33     battery {
34     pinctrl-names = "default";
35     - pintctrl-0 = <&battery_pins>;
36     + pinctrl-0 = <&battery_pins>;
37     compatible = "lego,ev3-battery";
38     io-channels = <&adc 4>, <&adc 3>;
39     io-channel-names = "voltage", "current";
40     @@ -392,7 +392,7 @@
41     batt_volt_en {
42     gpio-hog;
43     gpios = <6 GPIO_ACTIVE_HIGH>;
44     - output-low;
45     + output-high;
46     };
47     };
48    
49     diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
50     index 8dbeb873e99c..35b1949a3e3c 100644
51     --- a/arch/arm/boot/dts/exynos5250.dtsi
52     +++ b/arch/arm/boot/dts/exynos5250.dtsi
53     @@ -643,7 +643,7 @@
54     power-domains = <&pd_gsc>;
55     clocks = <&clock CLK_GSCL0>;
56     clock-names = "gscl";
57     - iommu = <&sysmmu_gsc0>;
58     + iommus = <&sysmmu_gsc0>;
59     };
60    
61     gsc_1: gsc@13e10000 {
62     @@ -653,7 +653,7 @@
63     power-domains = <&pd_gsc>;
64     clocks = <&clock CLK_GSCL1>;
65     clock-names = "gscl";
66     - iommu = <&sysmmu_gsc1>;
67     + iommus = <&sysmmu_gsc1>;
68     };
69    
70     gsc_2: gsc@13e20000 {
71     @@ -663,7 +663,7 @@
72     power-domains = <&pd_gsc>;
73     clocks = <&clock CLK_GSCL2>;
74     clock-names = "gscl";
75     - iommu = <&sysmmu_gsc2>;
76     + iommus = <&sysmmu_gsc2>;
77     };
78    
79     gsc_3: gsc@13e30000 {
80     @@ -673,7 +673,7 @@
81     power-domains = <&pd_gsc>;
82     clocks = <&clock CLK_GSCL3>;
83     clock-names = "gscl";
84     - iommu = <&sysmmu_gsc3>;
85     + iommus = <&sysmmu_gsc3>;
86     };
87    
88     hdmi: hdmi@14530000 {
89     diff --git a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
90     index 7bf5aa2237c9..7de704575aee 100644
91     --- a/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
92     +++ b/arch/arm/boot/dts/mt7623n-bananapi-bpi-r2.dts
93     @@ -39,6 +39,24 @@
94     };
95     };
96    
97     + reg_3p3v: regulator-3p3v {
98     + compatible = "regulator-fixed";
99     + regulator-name = "fixed-3.3V";
100     + regulator-min-microvolt = <3300000>;
101     + regulator-max-microvolt = <3300000>;
102     + regulator-boot-on;
103     + regulator-always-on;
104     + };
105     +
106     + reg_5v: regulator-5v {
107     + compatible = "regulator-fixed";
108     + regulator-name = "fixed-5V";
109     + regulator-min-microvolt = <5000000>;
110     + regulator-max-microvolt = <5000000>;
111     + regulator-boot-on;
112     + regulator-always-on;
113     + };
114     +
115     gpio_keys {
116     compatible = "gpio-keys";
117     pinctrl-names = "default";
118     @@ -468,12 +486,14 @@
119     };
120    
121     &usb1 {
122     - vusb33-supply = <&mt6323_vusb_reg>;
123     + vusb33-supply = <&reg_3p3v>;
124     + vbus-supply = <&reg_5v>;
125     status = "okay";
126     };
127    
128     &usb2 {
129     - vusb33-supply = <&mt6323_vusb_reg>;
130     + vusb33-supply = <&reg_3p3v>;
131     + vbus-supply = <&reg_5v>;
132     status = "okay";
133     };
134    
135     diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
136     index 2fa36c525957..81b526085097 100644
137     --- a/arch/arm/boot/dts/sama5d4.dtsi
138     +++ b/arch/arm/boot/dts/sama5d4.dtsi
139     @@ -1365,7 +1365,7 @@
140     pinctrl@fc06a000 {
141     #address-cells = <1>;
142     #size-cells = <1>;
143     - compatible = "atmel,at91sam9x5-pinctrl", "atmel,at91rm9200-pinctrl", "simple-bus";
144     + compatible = "atmel,sama5d3-pinctrl", "atmel,at91sam9x5-pinctrl", "simple-bus";
145     ranges = <0xfc068000 0xfc068000 0x100
146     0xfc06a000 0xfc06a000 0x4000>;
147     /* WARNING: revisit as pin spec has changed */
148     diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
149     index 1a7e5b5d08d8..3dbbf1fffead 100644
150     --- a/arch/arm/mach-exynos/pm.c
151     +++ b/arch/arm/mach-exynos/pm.c
152     @@ -276,11 +276,7 @@ static int exynos_cpu0_enter_aftr(void)
153     goto fail;
154    
155     call_firmware_op(cpu_boot, 1);
156     -
157     - if (soc_is_exynos3250())
158     - dsb_sev();
159     - else
160     - arch_send_wakeup_ipi_mask(cpumask_of(1));
161     + dsb_sev();
162     }
163     }
164     fail:
165     diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
166     index 08b7bb7f5b74..c3c65b06ba76 100644
167     --- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
168     +++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
169     @@ -276,7 +276,7 @@
170     pinctrl-names = "default", "clk-gate";
171    
172     bus-width = <8>;
173     - max-frequency = <200000000>;
174     + max-frequency = <100000000>;
175     non-removable;
176     disable-wp;
177     cap-mmc-highspeed;
178     diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
179     index b71306947290..06629011a434 100644
180     --- a/arch/mips/include/asm/uaccess.h
181     +++ b/arch/mips/include/asm/uaccess.h
182     @@ -654,6 +654,13 @@ __clear_user(void __user *addr, __kernel_size_t size)
183     {
184     __kernel_size_t res;
185    
186     +#ifdef CONFIG_CPU_MICROMIPS
187     +/* micromips memset / bzero also clobbers t7 & t8 */
188     +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$15", "$24", "$31"
189     +#else
190     +#define bzero_clobbers "$4", "$5", "$6", __UA_t0, __UA_t1, "$31"
191     +#endif /* CONFIG_CPU_MICROMIPS */
192     +
193     if (eva_kernel_access()) {
194     __asm__ __volatile__(
195     "move\t$4, %1\n\t"
196     @@ -663,7 +670,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
197     "move\t%0, $6"
198     : "=r" (res)
199     : "r" (addr), "r" (size)
200     - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
201     + : bzero_clobbers);
202     } else {
203     might_fault();
204     __asm__ __volatile__(
205     @@ -674,7 +681,7 @@ __clear_user(void __user *addr, __kernel_size_t size)
206     "move\t%0, $6"
207     : "=r" (res)
208     : "r" (addr), "r" (size)
209     - : "$4", "$5", "$6", __UA_t0, __UA_t1, "$31");
210     + : bzero_clobbers);
211     }
212    
213     return res;
214     diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
215     index a1456664d6c2..f7327979a8f8 100644
216     --- a/arch/mips/lib/memset.S
217     +++ b/arch/mips/lib/memset.S
218     @@ -219,7 +219,7 @@
219     1: PTR_ADDIU a0, 1 /* fill bytewise */
220     R10KCBARRIER(0(ra))
221     bne t1, a0, 1b
222     - sb a1, -1(a0)
223     + EX(sb, a1, -1(a0), .Lsmall_fixup\@)
224    
225     2: jr ra /* done */
226     move a2, zero
227     @@ -252,13 +252,18 @@
228     PTR_L t0, TI_TASK($28)
229     andi a2, STORMASK
230     LONG_L t0, THREAD_BUADDR(t0)
231     - LONG_ADDU a2, t1
232     + LONG_ADDU a2, a0
233     jr ra
234     LONG_SUBU a2, t0
235    
236     .Llast_fixup\@:
237     jr ra
238     - andi v1, a2, STORMASK
239     + nop
240     +
241     +.Lsmall_fixup\@:
242     + PTR_SUBU a2, t1, a0
243     + jr ra
244     + PTR_ADDIU a2, 1
245    
246     .endm
247    
248     diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h
249     index 10daa1d56e0a..c7c63959ba91 100644
250     --- a/arch/powerpc/include/asm/barrier.h
251     +++ b/arch/powerpc/include/asm/barrier.h
252     @@ -35,7 +35,8 @@
253     #define rmb() __asm__ __volatile__ ("sync" : : : "memory")
254     #define wmb() __asm__ __volatile__ ("sync" : : : "memory")
255    
256     -#ifdef __SUBARCH_HAS_LWSYNC
257     +/* The sub-arch has lwsync */
258     +#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
259     # define SMPWMB LWSYNC
260     #else
261     # define SMPWMB eieio
262     diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
263     index 726c23304a57..8eb3ebca02df 100644
264     --- a/arch/powerpc/include/asm/opal.h
265     +++ b/arch/powerpc/include/asm/opal.h
266     @@ -21,6 +21,9 @@
267     /* We calculate number of sg entries based on PAGE_SIZE */
268     #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
269    
270     +/* Default time to sleep or delay between OPAL_BUSY/OPAL_BUSY_EVENT loops */
271     +#define OPAL_BUSY_DELAY_MS 10
272     +
273     /* /sys/firmware/opal */
274     extern struct kobject *opal_kobj;
275    
276     diff --git a/arch/powerpc/include/asm/synch.h b/arch/powerpc/include/asm/synch.h
277     index 63e7f5a1f105..6ec546090ba1 100644
278     --- a/arch/powerpc/include/asm/synch.h
279     +++ b/arch/powerpc/include/asm/synch.h
280     @@ -6,10 +6,6 @@
281     #include <linux/stringify.h>
282     #include <asm/feature-fixups.h>
283    
284     -#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC)
285     -#define __SUBARCH_HAS_LWSYNC
286     -#endif
287     -
288     #ifndef __ASSEMBLY__
289     extern unsigned int __start___lwsync_fixup, __stop___lwsync_fixup;
290     extern void do_lwsync_fixups(unsigned long value, void *fixup_start,
291     diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
292     index 7275fed271af..f047ae1b6271 100644
293     --- a/arch/powerpc/kernel/dt_cpu_ftrs.c
294     +++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
295     @@ -86,6 +86,7 @@ static int hv_mode;
296    
297     static struct {
298     u64 lpcr;
299     + u64 lpcr_clear;
300     u64 hfscr;
301     u64 fscr;
302     } system_registers;
303     @@ -115,6 +116,8 @@ static void cpufeatures_flush_tlb(void)
304    
305     static void __restore_cpu_cpufeatures(void)
306     {
307     + u64 lpcr;
308     +
309     /*
310     * LPCR is restored by the power on engine already. It can be changed
311     * after early init e.g., by radix enable, and we have no unified API
312     @@ -127,8 +130,10 @@ static void __restore_cpu_cpufeatures(void)
313     * The best we can do to accommodate secondary boot and idle restore
314     * for now is "or" LPCR with existing.
315     */
316     -
317     - mtspr(SPRN_LPCR, system_registers.lpcr | mfspr(SPRN_LPCR));
318     + lpcr = mfspr(SPRN_LPCR);
319     + lpcr |= system_registers.lpcr;
320     + lpcr &= ~system_registers.lpcr_clear;
321     + mtspr(SPRN_LPCR, lpcr);
322     if (hv_mode) {
323     mtspr(SPRN_LPID, 0);
324     mtspr(SPRN_HFSCR, system_registers.hfscr);
325     @@ -351,8 +356,9 @@ static int __init feat_enable_mmu_hash_v3(struct dt_cpu_feature *f)
326     {
327     u64 lpcr;
328    
329     + system_registers.lpcr_clear |= (LPCR_ISL | LPCR_UPRT | LPCR_HR);
330     lpcr = mfspr(SPRN_LPCR);
331     - lpcr &= ~LPCR_ISL;
332     + lpcr &= ~(LPCR_ISL | LPCR_UPRT | LPCR_HR);
333     mtspr(SPRN_LPCR, lpcr);
334    
335     cur_cpu_spec->mmu_features |= MMU_FTRS_HASH_BASE;
336     diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
337     index 2e8d1b2b5af4..8545a9523b9b 100644
338     --- a/arch/powerpc/kernel/eeh_pe.c
339     +++ b/arch/powerpc/kernel/eeh_pe.c
340     @@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
341     eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
342    
343     /* PCI Command: 0x4 */
344     - eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
345     + eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
346     + PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
347    
348     /* Check the PCIe link is ready */
349     eeh_bridge_check_link(edev);
350     diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
351     index bebc3007a793..10b46b35c059 100644
352     --- a/arch/powerpc/kernel/kprobes.c
353     +++ b/arch/powerpc/kernel/kprobes.c
354     @@ -457,29 +457,33 @@ static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
355     }
356    
357     kretprobe_assert(ri, orig_ret_address, trampoline_address);
358     - regs->nip = orig_ret_address;
359     +
360     /*
361     - * Make LR point to the orig_ret_address.
362     - * When the 'nop' inside the kretprobe_trampoline
363     - * is optimized, we can do a 'blr' after executing the
364     - * detour buffer code.
365     + * We get here through one of two paths:
366     + * 1. by taking a trap -> kprobe_handler() -> here
367     + * 2. by optprobe branch -> optimized_callback() -> opt_pre_handler() -> here
368     + *
369     + * When going back through (1), we need regs->nip to be setup properly
370     + * as it is used to determine the return address from the trap.
371     + * For (2), since nip is not honoured with optprobes, we instead setup
372     + * the link register properly so that the subsequent 'blr' in
373     + * kretprobe_trampoline jumps back to the right instruction.
374     + *
375     + * For nip, we should set the address to the previous instruction since
376     + * we end up emulating it in kprobe_handler(), which increments the nip
377     + * again.
378     */
379     + regs->nip = orig_ret_address - 4;
380     regs->link = orig_ret_address;
381    
382     - reset_current_kprobe();
383     kretprobe_hash_unlock(current, &flags);
384     - preempt_enable_no_resched();
385    
386     hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
387     hlist_del(&ri->hlist);
388     kfree(ri);
389     }
390     - /*
391     - * By returning a non-zero value, we are telling
392     - * kprobe_handler() that we don't want the post_handler
393     - * to run (and have re-enabled preemption)
394     - */
395     - return 1;
396     +
397     + return 0;
398     }
399     NOKPROBE_SYMBOL(trampoline_probe_handler);
400    
401     diff --git a/arch/powerpc/kernel/machine_kexec_file_64.c b/arch/powerpc/kernel/machine_kexec_file_64.c
402     index 992c0d258e5d..c66132b145eb 100644
403     --- a/arch/powerpc/kernel/machine_kexec_file_64.c
404     +++ b/arch/powerpc/kernel/machine_kexec_file_64.c
405     @@ -43,7 +43,7 @@ int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
406    
407     /* We don't support crash kernels yet. */
408     if (image->type == KEXEC_TYPE_CRASH)
409     - return -ENOTSUPP;
410     + return -EOPNOTSUPP;
411    
412     for (i = 0; i < ARRAY_SIZE(kexec_file_loaders); i++) {
413     fops = kexec_file_loaders[i];
414     diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
415     index a95ea007d654..d0c0b8443dcf 100644
416     --- a/arch/powerpc/lib/feature-fixups.c
417     +++ b/arch/powerpc/lib/feature-fixups.c
418     @@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
419     unsigned int *target = (unsigned int *)branch_target(src);
420    
421     /* Branch within the section doesn't need translating */
422     - if (target < alt_start || target >= alt_end) {
423     + if (target < alt_start || target > alt_end) {
424     instr = translate_branch(dest, src);
425     if (!instr)
426     return 1;
427     diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
428     index 67ec2e927253..87687e46b48b 100644
429     --- a/arch/powerpc/mm/hash_utils_64.c
430     +++ b/arch/powerpc/mm/hash_utils_64.c
431     @@ -872,6 +872,12 @@ static void __init htab_initialize(void)
432     /* Using a hypervisor which owns the htab */
433     htab_address = NULL;
434     _SDR1 = 0;
435     + /*
436     + * On POWER9, we need to do a H_REGISTER_PROC_TBL hcall
437     + * to inform the hypervisor that we wish to use the HPT.
438     + */
439     + if (cpu_has_feature(CPU_FTR_ARCH_300))
440     + register_process_table(0, 0, 0);
441     #ifdef CONFIG_FA_DUMP
442     /*
443     * If firmware assisted dump is active firmware preserves
444     diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
445     index 9db4398ded5d..1bceb95f422d 100644
446     --- a/arch/powerpc/platforms/powernv/opal-nvram.c
447     +++ b/arch/powerpc/platforms/powernv/opal-nvram.c
448     @@ -11,6 +11,7 @@
449    
450     #define DEBUG
451    
452     +#include <linux/delay.h>
453     #include <linux/kernel.h>
454     #include <linux/init.h>
455     #include <linux/of.h>
456     @@ -56,9 +57,17 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
457    
458     while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
459     rc = opal_write_nvram(__pa(buf), count, off);
460     - if (rc == OPAL_BUSY_EVENT)
461     + if (rc == OPAL_BUSY_EVENT) {
462     + msleep(OPAL_BUSY_DELAY_MS);
463     opal_poll_events(NULL);
464     + } else if (rc == OPAL_BUSY) {
465     + msleep(OPAL_BUSY_DELAY_MS);
466     + }
467     }
468     +
469     + if (rc)
470     + return -EIO;
471     +
472     *index += count;
473     return count;
474     }
475     diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
476     index 495ba4e7336d..55e97565ed2d 100644
477     --- a/arch/powerpc/platforms/pseries/lpar.c
478     +++ b/arch/powerpc/platforms/pseries/lpar.c
479     @@ -726,15 +726,18 @@ static int pseries_lpar_resize_hpt(unsigned long shift)
480     return 0;
481     }
482    
483     -/* Actually only used for radix, so far */
484     static int pseries_lpar_register_process_table(unsigned long base,
485     unsigned long page_size, unsigned long table_size)
486     {
487     long rc;
488     - unsigned long flags = PROC_TABLE_NEW;
489     + unsigned long flags = 0;
490    
491     + if (table_size)
492     + flags |= PROC_TABLE_NEW;
493     if (radix_enabled())
494     flags |= PROC_TABLE_RADIX | PROC_TABLE_GTSE;
495     + else
496     + flags |= PROC_TABLE_HPT_SLB;
497     for (;;) {
498     rc = plpar_hcall_norets(H_REGISTER_PROC_TBL, flags, base,
499     page_size, table_size);
500     @@ -760,6 +763,7 @@ void __init hpte_init_pseries(void)
501     mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range;
502     mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all;
503     mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate;
504     + register_process_table = pseries_lpar_register_process_table;
505    
506     if (firmware_has_feature(FW_FEATURE_HPT_RESIZE))
507     mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt;
508     diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
509     index ebc244b08d67..0f89ee557b04 100644
510     --- a/arch/powerpc/sysdev/xive/native.c
511     +++ b/arch/powerpc/sysdev/xive/native.c
512     @@ -388,6 +388,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
513     if (xive_pool_vps == XIVE_INVALID_VP)
514     return;
515    
516     + /* Check if pool VP already active, if it is, pull it */
517     + if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
518     + in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
519     +
520     /* Enable the pool VP */
521     vp = xive_pool_vps + cpu;
522     pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
523     diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
524     index cf8a2d92467f..45eb5999110b 100644
525     --- a/arch/s390/hypfs/inode.c
526     +++ b/arch/s390/hypfs/inode.c
527     @@ -320,7 +320,7 @@ static void hypfs_kill_super(struct super_block *sb)
528    
529     if (sb->s_root)
530     hypfs_delete_tree(sb->s_root);
531     - if (sb_info->update_file)
532     + if (sb_info && sb_info->update_file)
533     hypfs_remove(sb_info->update_file);
534     kfree(sb->s_fs_info);
535     sb->s_fs_info = NULL;
536     diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
537     index 2db18cbbb0ea..c0197097c86e 100644
538     --- a/arch/um/os-Linux/file.c
539     +++ b/arch/um/os-Linux/file.c
540     @@ -12,6 +12,7 @@
541     #include <sys/mount.h>
542     #include <sys/socket.h>
543     #include <sys/stat.h>
544     +#include <sys/sysmacros.h>
545     #include <sys/un.h>
546     #include <sys/types.h>
547     #include <os.h>
548     diff --git a/arch/um/os-Linux/signal.c b/arch/um/os-Linux/signal.c
549     index a86d7cc2c2d8..bf0acb8aad8b 100644
550     --- a/arch/um/os-Linux/signal.c
551     +++ b/arch/um/os-Linux/signal.c
552     @@ -16,6 +16,7 @@
553     #include <os.h>
554     #include <sysdep/mcontext.h>
555     #include <um_malloc.h>
556     +#include <sys/ucontext.h>
557    
558     void (*sig_info[NSIG])(int, struct siginfo *, struct uml_pt_regs *) = {
559     [SIGTRAP] = relay_signal,
560     @@ -159,7 +160,7 @@ static void (*handlers[_NSIG])(int sig, struct siginfo *si, mcontext_t *mc) = {
561    
562     static void hard_handler(int sig, siginfo_t *si, void *p)
563     {
564     - struct ucontext *uc = p;
565     + ucontext_t *uc = p;
566     mcontext_t *mc = &uc->uc_mcontext;
567     unsigned long pending = 1UL << sig;
568    
569     diff --git a/arch/x86/um/stub_segv.c b/arch/x86/um/stub_segv.c
570     index 1518d2805ae8..27361cbb7ca9 100644
571     --- a/arch/x86/um/stub_segv.c
572     +++ b/arch/x86/um/stub_segv.c
573     @@ -6,11 +6,12 @@
574     #include <sysdep/stub.h>
575     #include <sysdep/faultinfo.h>
576     #include <sysdep/mcontext.h>
577     +#include <sys/ucontext.h>
578    
579     void __attribute__ ((__section__ (".__syscall_stub")))
580     stub_segv_handler(int sig, siginfo_t *info, void *p)
581     {
582     - struct ucontext *uc = p;
583     + ucontext_t *uc = p;
584    
585     GET_FAULTINFO_FROM_MC(*((struct faultinfo *) STUB_DATA),
586     &uc->uc_mcontext);
587     diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
588     index f896c2975545..fcd8789470d1 100644
589     --- a/arch/x86/xen/enlighten_pv.c
590     +++ b/arch/x86/xen/enlighten_pv.c
591     @@ -1258,10 +1258,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
592     */
593     __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
594    
595     - /* Work out if we support NX */
596     - get_cpu_cap(&boot_cpu_data);
597     - x86_configure_nx();
598     -
599     /* Get mfn list */
600     xen_build_dynamic_phys_to_machine();
601    
602     @@ -1271,6 +1267,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
603     */
604     xen_setup_gdt(0);
605    
606     + /* Work out if we support NX */
607     + get_cpu_cap(&boot_cpu_data);
608     + x86_configure_nx();
609     +
610     xen_init_irq_ops();
611    
612     /* Let's presume PV guests always boot on vCPU with id 0. */
613     diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
614     index 67a860790560..d56822f58ab1 100644
615     --- a/drivers/acpi/nfit/core.c
616     +++ b/drivers/acpi/nfit/core.c
617     @@ -1022,8 +1022,11 @@ static ssize_t scrub_show(struct device *dev,
618     if (nd_desc) {
619     struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
620    
621     + mutex_lock(&acpi_desc->init_mutex);
622     rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
623     - (work_busy(&acpi_desc->work)) ? "+\n" : "\n");
624     + work_busy(&acpi_desc->work)
625     + && !acpi_desc->cancel ? "+\n" : "\n");
626     + mutex_unlock(&acpi_desc->init_mutex);
627     }
628     device_unlock(dev);
629     return rc;
630     @@ -2313,7 +2316,7 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
631     struct acpi_nfit_system_address *spa = nfit_spa->spa;
632     struct nd_blk_region_desc *ndbr_desc;
633     struct nfit_mem *nfit_mem;
634     - int blk_valid = 0, rc;
635     + int rc;
636    
637     if (!nvdimm) {
638     dev_err(acpi_desc->dev, "spa%d dimm: %#x not found\n",
639     @@ -2333,15 +2336,14 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
640     if (!nfit_mem || !nfit_mem->bdw) {
641     dev_dbg(acpi_desc->dev, "spa%d %s missing bdw\n",
642     spa->range_index, nvdimm_name(nvdimm));
643     - } else {
644     - mapping->size = nfit_mem->bdw->capacity;
645     - mapping->start = nfit_mem->bdw->start_address;
646     - ndr_desc->num_lanes = nfit_mem->bdw->windows;
647     - blk_valid = 1;
648     + break;
649     }
650    
651     + mapping->size = nfit_mem->bdw->capacity;
652     + mapping->start = nfit_mem->bdw->start_address;
653     + ndr_desc->num_lanes = nfit_mem->bdw->windows;
654     ndr_desc->mapping = mapping;
655     - ndr_desc->num_mappings = blk_valid;
656     + ndr_desc->num_mappings = 1;
657     ndbr_desc = to_blk_region_desc(ndr_desc);
658     ndbr_desc->enable = acpi_nfit_blk_region_enable;
659     ndbr_desc->do_io = acpi_desc->blk_do_io;
660     diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
661     index 601e5d372887..43587ac680e4 100644
662     --- a/drivers/acpi/video_detect.c
663     +++ b/drivers/acpi/video_detect.c
664     @@ -219,6 +219,15 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
665     "3570R/370R/470R/450R/510R/4450RV"),
666     },
667     },
668     + {
669     + /* https://bugzilla.redhat.com/show_bug.cgi?id=1557060 */
670     + .callback = video_detect_force_video,
671     + .ident = "SAMSUNG 670Z5E",
672     + .matches = {
673     + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
674     + DMI_MATCH(DMI_PRODUCT_NAME, "670Z5E"),
675     + },
676     + },
677     {
678     /* https://bugzilla.redhat.com/show_bug.cgi?id=1094948 */
679     .callback = video_detect_force_video,
680     diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
681     index b9a779a4a739..efdadd153abe 100644
682     --- a/drivers/base/regmap/regmap.c
683     +++ b/drivers/base/regmap/regmap.c
684     @@ -1739,7 +1739,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg,
685     return -EINVAL;
686     if (val_len % map->format.val_bytes)
687     return -EINVAL;
688     - if (map->max_raw_write && map->max_raw_write > val_len)
689     + if (map->max_raw_write && map->max_raw_write < val_len)
690     return -E2BIG;
691    
692     map->lock(map->lock_arg);
693     diff --git a/drivers/char/random.c b/drivers/char/random.c
694     index ea0115cf5fc0..58a2ff7df392 100644
695     --- a/drivers/char/random.c
696     +++ b/drivers/char/random.c
697     @@ -427,8 +427,9 @@ struct crng_state primary_crng = {
698     * its value (from 0->1->2).
699     */
700     static int crng_init = 0;
701     -#define crng_ready() (likely(crng_init > 0))
702     +#define crng_ready() (likely(crng_init > 1))
703     static int crng_init_cnt = 0;
704     +static unsigned long crng_global_init_time = 0;
705     #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE)
706     static void _extract_crng(struct crng_state *crng,
707     __u8 out[CHACHA20_BLOCK_SIZE]);
708     @@ -732,7 +733,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
709    
710     static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
711     {
712     - const int nbits_max = (int)(~0U >> (ENTROPY_SHIFT + 1));
713     + const int nbits_max = r->poolinfo->poolwords * 32;
714    
715     if (nbits < 0)
716     return -EINVAL;
717     @@ -786,6 +787,10 @@ static void crng_initialize(struct crng_state *crng)
718     crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
719     }
720    
721     +/*
722     + * crng_fast_load() can be called by code in the interrupt service
723     + * path. So we can't afford to dilly-dally.
724     + */
725     static int crng_fast_load(const char *cp, size_t len)
726     {
727     unsigned long flags;
728     @@ -793,7 +798,7 @@ static int crng_fast_load(const char *cp, size_t len)
729    
730     if (!spin_trylock_irqsave(&primary_crng.lock, flags))
731     return 0;
732     - if (crng_ready()) {
733     + if (crng_init != 0) {
734     spin_unlock_irqrestore(&primary_crng.lock, flags);
735     return 0;
736     }
737     @@ -812,6 +817,51 @@ static int crng_fast_load(const char *cp, size_t len)
738     return 1;
739     }
740    
741     +/*
742     + * crng_slow_load() is called by add_device_randomness, which has two
743     + * attributes. (1) We can't trust the buffer passed to it is
744     + * guaranteed to be unpredictable (so it might not have any entropy at
745     + * all), and (2) it doesn't have the performance constraints of
746     + * crng_fast_load().
747     + *
748     + * So we do something more comprehensive which is guaranteed to touch
749     + * all of the primary_crng's state, and which uses a LFSR with a
750     + * period of 255 as part of the mixing algorithm. Finally, we do
751     + * *not* advance crng_init_cnt since buffer we may get may be something
752     + * like a fixed DMI table (for example), which might very well be
753     + * unique to the machine, but is otherwise unvarying.
754     + */
755     +static int crng_slow_load(const char *cp, size_t len)
756     +{
757     + unsigned long flags;
758     + static unsigned char lfsr = 1;
759     + unsigned char tmp;
760     + unsigned i, max = CHACHA20_KEY_SIZE;
761     + const char * src_buf = cp;
762     + char * dest_buf = (char *) &primary_crng.state[4];
763     +
764     + if (!spin_trylock_irqsave(&primary_crng.lock, flags))
765     + return 0;
766     + if (crng_init != 0) {
767     + spin_unlock_irqrestore(&primary_crng.lock, flags);
768     + return 0;
769     + }
770     + if (len > max)
771     + max = len;
772     +
773     + for (i = 0; i < max ; i++) {
774     + tmp = lfsr;
775     + lfsr >>= 1;
776     + if (tmp & 1)
777     + lfsr ^= 0xE1;
778     + tmp = dest_buf[i % CHACHA20_KEY_SIZE];
779     + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
780     + lfsr += (tmp << 3) | (tmp >> 5);
781     + }
782     + spin_unlock_irqrestore(&primary_crng.lock, flags);
783     + return 1;
784     +}
785     +
786     static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
787     {
788     unsigned long flags;
789     @@ -830,7 +880,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
790     _crng_backtrack_protect(&primary_crng, buf.block,
791     CHACHA20_KEY_SIZE);
792     }
793     - spin_lock_irqsave(&primary_crng.lock, flags);
794     + spin_lock_irqsave(&crng->lock, flags);
795     for (i = 0; i < 8; i++) {
796     unsigned long rv;
797     if (!arch_get_random_seed_long(&rv) &&
798     @@ -840,7 +890,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
799     }
800     memzero_explicit(&buf, sizeof(buf));
801     crng->init_time = jiffies;
802     - spin_unlock_irqrestore(&primary_crng.lock, flags);
803     + spin_unlock_irqrestore(&crng->lock, flags);
804     if (crng == &primary_crng && crng_init < 2) {
805     invalidate_batched_entropy();
806     crng_init = 2;
807     @@ -855,8 +905,9 @@ static void _extract_crng(struct crng_state *crng,
808     {
809     unsigned long v, flags;
810    
811     - if (crng_init > 1 &&
812     - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))
813     + if (crng_ready() &&
814     + (time_after(crng_global_init_time, crng->init_time) ||
815     + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
816     crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
817     spin_lock_irqsave(&crng->lock, flags);
818     if (arch_get_random_long(&v))
819     @@ -981,10 +1032,8 @@ void add_device_randomness(const void *buf, unsigned int size)
820     unsigned long time = random_get_entropy() ^ jiffies;
821     unsigned long flags;
822    
823     - if (!crng_ready()) {
824     - crng_fast_load(buf, size);
825     - return;
826     - }
827     + if (!crng_ready() && size)
828     + crng_slow_load(buf, size);
829    
830     trace_add_device_randomness(size, _RET_IP_);
831     spin_lock_irqsave(&input_pool.lock, flags);
832     @@ -1141,7 +1190,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
833     fast_mix(fast_pool);
834     add_interrupt_bench(cycles);
835    
836     - if (!crng_ready()) {
837     + if (unlikely(crng_init == 0)) {
838     if ((fast_pool->count >= 64) &&
839     crng_fast_load((char *) fast_pool->pool,
840     sizeof(fast_pool->pool))) {
841     @@ -1691,6 +1740,7 @@ static int rand_initialize(void)
842     init_std_data(&input_pool);
843     init_std_data(&blocking_pool);
844     crng_initialize(&primary_crng);
845     + crng_global_init_time = jiffies;
846    
847     #ifdef CONFIG_NUMA
848     pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
849     @@ -1877,6 +1927,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
850     input_pool.entropy_count = 0;
851     blocking_pool.entropy_count = 0;
852     return 0;
853     + case RNDRESEEDCRNG:
854     + if (!capable(CAP_SYS_ADMIN))
855     + return -EPERM;
856     + if (crng_init < 2)
857     + return -ENODATA;
858     + crng_reseed(&primary_crng, NULL);
859     + crng_global_init_time = jiffies - 1;
860     + return 0;
861     default:
862     return -EINVAL;
863     }
864     @@ -2214,7 +2272,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
865     {
866     struct entropy_store *poolp = &input_pool;
867    
868     - if (!crng_ready()) {
869     + if (unlikely(crng_init == 0)) {
870     crng_fast_load(buffer, count);
871     return;
872     }
873     diff --git a/drivers/char/tpm/tpm-interface.c b/drivers/char/tpm/tpm-interface.c
874     index 0f1dc35e7078..1d01a8f77db1 100644
875     --- a/drivers/char/tpm/tpm-interface.c
876     +++ b/drivers/char/tpm/tpm-interface.c
877     @@ -971,6 +971,10 @@ int tpm_do_selftest(struct tpm_chip *chip)
878     loops = jiffies_to_msecs(duration) / delay_msec;
879    
880     rc = tpm_continue_selftest(chip);
881     + if (rc == TPM_ERR_INVALID_POSTINIT) {
882     + chip->flags |= TPM_CHIP_FLAG_ALWAYS_POWERED;
883     + dev_info(&chip->dev, "TPM not ready (%d)\n", rc);
884     + }
885     /* This may fail if there was no TPM driver during a suspend/resume
886     * cycle; some may return 10 (BAD_ORDINAL), others 28 (FAILEDSELFTEST)
887     */
888     diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c
889     index eec52734d6ac..5f8082d89131 100644
890     --- a/drivers/clk/bcm/clk-bcm2835.c
891     +++ b/drivers/clk/bcm/clk-bcm2835.c
892     @@ -632,9 +632,7 @@ static void bcm2835_pll_off(struct clk_hw *hw)
893     const struct bcm2835_pll_data *data = pll->data;
894    
895     spin_lock(&cprman->regs_lock);
896     - cprman_write(cprman, data->cm_ctrl_reg,
897     - cprman_read(cprman, data->cm_ctrl_reg) |
898     - CM_PLL_ANARST);
899     + cprman_write(cprman, data->cm_ctrl_reg, CM_PLL_ANARST);
900     cprman_write(cprman, data->a2w_ctrl_reg,
901     cprman_read(cprman, data->a2w_ctrl_reg) |
902     A2W_PLL_CTRL_PWRDN);
903     @@ -670,6 +668,10 @@ static int bcm2835_pll_on(struct clk_hw *hw)
904     cpu_relax();
905     }
906    
907     + cprman_write(cprman, data->a2w_ctrl_reg,
908     + cprman_read(cprman, data->a2w_ctrl_reg) |
909     + A2W_PLL_CTRL_PRST_DISABLE);
910     +
911     return 0;
912     }
913    
914     diff --git a/drivers/clk/mediatek/clk-mt2701.c b/drivers/clk/mediatek/clk-mt2701.c
915     index 9598889f972b..ccfe5d30fe10 100644
916     --- a/drivers/clk/mediatek/clk-mt2701.c
917     +++ b/drivers/clk/mediatek/clk-mt2701.c
918     @@ -148,6 +148,7 @@ static const struct mtk_fixed_factor top_fixed_divs[] = {
919     FACTOR(CLK_TOP_CLK26M_D8, "clk26m_d8", "clk26m", 1, 8),
920     FACTOR(CLK_TOP_32K_INTERNAL, "32k_internal", "clk26m", 1, 793),
921     FACTOR(CLK_TOP_32K_EXTERNAL, "32k_external", "rtc32k", 1, 1),
922     + FACTOR(CLK_TOP_AXISEL_D4, "axisel_d4", "axi_sel", 1, 4),
923     };
924    
925     static const char * const axi_parents[] = {
926     @@ -857,13 +858,13 @@ static const struct mtk_gate peri_clks[] = {
927     GATE_PERI0(CLK_PERI_USB1, "usb1_ck", "usb20_sel", 11),
928     GATE_PERI0(CLK_PERI_USB0, "usb0_ck", "usb20_sel", 10),
929     GATE_PERI0(CLK_PERI_PWM, "pwm_ck", "axi_sel", 9),
930     - GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axi_sel", 8),
931     - GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axi_sel", 7),
932     - GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axi_sel", 6),
933     - GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axi_sel", 5),
934     - GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axi_sel", 4),
935     - GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axi_sel", 3),
936     - GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axi_sel", 2),
937     + GATE_PERI0(CLK_PERI_PWM7, "pwm7_ck", "axisel_d4", 8),
938     + GATE_PERI0(CLK_PERI_PWM6, "pwm6_ck", "axisel_d4", 7),
939     + GATE_PERI0(CLK_PERI_PWM5, "pwm5_ck", "axisel_d4", 6),
940     + GATE_PERI0(CLK_PERI_PWM4, "pwm4_ck", "axisel_d4", 5),
941     + GATE_PERI0(CLK_PERI_PWM3, "pwm3_ck", "axisel_d4", 4),
942     + GATE_PERI0(CLK_PERI_PWM2, "pwm2_ck", "axisel_d4", 3),
943     + GATE_PERI0(CLK_PERI_PWM1, "pwm1_ck", "axisel_d4", 2),
944     GATE_PERI0(CLK_PERI_THERM, "therm_ck", "axi_sel", 1),
945     GATE_PERI0(CLK_PERI_NFI, "nfi_ck", "nfi2x_sel", 0),
946    
947     diff --git a/drivers/clk/mvebu/armada-38x.c b/drivers/clk/mvebu/armada-38x.c
948     index 394aa6f03f01..9ff4ea63932d 100644
949     --- a/drivers/clk/mvebu/armada-38x.c
950     +++ b/drivers/clk/mvebu/armada-38x.c
951     @@ -46,11 +46,11 @@ static u32 __init armada_38x_get_tclk_freq(void __iomem *sar)
952     }
953    
954     static const u32 armada_38x_cpu_frequencies[] __initconst = {
955     - 0, 0, 0, 0,
956     - 1066 * 1000 * 1000, 0, 0, 0,
957     + 666 * 1000 * 1000, 0, 800 * 1000 * 1000, 0,
958     + 1066 * 1000 * 1000, 0, 1200 * 1000 * 1000, 0,
959     1332 * 1000 * 1000, 0, 0, 0,
960     1600 * 1000 * 1000, 0, 0, 0,
961     - 1866 * 1000 * 1000,
962     + 1866 * 1000 * 1000, 0, 0, 2000 * 1000 * 1000,
963     };
964    
965     static u32 __init armada_38x_get_cpu_freq(void __iomem *sar)
966     @@ -76,11 +76,11 @@ static const struct coreclk_ratio armada_38x_coreclk_ratios[] __initconst = {
967     };
968    
969     static const int armada_38x_cpu_l2_ratios[32][2] __initconst = {
970     - {0, 1}, {0, 1}, {0, 1}, {0, 1},
971     - {1, 2}, {0, 1}, {0, 1}, {0, 1},
972     - {1, 2}, {0, 1}, {0, 1}, {0, 1},
973     + {1, 2}, {0, 1}, {1, 2}, {0, 1},
974     + {1, 2}, {0, 1}, {1, 2}, {0, 1},
975     {1, 2}, {0, 1}, {0, 1}, {0, 1},
976     {1, 2}, {0, 1}, {0, 1}, {0, 1},
977     + {1, 2}, {0, 1}, {0, 1}, {1, 2},
978     {0, 1}, {0, 1}, {0, 1}, {0, 1},
979     {0, 1}, {0, 1}, {0, 1}, {0, 1},
980     {0, 1}, {0, 1}, {0, 1}, {0, 1},
981     @@ -91,7 +91,7 @@ static const int armada_38x_cpu_ddr_ratios[32][2] __initconst = {
982     {1, 2}, {0, 1}, {0, 1}, {0, 1},
983     {1, 2}, {0, 1}, {0, 1}, {0, 1},
984     {1, 2}, {0, 1}, {0, 1}, {0, 1},
985     - {1, 2}, {0, 1}, {0, 1}, {0, 1},
986     + {1, 2}, {0, 1}, {0, 1}, {7, 15},
987     {0, 1}, {0, 1}, {0, 1}, {0, 1},
988     {0, 1}, {0, 1}, {0, 1}, {0, 1},
989     {0, 1}, {0, 1}, {0, 1}, {0, 1},
990     diff --git a/drivers/clk/renesas/clk-sh73a0.c b/drivers/clk/renesas/clk-sh73a0.c
991     index eea38f6ea77e..3892346c4fcc 100644
992     --- a/drivers/clk/renesas/clk-sh73a0.c
993     +++ b/drivers/clk/renesas/clk-sh73a0.c
994     @@ -46,7 +46,7 @@ struct div4_clk {
995     unsigned int shift;
996     };
997    
998     -static struct div4_clk div4_clks[] = {
999     +static const struct div4_clk div4_clks[] = {
1000     { "zg", "pll0", CPG_FRQCRA, 16 },
1001     { "m3", "pll1", CPG_FRQCRA, 12 },
1002     { "b", "pll1", CPG_FRQCRA, 8 },
1003     @@ -79,7 +79,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
1004     {
1005     const struct clk_div_table *table = NULL;
1006     unsigned int shift, reg, width;
1007     - const char *parent_name;
1008     + const char *parent_name = NULL;
1009     unsigned int mult = 1;
1010     unsigned int div = 1;
1011    
1012     @@ -135,7 +135,7 @@ sh73a0_cpg_register_clock(struct device_node *np, struct sh73a0_cpg *cpg,
1013     shift = 24;
1014     width = 5;
1015     } else {
1016     - struct div4_clk *c;
1017     + const struct div4_clk *c;
1018    
1019     for (c = div4_clks; c->name; c++) {
1020     if (!strcmp(name, c->name)) {
1021     diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c
1022     index a1c3025f9df7..dcb1cb9a4572 100644
1023     --- a/drivers/cpufreq/cppc_cpufreq.c
1024     +++ b/drivers/cpufreq/cppc_cpufreq.c
1025     @@ -20,6 +20,7 @@
1026     #include <linux/cpu.h>
1027     #include <linux/cpufreq.h>
1028     #include <linux/dmi.h>
1029     +#include <linux/time.h>
1030     #include <linux/vmalloc.h>
1031    
1032     #include <asm/unaligned.h>
1033     @@ -162,6 +163,8 @@ static int cppc_cpufreq_cpu_init(struct cpufreq_policy *policy)
1034     policy->cpuinfo.max_freq = cppc_dmi_max_khz;
1035    
1036     policy->cpuinfo.transition_latency = cppc_get_transition_latency(cpu_num);
1037     + policy->transition_delay_us = cppc_get_transition_latency(cpu_num) /
1038     + NSEC_PER_USEC;
1039     policy->shared_type = cpu->shared_type;
1040    
1041     if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
1042     diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1043     index c00e3923d7d8..94236ec9d410 100644
1044     --- a/drivers/dma/at_xdmac.c
1045     +++ b/drivers/dma/at_xdmac.c
1046     @@ -1471,10 +1471,10 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1047     for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
1048     check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1049     rmb();
1050     - initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1051     - rmb();
1052     cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
1053     rmb();
1054     + initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
1055     + rmb();
1056     cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
1057     rmb();
1058    
1059     diff --git a/drivers/extcon/extcon-intel-cht-wc.c b/drivers/extcon/extcon-intel-cht-wc.c
1060     index 91a0023074af..60baaf693103 100644
1061     --- a/drivers/extcon/extcon-intel-cht-wc.c
1062     +++ b/drivers/extcon/extcon-intel-cht-wc.c
1063     @@ -66,6 +66,8 @@
1064    
1065     #define CHT_WC_VBUS_GPIO_CTLO 0x6e2d
1066     #define CHT_WC_VBUS_GPIO_CTLO_OUTPUT BIT(0)
1067     +#define CHT_WC_VBUS_GPIO_CTLO_DRV_OD BIT(4)
1068     +#define CHT_WC_VBUS_GPIO_CTLO_DIR_OUT BIT(5)
1069    
1070     enum cht_wc_usb_id {
1071     USB_ID_OTG,
1072     @@ -183,14 +185,15 @@ static void cht_wc_extcon_set_5v_boost(struct cht_wc_extcon_data *ext,
1073     {
1074     int ret, val;
1075    
1076     - val = enable ? CHT_WC_VBUS_GPIO_CTLO_OUTPUT : 0;
1077     -
1078     /*
1079     * The 5V boost converter is enabled through a gpio on the PMIC, since
1080     * there currently is no gpio driver we access the gpio reg directly.
1081     */
1082     - ret = regmap_update_bits(ext->regmap, CHT_WC_VBUS_GPIO_CTLO,
1083     - CHT_WC_VBUS_GPIO_CTLO_OUTPUT, val);
1084     + val = CHT_WC_VBUS_GPIO_CTLO_DRV_OD | CHT_WC_VBUS_GPIO_CTLO_DIR_OUT;
1085     + if (enable)
1086     + val |= CHT_WC_VBUS_GPIO_CTLO_OUTPUT;
1087     +
1088     + ret = regmap_write(ext->regmap, CHT_WC_VBUS_GPIO_CTLO, val);
1089     if (ret)
1090     dev_err(ext->dev, "Error writing Vbus GPIO CTLO: %d\n", ret);
1091     }
1092     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1093     index c53095b3b0fb..1ae5ae8c45a4 100644
1094     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1095     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
1096     @@ -569,6 +569,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
1097     { 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
1098     { 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
1099     { 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
1100     + { 0x1002, 0x67DF, 0x1028, 0x0774, AMDGPU_PX_QUIRK_FORCE_ATPX },
1101     { 0, 0, 0, 0, 0 },
1102     };
1103    
1104     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
1105     index 59089e027f4d..92be7f6de197 100644
1106     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
1107     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
1108     @@ -233,8 +233,10 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
1109     for (i = 0; i < list->num_entries; i++) {
1110     unsigned priority = list->array[i].priority;
1111    
1112     - list_add_tail(&list->array[i].tv.head,
1113     - &bucket[priority]);
1114     + if (!list->array[i].robj->parent)
1115     + list_add_tail(&list->array[i].tv.head,
1116     + &bucket[priority]);
1117     +
1118     list->array[i].user_pages = NULL;
1119     }
1120    
1121     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1122     index b5aa8e6f8e0b..5f892ad6476e 100644
1123     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1124     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1125     @@ -522,7 +522,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
1126     INIT_LIST_HEAD(&duplicates);
1127     amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
1128    
1129     - if (p->uf_entry.robj)
1130     + if (p->uf_entry.robj && !p->uf_entry.robj->parent)
1131     list_add(&p->uf_entry.tv.head, &p->validated);
1132    
1133     if (need_mmap_lock)
1134     diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1135     index f508f4d01e4a..11beef7c595f 100644
1136     --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1137     +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
1138     @@ -866,7 +866,7 @@ static void cik_sdma_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1139     amdgpu_ring_write(ring, addr & 0xfffffffc);
1140     amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1141     amdgpu_ring_write(ring, seq); /* reference */
1142     - amdgpu_ring_write(ring, 0xfffffff); /* mask */
1143     + amdgpu_ring_write(ring, 0xffffffff); /* mask */
1144     amdgpu_ring_write(ring, (0xfff << 16) | 4); /* retry count, poll interval */
1145     }
1146    
1147     diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1148     index f2d0710258cb..9928473234a6 100644
1149     --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1150     +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
1151     @@ -856,7 +856,7 @@ static void sdma_v2_4_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1152     amdgpu_ring_write(ring, addr & 0xfffffffc);
1153     amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1154     amdgpu_ring_write(ring, seq); /* reference */
1155     - amdgpu_ring_write(ring, 0xfffffff); /* mask */
1156     + amdgpu_ring_write(ring, 0xffffffff); /* mask */
1157     amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1158     SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1159     }
1160     diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1161     index b1de44f22824..f5db1fad3f05 100644
1162     --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1163     +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
1164     @@ -1099,7 +1099,7 @@ static void sdma_v3_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1165     amdgpu_ring_write(ring, addr & 0xfffffffc);
1166     amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1167     amdgpu_ring_write(ring, seq); /* reference */
1168     - amdgpu_ring_write(ring, 0xfffffff); /* mask */
1169     + amdgpu_ring_write(ring, 0xffffffff); /* mask */
1170     amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1171     SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1172     }
1173     diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1174     index fd7c72aaafa6..4e5fed7c66bf 100644
1175     --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1176     +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
1177     @@ -1136,7 +1136,7 @@ static void sdma_v4_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
1178     amdgpu_ring_write(ring, addr & 0xfffffffc);
1179     amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
1180     amdgpu_ring_write(ring, seq); /* reference */
1181     - amdgpu_ring_write(ring, 0xfffffff); /* mask */
1182     + amdgpu_ring_write(ring, 0xffffffff); /* mask */
1183     amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) |
1184     SDMA_PKT_POLL_REGMEM_DW5_INTERVAL(4)); /* retry count, poll interval */
1185     }
1186     diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
1187     index 4c178feeb4bd..40520a968eac 100644
1188     --- a/drivers/gpu/drm/amd/amdgpu/si.c
1189     +++ b/drivers/gpu/drm/amd/amdgpu/si.c
1190     @@ -1231,6 +1231,71 @@ static void si_detect_hw_virtualization(struct amdgpu_device *adev)
1191     adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
1192     }
1193    
1194     +static int si_get_pcie_lanes(struct amdgpu_device *adev)
1195     +{
1196     + u32 link_width_cntl;
1197     +
1198     + if (adev->flags & AMD_IS_APU)
1199     + return 0;
1200     +
1201     + link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1202     +
1203     + switch ((link_width_cntl & LC_LINK_WIDTH_RD_MASK) >> LC_LINK_WIDTH_RD_SHIFT) {
1204     + case LC_LINK_WIDTH_X1:
1205     + return 1;
1206     + case LC_LINK_WIDTH_X2:
1207     + return 2;
1208     + case LC_LINK_WIDTH_X4:
1209     + return 4;
1210     + case LC_LINK_WIDTH_X8:
1211     + return 8;
1212     + case LC_LINK_WIDTH_X0:
1213     + case LC_LINK_WIDTH_X16:
1214     + default:
1215     + return 16;
1216     + }
1217     +}
1218     +
1219     +static void si_set_pcie_lanes(struct amdgpu_device *adev, int lanes)
1220     +{
1221     + u32 link_width_cntl, mask;
1222     +
1223     + if (adev->flags & AMD_IS_APU)
1224     + return;
1225     +
1226     + switch (lanes) {
1227     + case 0:
1228     + mask = LC_LINK_WIDTH_X0;
1229     + break;
1230     + case 1:
1231     + mask = LC_LINK_WIDTH_X1;
1232     + break;
1233     + case 2:
1234     + mask = LC_LINK_WIDTH_X2;
1235     + break;
1236     + case 4:
1237     + mask = LC_LINK_WIDTH_X4;
1238     + break;
1239     + case 8:
1240     + mask = LC_LINK_WIDTH_X8;
1241     + break;
1242     + case 16:
1243     + mask = LC_LINK_WIDTH_X16;
1244     + break;
1245     + default:
1246     + DRM_ERROR("invalid pcie lane request: %d\n", lanes);
1247     + return;
1248     + }
1249     +
1250     + link_width_cntl = RREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL);
1251     + link_width_cntl &= ~LC_LINK_WIDTH_MASK;
1252     + link_width_cntl |= mask << LC_LINK_WIDTH_SHIFT;
1253     + link_width_cntl |= (LC_RECONFIG_NOW |
1254     + LC_RECONFIG_ARC_MISSING_ESCAPE);
1255     +
1256     + WREG32_PCIE_PORT(PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
1257     +}
1258     +
1259     static const struct amdgpu_asic_funcs si_asic_funcs =
1260     {
1261     .read_disabled_bios = &si_read_disabled_bios,
1262     @@ -1241,6 +1306,8 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
1263     .get_xclk = &si_get_xclk,
1264     .set_uvd_clocks = &si_set_uvd_clocks,
1265     .set_vce_clocks = NULL,
1266     + .get_pcie_lanes = &si_get_pcie_lanes,
1267     + .set_pcie_lanes = &si_set_pcie_lanes,
1268     .get_config_memsize = &si_get_config_memsize,
1269     };
1270    
1271     diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1272     index abb0a2341a41..6f1dea157a77 100644
1273     --- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1274     +++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
1275     @@ -6374,9 +6374,9 @@ static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
1276     {
1277     u32 lane_width;
1278     u32 new_lane_width =
1279     - (amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
1280     + ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1281     u32 current_lane_width =
1282     - (amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
1283     + ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1284    
1285     if (new_lane_width != current_lane_width) {
1286     amdgpu_set_pcie_lanes(adev, new_lane_width);
1287     diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
1288     index 920c8914cec1..cc70e2470272 100644
1289     --- a/drivers/gpu/drm/i915/i915_reg.h
1290     +++ b/drivers/gpu/drm/i915/i915_reg.h
1291     @@ -6159,6 +6159,12 @@ enum {
1292     #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4)
1293     #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8)
1294     #define SP_CONST_ALPHA_ENABLE (1<<31)
1295     +#define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0)
1296     +#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */
1297     +#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */
1298     +#define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4)
1299     +#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */
1300     +#define SP_SH_COS(x) (x) /* u3.7 */
1301     #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721f4)
1302    
1303     #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280)
1304     @@ -6172,6 +6178,8 @@ enum {
1305     #define _SPBKEYMAXVAL (VLV_DISPLAY_BASE + 0x722a0)
1306     #define _SPBTILEOFF (VLV_DISPLAY_BASE + 0x722a4)
1307     #define _SPBCONSTALPHA (VLV_DISPLAY_BASE + 0x722a8)
1308     +#define _SPBCLRC0 (VLV_DISPLAY_BASE + 0x722d0)
1309     +#define _SPBCLRC1 (VLV_DISPLAY_BASE + 0x722d4)
1310     #define _SPBGAMC (VLV_DISPLAY_BASE + 0x722f4)
1311    
1312     #define _MMIO_VLV_SPR(pipe, plane_id, reg_a, reg_b) \
1313     @@ -6188,6 +6196,8 @@ enum {
1314     #define SPKEYMAXVAL(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAKEYMAXVAL, _SPBKEYMAXVAL)
1315     #define SPTILEOFF(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPATILEOFF, _SPBTILEOFF)
1316     #define SPCONSTALPHA(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACONSTALPHA, _SPBCONSTALPHA)
1317     +#define SPCLRC0(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC0, _SPBCLRC0)
1318     +#define SPCLRC1(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPACLRC1, _SPBCLRC1)
1319     #define SPGAMC(pipe, plane_id) _MMIO_VLV_SPR((pipe), (plane_id), _SPAGAMC, _SPBGAMC)
1320    
1321     /*
1322     diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
1323     index f8ebeb5ffb96..41e31a454604 100644
1324     --- a/drivers/gpu/drm/i915/intel_sprite.c
1325     +++ b/drivers/gpu/drm/i915/intel_sprite.c
1326     @@ -345,44 +345,87 @@ skl_plane_get_hw_state(struct intel_plane *plane)
1327     }
1328    
1329     static void
1330     -chv_update_csc(struct intel_plane *plane, uint32_t format)
1331     +chv_update_csc(const struct intel_plane_state *plane_state)
1332     {
1333     + struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1334     struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1335     + const struct drm_framebuffer *fb = plane_state->base.fb;
1336     enum plane_id plane_id = plane->id;
1337    
1338     /* Seems RGB data bypasses the CSC always */
1339     - if (!format_is_yuv(format))
1340     + if (!format_is_yuv(fb->format->format))
1341     return;
1342    
1343     /*
1344     - * BT.601 limited range YCbCr -> full range RGB
1345     + * BT.601 full range YCbCr -> full range RGB
1346     *
1347     - * |r| | 6537 4769 0| |cr |
1348     - * |g| = |-3330 4769 -1605| x |y-64|
1349     - * |b| | 0 4769 8263| |cb |
1350     + * |r| | 5743 4096 0| |cr|
1351     + * |g| = |-2925 4096 -1410| x |y |
1352     + * |b| | 0 4096 7258| |cb|
1353     *
1354     - * Cb and Cr apparently come in as signed already, so no
1355     - * need for any offset. For Y we need to remove the offset.
1356     + * Cb and Cr apparently come in as signed already,
1357     + * and we get full range data in on account of CLRC0/1
1358     */
1359     - I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(-64));
1360     + I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
1361     I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
1362     I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
1363    
1364     - I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4769) | SPCSC_C0(6537));
1365     - I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-3330) | SPCSC_C0(0));
1366     - I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1605) | SPCSC_C0(4769));
1367     - I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4769) | SPCSC_C0(0));
1368     - I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(8263));
1369     + I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(4096) | SPCSC_C0(5743));
1370     + I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(-2925) | SPCSC_C0(0));
1371     + I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(-1410) | SPCSC_C0(4096));
1372     + I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(4096) | SPCSC_C0(0));
1373     + I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(7258));
1374    
1375     - I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(940) | SPCSC_IMIN(64));
1376     - I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
1377     - I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(448) | SPCSC_IMIN(-448));
1378     + I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
1379     + I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
1380     + I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
1381    
1382     I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
1383     I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
1384     I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
1385     }
1386    
1387     +#define SIN_0 0
1388     +#define COS_0 1
1389     +
1390     +static void
1391     +vlv_update_clrc(const struct intel_plane_state *plane_state)
1392     +{
1393     + struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
1394     + struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
1395     + const struct drm_framebuffer *fb = plane_state->base.fb;
1396     + enum pipe pipe = plane->pipe;
1397     + enum plane_id plane_id = plane->id;
1398     + int contrast, brightness, sh_scale, sh_sin, sh_cos;
1399     +
1400     + if (format_is_yuv(fb->format->format)) {
1401     + /*
1402     + * Expand limited range to full range:
1403     + * Contrast is applied first and is used to expand Y range.
1404     + * Brightness is applied second and is used to remove the
1405     + * offset from Y. Saturation/hue is used to expand CbCr range.
1406     + */
1407     + contrast = DIV_ROUND_CLOSEST(255 << 6, 235 - 16);
1408     + brightness = -DIV_ROUND_CLOSEST(16 * 255, 235 - 16);
1409     + sh_scale = DIV_ROUND_CLOSEST(128 << 7, 240 - 128);
1410     + sh_sin = SIN_0 * sh_scale;
1411     + sh_cos = COS_0 * sh_scale;
1412     + } else {
1413     + /* Pass-through everything. */
1414     + contrast = 1 << 6;
1415     + brightness = 0;
1416     + sh_scale = 1 << 7;
1417     + sh_sin = SIN_0 * sh_scale;
1418     + sh_cos = COS_0 * sh_scale;
1419     + }
1420     +
1421     + /* FIXME these register are single buffered :( */
1422     + I915_WRITE_FW(SPCLRC0(pipe, plane_id),
1423     + SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
1424     + I915_WRITE_FW(SPCLRC1(pipe, plane_id),
1425     + SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
1426     +}
1427     +
1428     static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state,
1429     const struct intel_plane_state *plane_state)
1430     {
1431     @@ -476,8 +519,10 @@ vlv_update_plane(struct intel_plane *plane,
1432    
1433     spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
1434    
1435     + vlv_update_clrc(plane_state);
1436     +
1437     if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
1438     - chv_update_csc(plane, fb->format->format);
1439     + chv_update_csc(plane_state);
1440    
1441     if (key->flags) {
1442     I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
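The intel_sprite.c hunk above derives its CLRC values from the BT.601 limited-range bounds (Y 16..235, CbCr 16..240) using the u3.6/s8/u3.7 encodings documented next to SP_CONTRAST/SP_BRIGHTNESS/SP_SH_COS in i915_reg.h. The standalone C sketch below (illustrative only, not part of the patch; DIV_ROUND_CLOSEST is re-implemented for positive operands) reproduces that fixed-point arithmetic so the programmed register values can be checked by hand:

/*
 * Illustrative userspace sketch: reproduces the fixed-point
 * contrast/brightness/saturation values that vlv_update_clrc() programs
 * to expand BT.601 limited range to full range. Encodings assumed from
 * the register comments: contrast u3.6, brightness s8, saturation u3.7.
 */
#include <stdio.h>

static int div_round_closest(int a, int b)
{
    return (a + b / 2) / b;   /* same rounding as DIV_ROUND_CLOSEST for positive args */
}

int main(void)
{
    int contrast   = div_round_closest(255 << 6, 235 - 16);  /* Y gain, u3.6 */
    int brightness = -div_round_closest(16 * 255, 235 - 16); /* removes the +16 Y offset, s8 */
    int sh_scale   = div_round_closest(128 << 7, 240 - 128); /* CbCr gain, u3.7 */

    printf("contrast   = %d (~%.3fx)\n", contrast, contrast / 64.0);
    printf("brightness = %d\n", brightness);
    printf("sh_scale   = %d (~%.3fx)\n", sh_scale, sh_scale / 128.0);
    return 0;
}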
1443     diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
1444     index 32b577c776b9..58488eac8462 100644
1445     --- a/drivers/gpu/drm/radeon/radeon_device.c
1446     +++ b/drivers/gpu/drm/radeon/radeon_device.c
1447     @@ -139,6 +139,10 @@ static struct radeon_px_quirk radeon_px_quirk_list[] = {
1448     * https://bugs.freedesktop.org/show_bug.cgi?id=101491
1449     */
1450     { PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
1451     + /* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
1452     + * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
1453     + */
1454     + { PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
1455     { 0, 0, 0, 0, 0 },
1456     };
1457    
1458     diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
1459     index 97a0a639dad9..90d5b41007bf 100644
1460     --- a/drivers/gpu/drm/radeon/si_dpm.c
1461     +++ b/drivers/gpu/drm/radeon/si_dpm.c
1462     @@ -5912,9 +5912,9 @@ static void si_set_pcie_lane_width_in_smc(struct radeon_device *rdev,
1463     {
1464     u32 lane_width;
1465     u32 new_lane_width =
1466     - (radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
1467     + ((radeon_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1468     u32 current_lane_width =
1469     - (radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT;
1470     + ((radeon_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
1471    
1472     if (new_lane_width != current_lane_width) {
1473     radeon_set_pcie_lanes(rdev, new_lane_width);
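Both the amdgpu and radeon si_dpm hunks apply the same correction: the decoded ATOM_PPLIB link-width field is incremented by one before being compared and passed to *_set_pcie_lanes(), i.e. the caps field is treated as "lane count minus one". A minimal sketch of that decode is shown below; the mask and shift values are hypothetical stand-ins for the real ATOM constants and are not taken from this patch:

/*
 * Illustrative sketch of the decode used above. LINK_WIDTH_MASK and
 * LINK_WIDTH_SHIFT are stand-ins, not the real ATOM_PPLIB definitions.
 */
#include <stdio.h>

#define LINK_WIDTH_MASK  0x00F00000u  /* stand-in mask */
#define LINK_WIDTH_SHIFT 20           /* stand-in shift */

static unsigned decode_lane_width(unsigned caps)
{
    /* stored value is lanes - 1, so add one to get the real lane count */
    return ((caps & LINK_WIDTH_MASK) >> LINK_WIDTH_SHIFT) + 1;
}

int main(void)
{
    unsigned caps = 15u << LINK_WIDTH_SHIFT;  /* encoded 15 -> 16 lanes */

    printf("decoded lane width: %u\n", decode_lane_width(caps));
    return 0;
}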
1474     diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1475     index bf9ed0e63973..f1fa8d5c9b52 100644
1476     --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1477     +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
1478     @@ -1413,6 +1413,9 @@ static int vop_initial(struct vop *vop)
1479     usleep_range(10, 20);
1480     reset_control_deassert(ahb_rst);
1481    
1482     + VOP_INTR_SET_TYPE(vop, clear, INTR_MASK, 1);
1483     + VOP_INTR_SET_TYPE(vop, enable, INTR_MASK, 0);
1484     +
1485     memcpy(vop->regsbak, vop->regs, vop->len);
1486    
1487     VOP_REG_SET(vop, misc, global_regdone_en, 1);
1488     @@ -1568,17 +1571,9 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1489    
1490     mutex_init(&vop->vsync_mutex);
1491    
1492     - ret = devm_request_irq(dev, vop->irq, vop_isr,
1493     - IRQF_SHARED, dev_name(dev), vop);
1494     - if (ret)
1495     - return ret;
1496     -
1497     - /* IRQ is initially disabled; it gets enabled in power_on */
1498     - disable_irq(vop->irq);
1499     -
1500     ret = vop_create_crtc(vop);
1501     if (ret)
1502     - goto err_enable_irq;
1503     + return ret;
1504    
1505     pm_runtime_enable(&pdev->dev);
1506    
1507     @@ -1588,13 +1583,19 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
1508     goto err_disable_pm_runtime;
1509     }
1510    
1511     + ret = devm_request_irq(dev, vop->irq, vop_isr,
1512     + IRQF_SHARED, dev_name(dev), vop);
1513     + if (ret)
1514     + goto err_disable_pm_runtime;
1515     +
1516     + /* IRQ is initially disabled; it gets enabled in power_on */
1517     + disable_irq(vop->irq);
1518     +
1519     return 0;
1520    
1521     err_disable_pm_runtime:
1522     pm_runtime_disable(&pdev->dev);
1523     vop_destroy_crtc(vop);
1524     -err_enable_irq:
1525     - enable_irq(vop->irq); /* To balance out the disable_irq above */
1526     return ret;
1527     }
1528    
1529     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1530     index ca2fbe56635a..672b0be41d44 100644
1531     --- a/drivers/hid/hid-core.c
1532     +++ b/drivers/hid/hid-core.c
1533     @@ -1390,7 +1390,7 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags)
1534     * of implement() working on 8 byte chunks
1535     */
1536    
1537     - int len = hid_report_len(report) + 7;
1538     + u32 len = hid_report_len(report) + 7;
1539    
1540     return kmalloc(len, flags);
1541     }
1542     @@ -1455,7 +1455,7 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
1543     {
1544     char *buf;
1545     int ret;
1546     - int len;
1547     + u32 len;
1548    
1549     buf = hid_alloc_report_buf(report, GFP_KERNEL);
1550     if (!buf)
1551     @@ -1481,14 +1481,14 @@ void __hid_request(struct hid_device *hid, struct hid_report *report,
1552     }
1553     EXPORT_SYMBOL_GPL(__hid_request);
1554    
1555     -int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1556     +int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
1557     int interrupt)
1558     {
1559     struct hid_report_enum *report_enum = hid->report_enum + type;
1560     struct hid_report *report;
1561     struct hid_driver *hdrv;
1562     unsigned int a;
1563     - int rsize, csize = size;
1564     + u32 rsize, csize = size;
1565     u8 *cdata = data;
1566     int ret = 0;
1567    
1568     @@ -1546,7 +1546,7 @@ EXPORT_SYMBOL_GPL(hid_report_raw_event);
1569     *
1570     * This is data entry for lower layers.
1571     */
1572     -int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int interrupt)
1573     +int hid_input_report(struct hid_device *hid, int type, u8 *data, u32 size, int interrupt)
1574     {
1575     struct hid_report_enum *report_enum;
1576     struct hid_driver *hdrv;
1577     diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
1578     index 199f6a01fc62..bb984cc9753b 100644
1579     --- a/drivers/hid/hid-input.c
1580     +++ b/drivers/hid/hid-input.c
1581     @@ -387,7 +387,8 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1582     break;
1583    
1584     case POWER_SUPPLY_PROP_CAPACITY:
1585     - if (dev->battery_report_type == HID_FEATURE_REPORT) {
1586     + if (dev->battery_status != HID_BATTERY_REPORTED &&
1587     + !dev->battery_avoid_query) {
1588     value = hidinput_query_battery_capacity(dev);
1589     if (value < 0)
1590     return value;
1591     @@ -403,17 +404,17 @@ static int hidinput_get_battery_property(struct power_supply *psy,
1592     break;
1593    
1594     case POWER_SUPPLY_PROP_STATUS:
1595     - if (!dev->battery_reported &&
1596     - dev->battery_report_type == HID_FEATURE_REPORT) {
1597     + if (dev->battery_status != HID_BATTERY_REPORTED &&
1598     + !dev->battery_avoid_query) {
1599     value = hidinput_query_battery_capacity(dev);
1600     if (value < 0)
1601     return value;
1602    
1603     dev->battery_capacity = value;
1604     - dev->battery_reported = true;
1605     + dev->battery_status = HID_BATTERY_QUERIED;
1606     }
1607    
1608     - if (!dev->battery_reported)
1609     + if (dev->battery_status == HID_BATTERY_UNKNOWN)
1610     val->intval = POWER_SUPPLY_STATUS_UNKNOWN;
1611     else if (dev->battery_capacity == 100)
1612     val->intval = POWER_SUPPLY_STATUS_FULL;
1613     @@ -486,6 +487,14 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
1614     dev->battery_report_type = report_type;
1615     dev->battery_report_id = field->report->id;
1616    
1617     + /*
1618     + * Stylus is normally not connected to the device and thus we
1619     + * can't query the device and get meaningful battery strength.
1620     + * We have to wait for the device to report it on its own.
1621     + */
1622     + dev->battery_avoid_query = report_type == HID_INPUT_REPORT &&
1623     + field->physical == HID_DG_STYLUS;
1624     +
1625     dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
1626     if (IS_ERR(dev->battery)) {
1627     error = PTR_ERR(dev->battery);
1628     @@ -530,9 +539,10 @@ static void hidinput_update_battery(struct hid_device *dev, int value)
1629    
1630     capacity = hidinput_scale_battery_capacity(dev, value);
1631    
1632     - if (!dev->battery_reported || capacity != dev->battery_capacity) {
1633     + if (dev->battery_status != HID_BATTERY_REPORTED ||
1634     + capacity != dev->battery_capacity) {
1635     dev->battery_capacity = capacity;
1636     - dev->battery_reported = true;
1637     + dev->battery_status = HID_BATTERY_REPORTED;
1638     power_supply_changed(dev->battery);
1639     }
1640     }
1641     @@ -1359,7 +1369,8 @@ static void hidinput_led_worker(struct work_struct *work)
1642     led_work);
1643     struct hid_field *field;
1644     struct hid_report *report;
1645     - int len, ret;
1646     + int ret;
1647     + u32 len;
1648     __u8 *buf;
1649    
1650     field = hidinput_get_led_field(hid);
1651     diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
1652     index 6598501c1ad0..c3b9bd5dba75 100644
1653     --- a/drivers/hid/hid-multitouch.c
1654     +++ b/drivers/hid/hid-multitouch.c
1655     @@ -354,7 +354,8 @@ static const struct attribute_group mt_attribute_group = {
1656     static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
1657     {
1658     struct mt_device *td = hid_get_drvdata(hdev);
1659     - int ret, size = hid_report_len(report);
1660     + int ret;
1661     + u32 size = hid_report_len(report);
1662     u8 *buf;
1663    
1664     /*
1665     @@ -1049,7 +1050,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
1666     struct hid_report_enum *re;
1667     struct mt_class *cls = &td->mtclass;
1668     char *buf;
1669     - int report_len;
1670     + u32 report_len;
1671    
1672     if (td->inputmode < 0)
1673     return;
1674     diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
1675     index ef241d66562e..cf5812188c37 100644
1676     --- a/drivers/hid/hid-rmi.c
1677     +++ b/drivers/hid/hid-rmi.c
1678     @@ -89,8 +89,8 @@ struct rmi_data {
1679     u8 *writeReport;
1680     u8 *readReport;
1681    
1682     - int input_report_size;
1683     - int output_report_size;
1684     + u32 input_report_size;
1685     + u32 output_report_size;
1686    
1687     unsigned long flags;
1688    
1689     diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
1690     index 5fbe0f81ab2e..01b5a9f01814 100644
1691     --- a/drivers/hid/hidraw.c
1692     +++ b/drivers/hid/hidraw.c
1693     @@ -192,6 +192,11 @@ static ssize_t hidraw_get_report(struct file *file, char __user *buffer, size_t
1694     int ret = 0, len;
1695     unsigned char report_number;
1696    
1697     + if (!hidraw_table[minor] || !hidraw_table[minor]->exist) {
1698     + ret = -ENODEV;
1699     + goto out;
1700     + }
1701     +
1702     dev = hidraw_table[minor]->hid;
1703    
1704     if (!dev->ll_driver->raw_request) {
1705     diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
1706     index 9145c2129a96..3535073a9a7d 100644
1707     --- a/drivers/hid/i2c-hid/i2c-hid.c
1708     +++ b/drivers/hid/i2c-hid/i2c-hid.c
1709     @@ -143,10 +143,10 @@ struct i2c_hid {
1710     * register of the HID
1711     * descriptor. */
1712     unsigned int bufsize; /* i2c buffer size */
1713     - char *inbuf; /* Input buffer */
1714     - char *rawbuf; /* Raw Input buffer */
1715     - char *cmdbuf; /* Command buffer */
1716     - char *argsbuf; /* Command arguments buffer */
1717     + u8 *inbuf; /* Input buffer */
1718     + u8 *rawbuf; /* Raw Input buffer */
1719     + u8 *cmdbuf; /* Command buffer */
1720     + u8 *argsbuf; /* Command arguments buffer */
1721    
1722     unsigned long flags; /* device flags */
1723     unsigned long quirks; /* Various quirks */
1724     @@ -450,7 +450,8 @@ static int i2c_hid_hwreset(struct i2c_client *client)
1725    
1726     static void i2c_hid_get_input(struct i2c_hid *ihid)
1727     {
1728     - int ret, ret_size;
1729     + int ret;
1730     + u32 ret_size;
1731     int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
1732    
1733     if (size > ihid->bufsize)
1734     @@ -475,7 +476,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
1735     return;
1736     }
1737    
1738     - if (ret_size > size) {
1739     + if ((ret_size > size) || (ret_size <= 2)) {
1740     dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
1741     __func__, size, ret_size);
1742     return;
1743     diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
1744     index 65f1cfbbe7fe..4c337585479e 100644
1745     --- a/drivers/hid/wacom_sys.c
1746     +++ b/drivers/hid/wacom_sys.c
1747     @@ -115,7 +115,7 @@ static void wacom_feature_mapping(struct hid_device *hdev,
1748     unsigned int equivalent_usage = wacom_equivalent_usage(usage->hid);
1749     u8 *data;
1750     int ret;
1751     - int n;
1752     + u32 n;
1753    
1754     switch (equivalent_usage) {
1755     case HID_DG_CONTACTMAX:
1756     @@ -408,7 +408,7 @@ static int wacom_set_device_mode(struct hid_device *hdev,
1757     u8 *rep_data;
1758     struct hid_report *r;
1759     struct hid_report_enum *re;
1760     - int length;
1761     + u32 length;
1762     int error = -ENOMEM, limit = 0;
1763    
1764     if (wacom_wac->mode_report < 0)
1765     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
1766     index 70cbe1e5a3d2..c401b5b63f4c 100644
1767     --- a/drivers/hid/wacom_wac.c
1768     +++ b/drivers/hid/wacom_wac.c
1769     @@ -689,6 +689,45 @@ static int wacom_intuos_get_tool_type(int tool_id)
1770     return tool_type;
1771     }
1772    
1773     +static void wacom_exit_report(struct wacom_wac *wacom)
1774     +{
1775     + struct input_dev *input = wacom->pen_input;
1776     + struct wacom_features *features = &wacom->features;
1777     + unsigned char *data = wacom->data;
1778     + int idx = (features->type == INTUOS) ? (data[1] & 0x01) : 0;
1779     +
1780     + /*
1781     + * Reset all states otherwise we lose the initial states
1782     + * when in-prox next time
1783     + */
1784     + input_report_abs(input, ABS_X, 0);
1785     + input_report_abs(input, ABS_Y, 0);
1786     + input_report_abs(input, ABS_DISTANCE, 0);
1787     + input_report_abs(input, ABS_TILT_X, 0);
1788     + input_report_abs(input, ABS_TILT_Y, 0);
1789     + if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
1790     + input_report_key(input, BTN_LEFT, 0);
1791     + input_report_key(input, BTN_MIDDLE, 0);
1792     + input_report_key(input, BTN_RIGHT, 0);
1793     + input_report_key(input, BTN_SIDE, 0);
1794     + input_report_key(input, BTN_EXTRA, 0);
1795     + input_report_abs(input, ABS_THROTTLE, 0);
1796     + input_report_abs(input, ABS_RZ, 0);
1797     + } else {
1798     + input_report_abs(input, ABS_PRESSURE, 0);
1799     + input_report_key(input, BTN_STYLUS, 0);
1800     + input_report_key(input, BTN_STYLUS2, 0);
1801     + input_report_key(input, BTN_TOUCH, 0);
1802     + input_report_abs(input, ABS_WHEEL, 0);
1803     + if (features->type >= INTUOS3S)
1804     + input_report_abs(input, ABS_Z, 0);
1805     + }
1806     + input_report_key(input, wacom->tool[idx], 0);
1807     + input_report_abs(input, ABS_MISC, 0); /* reset tool id */
1808     + input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
1809     + wacom->id[idx] = 0;
1810     +}
1811     +
1812     static int wacom_intuos_inout(struct wacom_wac *wacom)
1813     {
1814     struct wacom_features *features = &wacom->features;
1815     @@ -741,36 +780,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
1816     if (!wacom->id[idx])
1817     return 1;
1818    
1819     - /*
1820     - * Reset all states otherwise we lose the initial states
1821     - * when in-prox next time
1822     - */
1823     - input_report_abs(input, ABS_X, 0);
1824     - input_report_abs(input, ABS_Y, 0);
1825     - input_report_abs(input, ABS_DISTANCE, 0);
1826     - input_report_abs(input, ABS_TILT_X, 0);
1827     - input_report_abs(input, ABS_TILT_Y, 0);
1828     - if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
1829     - input_report_key(input, BTN_LEFT, 0);
1830     - input_report_key(input, BTN_MIDDLE, 0);
1831     - input_report_key(input, BTN_RIGHT, 0);
1832     - input_report_key(input, BTN_SIDE, 0);
1833     - input_report_key(input, BTN_EXTRA, 0);
1834     - input_report_abs(input, ABS_THROTTLE, 0);
1835     - input_report_abs(input, ABS_RZ, 0);
1836     - } else {
1837     - input_report_abs(input, ABS_PRESSURE, 0);
1838     - input_report_key(input, BTN_STYLUS, 0);
1839     - input_report_key(input, BTN_STYLUS2, 0);
1840     - input_report_key(input, BTN_TOUCH, 0);
1841     - input_report_abs(input, ABS_WHEEL, 0);
1842     - if (features->type >= INTUOS3S)
1843     - input_report_abs(input, ABS_Z, 0);
1844     - }
1845     - input_report_key(input, wacom->tool[idx], 0);
1846     - input_report_abs(input, ABS_MISC, 0); /* reset tool id */
1847     - input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
1848     - wacom->id[idx] = 0;
1849     + wacom_exit_report(wacom);
1850     return 2;
1851     }
1852    
1853     @@ -1226,6 +1236,12 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
1854     if (!valid)
1855     continue;
1856    
1857     + if (!prox) {
1858     + wacom->shared->stylus_in_proximity = false;
1859     + wacom_exit_report(wacom);
1860     + input_sync(pen_input);
1861     + return;
1862     + }
1863     if (range) {
1864     /* Fix rotation alignment: userspace expects zero at left */
1865     int16_t rotation = (int16_t)get_unaligned_le16(&frame[9]);
1866     diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
1867     index 8eac00efadc1..ba8df2fde1b2 100644
1868     --- a/drivers/i2c/busses/i2c-i801.c
1869     +++ b/drivers/i2c/busses/i2c-i801.c
1870     @@ -965,8 +965,6 @@ static void i801_enable_host_notify(struct i2c_adapter *adapter)
1871     if (!(priv->features & FEATURE_HOST_NOTIFY))
1872     return;
1873    
1874     - priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
1875     -
1876     if (!(SMBSLVCMD_HST_NTFY_INTREN & priv->original_slvcmd))
1877     outb_p(SMBSLVCMD_HST_NTFY_INTREN | priv->original_slvcmd,
1878     SMBSLVCMD(priv));
1879     @@ -1614,6 +1612,10 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
1880     outb_p(inb_p(SMBAUXCTL(priv)) &
1881     ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
1882    
1883     + /* Remember original Host Notify setting */
1884     + if (priv->features & FEATURE_HOST_NOTIFY)
1885     + priv->original_slvcmd = inb_p(SMBSLVCMD(priv));
1886     +
1887     /* Default timeout in interrupt mode: 200 ms */
1888     priv->adapter.timeout = HZ / 5;
1889    
1890     @@ -1698,6 +1700,15 @@ static void i801_remove(struct pci_dev *dev)
1891     */
1892     }
1893    
1894     +static void i801_shutdown(struct pci_dev *dev)
1895     +{
1896     + struct i801_priv *priv = pci_get_drvdata(dev);
1897     +
1898     + /* Restore config registers to avoid hard hang on some systems */
1899     + i801_disable_host_notify(priv);
1900     + pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
1901     +}
1902     +
1903     #ifdef CONFIG_PM
1904     static int i801_suspend(struct device *dev)
1905     {
1906     @@ -1727,6 +1738,7 @@ static struct pci_driver i801_driver = {
1907     .id_table = i801_ids,
1908     .probe = i801_probe,
1909     .remove = i801_remove,
1910     + .shutdown = i801_shutdown,
1911     .driver = {
1912     .pm = &i801_pm_ops,
1913     },
1914     diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
1915     index d6fa38f8604f..0698d92e2656 100644
1916     --- a/drivers/infiniband/core/ucma.c
1917     +++ b/drivers/infiniband/core/ucma.c
1918     @@ -1241,6 +1241,9 @@ static int ucma_set_ib_path(struct ucma_context *ctx,
1919     if (!optlen)
1920     return -EINVAL;
1921    
1922     + if (!ctx->cm_id->device)
1923     + return -EINVAL;
1924     +
1925     memset(&sa_path, 0, sizeof(sa_path));
1926    
1927     sa_path.rec_type = SA_PATH_REC_TYPE_IB;
1928     diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
1929     index 6d48d8a93b62..538f1784863a 100644
1930     --- a/drivers/infiniband/hw/mlx5/mr.c
1931     +++ b/drivers/infiniband/hw/mlx5/mr.c
1932     @@ -1220,6 +1220,8 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
1933     return ERR_PTR(-EINVAL);
1934    
1935     mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
1936     + if (IS_ERR(mr))
1937     + return ERR_CAST(mr);
1938     return &mr->ibmr;
1939     }
1940     #endif
1941     diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c
1942     index afbf701dc9a7..906bacf365d4 100644
1943     --- a/drivers/infiniband/sw/rxe/rxe_verbs.c
1944     +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
1945     @@ -712,9 +712,8 @@ static int init_send_wqe(struct rxe_qp *qp, struct ib_send_wr *ibwr,
1946     memcpy(wqe->dma.sge, ibwr->sg_list,
1947     num_sge * sizeof(struct ib_sge));
1948    
1949     - wqe->iova = (mask & WR_ATOMIC_MASK) ?
1950     - atomic_wr(ibwr)->remote_addr :
1951     - rdma_wr(ibwr)->remote_addr;
1952     + wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
1953     + mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
1954     wqe->mask = mask;
1955     wqe->dma.length = length;
1956     wqe->dma.resid = length;
1957     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1958     index 60d7b493ed2d..299a97b7e17f 100644
1959     --- a/drivers/infiniband/ulp/srp/ib_srp.c
1960     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1961     @@ -2656,9 +2656,11 @@ static int srp_abort(struct scsi_cmnd *scmnd)
1962     ret = FAST_IO_FAIL;
1963     else
1964     ret = FAILED;
1965     - srp_free_req(ch, req, scmnd, 0);
1966     - scmnd->result = DID_ABORT << 16;
1967     - scmnd->scsi_done(scmnd);
1968     + if (ret == SUCCESS) {
1969     + srp_free_req(ch, req, scmnd, 0);
1970     + scmnd->result = DID_ABORT << 16;
1971     + scmnd->scsi_done(scmnd);
1972     + }
1973    
1974     return ret;
1975     }
1976     @@ -3428,12 +3430,10 @@ static ssize_t srp_create_target(struct device *dev,
1977     num_online_nodes());
1978     const int ch_end = ((node_idx + 1) * target->ch_count /
1979     num_online_nodes());
1980     - const int cv_start = (node_idx * ibdev->num_comp_vectors /
1981     - num_online_nodes() + target->comp_vector)
1982     - % ibdev->num_comp_vectors;
1983     - const int cv_end = ((node_idx + 1) * ibdev->num_comp_vectors /
1984     - num_online_nodes() + target->comp_vector)
1985     - % ibdev->num_comp_vectors;
1986     + const int cv_start = node_idx * ibdev->num_comp_vectors /
1987     + num_online_nodes();
1988     + const int cv_end = (node_idx + 1) * ibdev->num_comp_vectors /
1989     + num_online_nodes();
1990     int cpu_idx = 0;
1991    
1992     for_each_online_cpu(cpu) {
1993     diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
1994     index 003b4a4d4b78..d7def26ccf79 100644
1995     --- a/drivers/iommu/intel-svm.c
1996     +++ b/drivers/iommu/intel-svm.c
1997     @@ -382,6 +382,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
1998     pasid_max - 1, GFP_KERNEL);
1999     if (ret < 0) {
2000     kfree(svm);
2001     + kfree(sdev);
2002     goto out;
2003     }
2004     svm->pasid = ret;
2005     diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
2006     index 9ae71804b5dd..1c2ca8d51a70 100644
2007     --- a/drivers/irqchip/irq-gic-common.c
2008     +++ b/drivers/irqchip/irq-gic-common.c
2009     @@ -21,6 +21,8 @@
2010    
2011     #include "irq-gic-common.h"
2012    
2013     +static DEFINE_RAW_SPINLOCK(irq_controller_lock);
2014     +
2015     static const struct gic_kvm_info *gic_kvm_info;
2016    
2017     const struct gic_kvm_info *gic_get_kvm_info(void)
2018     @@ -52,11 +54,13 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
2019     u32 confoff = (irq / 16) * 4;
2020     u32 val, oldval;
2021     int ret = 0;
2022     + unsigned long flags;
2023    
2024     /*
2025     * Read current configuration register, and insert the config
2026     * for "irq", depending on "type".
2027     */
2028     + raw_spin_lock_irqsave(&irq_controller_lock, flags);
2029     val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
2030     if (type & IRQ_TYPE_LEVEL_MASK)
2031     val &= ~confmask;
2032     @@ -64,8 +68,10 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
2033     val |= confmask;
2034    
2035     /* If the current configuration is the same, then we are done */
2036     - if (val == oldval)
2037     + if (val == oldval) {
2038     + raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
2039     return 0;
2040     + }
2041    
2042     /*
2043     * Write back the new configuration, and possibly re-enable
2044     @@ -83,6 +89,7 @@ int gic_configure_irq(unsigned int irq, unsigned int type,
2045     pr_warn("GIC: PPI%d is secure or misconfigured\n",
2046     irq - 16);
2047     }
2048     + raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
2049    
2050     if (sync_access)
2051     sync_access();
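The irq-gic-common.c hunk wraps the read-modify-write of the shared per-16-IRQ configuration register in a raw spinlock so that two CPUs configuring different IRQs in the same register cannot lose each other's updates. The userspace sketch below shows the same pattern with a pthread mutex standing in for the kernel's raw spinlock; it is illustrative only and not part of the patch:

/*
 * Illustrative sketch: read-modify-write of a shared config word under a
 * lock, mirroring the locking added to gic_configure_irq(). A pthread
 * mutex stands in for raw_spin_lock_irqsave().
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_icfgr;                /* stands in for the GICD config register */
static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static void configure_irq(unsigned int irq, int edge)
{
    uint32_t confmask = 0x2u << ((irq % 16) * 2);   /* 2 config bits per IRQ */
    uint32_t val;

    pthread_mutex_lock(&cfg_lock);
    val = fake_icfgr;
    if (edge)
        val |= confmask;
    else
        val &= ~confmask;
    fake_icfgr = val;   /* the real code skips the write-back when unchanged */
    pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
    configure_irq(3, 1);
    configure_irq(5, 0);
    printf("ICFGR = 0x%08x\n", fake_icfgr);
    return 0;
}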
2052     diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
2053     index 554d60394c06..f575110454b6 100644
2054     --- a/drivers/md/dm-crypt.c
2055     +++ b/drivers/md/dm-crypt.c
2056     @@ -148,6 +148,8 @@ struct crypt_config {
2057     mempool_t *tag_pool;
2058     unsigned tag_pool_max_sectors;
2059    
2060     + struct percpu_counter n_allocated_pages;
2061     +
2062     struct bio_set *bs;
2063     struct mutex bio_alloc_lock;
2064    
2065     @@ -219,6 +221,12 @@ struct crypt_config {
2066     #define MAX_TAG_SIZE 480
2067     #define POOL_ENTRY_SIZE 512
2068    
2069     +static DEFINE_SPINLOCK(dm_crypt_clients_lock);
2070     +static unsigned dm_crypt_clients_n = 0;
2071     +static volatile unsigned long dm_crypt_pages_per_client;
2072     +#define DM_CRYPT_MEMORY_PERCENT 2
2073     +#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)
2074     +
2075     static void clone_init(struct dm_crypt_io *, struct bio *);
2076     static void kcryptd_queue_crypt(struct dm_crypt_io *io);
2077     static struct scatterlist *crypt_get_sg_data(struct crypt_config *cc,
2078     @@ -2156,6 +2164,43 @@ static int crypt_wipe_key(struct crypt_config *cc)
2079     return r;
2080     }
2081    
2082     +static void crypt_calculate_pages_per_client(void)
2083     +{
2084     + unsigned long pages = (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;
2085     +
2086     + if (!dm_crypt_clients_n)
2087     + return;
2088     +
2089     + pages /= dm_crypt_clients_n;
2090     + if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
2091     + pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
2092     + dm_crypt_pages_per_client = pages;
2093     +}
2094     +
2095     +static void *crypt_page_alloc(gfp_t gfp_mask, void *pool_data)
2096     +{
2097     + struct crypt_config *cc = pool_data;
2098     + struct page *page;
2099     +
2100     + if (unlikely(percpu_counter_compare(&cc->n_allocated_pages, dm_crypt_pages_per_client) >= 0) &&
2101     + likely(gfp_mask & __GFP_NORETRY))
2102     + return NULL;
2103     +
2104     + page = alloc_page(gfp_mask);
2105     + if (likely(page != NULL))
2106     + percpu_counter_add(&cc->n_allocated_pages, 1);
2107     +
2108     + return page;
2109     +}
2110     +
2111     +static void crypt_page_free(void *page, void *pool_data)
2112     +{
2113     + struct crypt_config *cc = pool_data;
2114     +
2115     + __free_page(page);
2116     + percpu_counter_sub(&cc->n_allocated_pages, 1);
2117     +}
2118     +
2119     static void crypt_dtr(struct dm_target *ti)
2120     {
2121     struct crypt_config *cc = ti->private;
2122     @@ -2182,6 +2227,10 @@ static void crypt_dtr(struct dm_target *ti)
2123     mempool_destroy(cc->req_pool);
2124     mempool_destroy(cc->tag_pool);
2125    
2126     + if (cc->page_pool)
2127     + WARN_ON(percpu_counter_sum(&cc->n_allocated_pages) != 0);
2128     + percpu_counter_destroy(&cc->n_allocated_pages);
2129     +
2130     if (cc->iv_gen_ops && cc->iv_gen_ops->dtr)
2131     cc->iv_gen_ops->dtr(cc);
2132    
2133     @@ -2196,6 +2245,12 @@ static void crypt_dtr(struct dm_target *ti)
2134    
2135     /* Must zero key material before freeing */
2136     kzfree(cc);
2137     +
2138     + spin_lock(&dm_crypt_clients_lock);
2139     + WARN_ON(!dm_crypt_clients_n);
2140     + dm_crypt_clients_n--;
2141     + crypt_calculate_pages_per_client();
2142     + spin_unlock(&dm_crypt_clients_lock);
2143     }
2144    
2145     static int crypt_ctr_ivmode(struct dm_target *ti, const char *ivmode)
2146     @@ -2643,6 +2698,15 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2147    
2148     ti->private = cc;
2149    
2150     + spin_lock(&dm_crypt_clients_lock);
2151     + dm_crypt_clients_n++;
2152     + crypt_calculate_pages_per_client();
2153     + spin_unlock(&dm_crypt_clients_lock);
2154     +
2155     + ret = percpu_counter_init(&cc->n_allocated_pages, 0, GFP_KERNEL);
2156     + if (ret < 0)
2157     + goto bad;
2158     +
2159     /* Optional parameters need to be read before cipher constructor */
2160     if (argc > 5) {
2161     ret = crypt_ctr_optional(ti, argc - 5, &argv[5]);
2162     @@ -2697,7 +2761,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
2163     ALIGN(sizeof(struct dm_crypt_io) + cc->dmreq_start + additional_req_size,
2164     ARCH_KMALLOC_MINALIGN);
2165    
2166     - cc->page_pool = mempool_create_page_pool(BIO_MAX_PAGES, 0);
2167     + cc->page_pool = mempool_create(BIO_MAX_PAGES, crypt_page_alloc, crypt_page_free, cc);
2168     if (!cc->page_pool) {
2169     ti->error = "Cannot allocate page mempool";
2170     goto bad;
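The dm-crypt hunk above replaces the fixed page mempool with a custom allocator that enforces a per-client page budget: DM_CRYPT_MEMORY_PERCENT of low memory is split across all active clients, with a floor of BIO_MAX_PAGES * 16 pages. The small C sketch below reproduces only that budget calculation; it is not part of the patch, and the BIO_MAX_PAGES value and the memory figures in main() are assumed inputs for illustration:

/*
 * Illustrative sketch of crypt_calculate_pages_per_client()'s budget math.
 */
#include <stdio.h>

#define DM_CRYPT_MEMORY_PERCENT 2
#define BIO_MAX_PAGES 256                              /* assumed value for this sketch */
#define DM_CRYPT_MIN_PAGES_PER_CLIENT (BIO_MAX_PAGES * 16)

static unsigned long pages_per_client(unsigned long totalram_pages,
                                      unsigned long totalhigh_pages,
                                      unsigned int clients)
{
    unsigned long pages =
        (totalram_pages - totalhigh_pages) * DM_CRYPT_MEMORY_PERCENT / 100;

    if (!clients)
        return 0;
    pages /= clients;
    if (pages < DM_CRYPT_MIN_PAGES_PER_CLIENT)
        pages = DM_CRYPT_MIN_PAGES_PER_CLIENT;
    return pages;
}

int main(void)
{
    /* e.g. 8 GiB of low memory in 4 KiB pages, 3 active crypt targets */
    printf("budget: %lu pages per client\n", pages_per_client(2097152, 0, 3));
    return 0;
}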
2171     diff --git a/drivers/media/platform/vivid/vivid-vid-common.c b/drivers/media/platform/vivid/vivid-vid-common.c
2172     index f0f423c7ca41..6f6d4df1e8a8 100644
2173     --- a/drivers/media/platform/vivid/vivid-vid-common.c
2174     +++ b/drivers/media/platform/vivid/vivid-vid-common.c
2175     @@ -858,7 +858,8 @@ int vidioc_g_edid(struct file *file, void *_fh,
2176     return -EINVAL;
2177     if (edid->start_block + edid->blocks > dev->edid_blocks)
2178     edid->blocks = dev->edid_blocks - edid->start_block;
2179     - cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
2180     + if (adap)
2181     + cec_set_edid_phys_addr(dev->edid, dev->edid_blocks * 128, adap->phys_addr);
2182     memcpy(edid->edid, dev->edid + edid->start_block * 128, edid->blocks * 128);
2183     return 0;
2184     }
2185     diff --git a/drivers/media/platform/vsp1/vsp1_wpf.c b/drivers/media/platform/vsp1/vsp1_wpf.c
2186     index f7f3b4b2c2de..8bd6b2f1af15 100644
2187     --- a/drivers/media/platform/vsp1/vsp1_wpf.c
2188     +++ b/drivers/media/platform/vsp1/vsp1_wpf.c
2189     @@ -452,7 +452,7 @@ static void wpf_configure(struct vsp1_entity *entity,
2190     : VI6_WPF_SRCRPF_RPF_ACT_SUB(input->entity.index);
2191     }
2192    
2193     - if (pipe->bru || pipe->num_inputs > 1)
2194     + if (pipe->bru)
2195     srcrpf |= pipe->bru->type == VSP1_ENTITY_BRU
2196     ? VI6_WPF_SRCRPF_VIRACT_MST
2197     : VI6_WPF_SRCRPF_VIRACT2_MST;
2198     diff --git a/drivers/misc/cxl/cxllib.c b/drivers/misc/cxl/cxllib.c
2199     index dc9bc1807fdf..562a6803d690 100644
2200     --- a/drivers/misc/cxl/cxllib.c
2201     +++ b/drivers/misc/cxl/cxllib.c
2202     @@ -207,49 +207,74 @@ int cxllib_get_PE_attributes(struct task_struct *task,
2203     }
2204     EXPORT_SYMBOL_GPL(cxllib_get_PE_attributes);
2205    
2206     -int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
2207     +static int get_vma_info(struct mm_struct *mm, u64 addr,
2208     + u64 *vma_start, u64 *vma_end,
2209     + unsigned long *page_size)
2210     {
2211     - int rc;
2212     - u64 dar;
2213     struct vm_area_struct *vma = NULL;
2214     - unsigned long page_size;
2215     -
2216     - if (mm == NULL)
2217     - return -EFAULT;
2218     + int rc = 0;
2219    
2220     down_read(&mm->mmap_sem);
2221    
2222     vma = find_vma(mm, addr);
2223     if (!vma) {
2224     - pr_err("Can't find vma for addr %016llx\n", addr);
2225     rc = -EFAULT;
2226     goto out;
2227     }
2228     - /* get the size of the pages allocated */
2229     - page_size = vma_kernel_pagesize(vma);
2230     -
2231     - for (dar = (addr & ~(page_size - 1)); dar < (addr + size); dar += page_size) {
2232     - if (dar < vma->vm_start || dar >= vma->vm_end) {
2233     - vma = find_vma(mm, addr);
2234     - if (!vma) {
2235     - pr_err("Can't find vma for addr %016llx\n", addr);
2236     - rc = -EFAULT;
2237     - goto out;
2238     - }
2239     - /* get the size of the pages allocated */
2240     - page_size = vma_kernel_pagesize(vma);
2241     + *page_size = vma_kernel_pagesize(vma);
2242     + *vma_start = vma->vm_start;
2243     + *vma_end = vma->vm_end;
2244     +out:
2245     + up_read(&mm->mmap_sem);
2246     + return rc;
2247     +}
2248     +
2249     +int cxllib_handle_fault(struct mm_struct *mm, u64 addr, u64 size, u64 flags)
2250     +{
2251     + int rc;
2252     + u64 dar, vma_start, vma_end;
2253     + unsigned long page_size;
2254     +
2255     + if (mm == NULL)
2256     + return -EFAULT;
2257     +
2258     + /*
2259     + * The buffer we have to process can extend over several pages
2260     + * and may also cover several VMAs.
2261     + * We iterate over all the pages. The page size could vary
2262     + * between VMAs.
2263     + */
2264     + rc = get_vma_info(mm, addr, &vma_start, &vma_end, &page_size);
2265     + if (rc)
2266     + return rc;
2267     +
2268     + for (dar = (addr & ~(page_size - 1)); dar < (addr + size);
2269     + dar += page_size) {
2270     + if (dar < vma_start || dar >= vma_end) {
2271     + /*
2272     + * We don't hold the mm->mmap_sem semaphore
2273     + * while iterating, since the semaphore is
2274     + * required by one of the lower-level page
2275     + * fault processing functions and it could
2276     + * create a deadlock.
2277     + *
2278     + * It means the VMAs can be altered between 2
2279     + * loop iterations and we could theoretically
2280     + * miss a page (however unlikely). But that's
2281     + * not really a problem, as the driver will
2282     + * retry access, get another page fault on the
2283     + * missing page and call us again.
2284     + */
2285     + rc = get_vma_info(mm, dar, &vma_start, &vma_end,
2286     + &page_size);
2287     + if (rc)
2288     + return rc;
2289     }
2290    
2291     rc = cxl_handle_mm_fault(mm, flags, dar);
2292     - if (rc) {
2293     - pr_err("cxl_handle_mm_fault failed %d", rc);
2294     - rc = -EFAULT;
2295     - goto out;
2296     - }
2297     + if (rc)
2298     + return -EFAULT;
2299     }
2300     - rc = 0;
2301     -out:
2302     - up_read(&mm->mmap_sem);
2303     - return rc;
2304     + return 0;
2305     }
2306     EXPORT_SYMBOL_GPL(cxllib_handle_fault);
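The cxllib hunk reworks cxllib_handle_fault() to walk the buffer page by page, caching the VMA bounds and page size and refreshing them only when the current address leaves the cached range, so the mmap semaphore is never held across the whole loop. The sketch below shows that walk in userspace terms; get_region_info() and handle_fault() are hypothetical stand-ins for get_vma_info()/cxl_handle_mm_fault(), and it is illustrative only, not part of the patch:

/*
 * Illustrative sketch of the page-by-page walk with cached region info.
 */
#include <stdint.h>
#include <stdio.h>

struct region { uint64_t start, end; unsigned long page_size; };

/* pretend lookup: 4 KiB pages below 1 MiB, 64 KiB pages above */
static int get_region_info(uint64_t addr, struct region *r)
{
    if (addr < (1u << 20)) {
        r->start = 0; r->end = 1u << 20; r->page_size = 4096;
    } else {
        r->start = 1u << 20; r->end = 1u << 24; r->page_size = 65536;
    }
    return 0;
}

static int handle_fault(uint64_t dar)
{
    printf("fault at 0x%llx\n", (unsigned long long)dar);
    return 0;
}

static int handle_buffer(uint64_t addr, uint64_t size)
{
    struct region r;
    int rc = get_region_info(addr, &r);

    if (rc)
        return rc;
    for (uint64_t dar = addr & ~(uint64_t)(r.page_size - 1);
         dar < addr + size; dar += r.page_size) {
        if (dar < r.start || dar >= r.end) {
            rc = get_region_info(dar, &r);   /* crossed into another region */
            if (rc)
                return rc;
        }
        rc = handle_fault(dar);
        if (rc)
            return rc;
    }
    return 0;
}

int main(void)
{
    return handle_buffer(0xff000, 0x11000);  /* spans both regions */
}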
2307     diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c
2308     index 7db8c7a8d38d..48b67f552afe 100644
2309     --- a/drivers/mmc/host/jz4740_mmc.c
2310     +++ b/drivers/mmc/host/jz4740_mmc.c
2311     @@ -362,9 +362,9 @@ static void jz4740_mmc_set_irq_enabled(struct jz4740_mmc_host *host,
2312     host->irq_mask &= ~irq;
2313     else
2314     host->irq_mask |= irq;
2315     - spin_unlock_irqrestore(&host->lock, flags);
2316    
2317     writew(host->irq_mask, host->base + JZ_REG_MMC_IMASK);
2318     + spin_unlock_irqrestore(&host->lock, flags);
2319     }
2320    
2321     static void jz4740_mmc_clock_enable(struct jz4740_mmc_host *host,
2322     diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
2323     index 5bedf4b7f0f7..44da037b13ba 100644
2324     --- a/drivers/mmc/host/sdhci-pci-core.c
2325     +++ b/drivers/mmc/host/sdhci-pci-core.c
2326     @@ -1192,7 +1192,7 @@ static void amd_enable_manual_tuning(struct pci_dev *pdev)
2327     pci_write_config_dword(pdev, AMD_SD_MISC_CONTROL, val);
2328     }
2329    
2330     -static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
2331     +static int amd_execute_tuning_hs200(struct sdhci_host *host, u32 opcode)
2332     {
2333     struct sdhci_pci_slot *slot = sdhci_priv(host);
2334     struct pci_dev *pdev = slot->chip->pdev;
2335     @@ -1231,6 +1231,27 @@ static int amd_execute_tuning(struct sdhci_host *host, u32 opcode)
2336     return 0;
2337     }
2338    
2339     +static int amd_execute_tuning(struct mmc_host *mmc, u32 opcode)
2340     +{
2341     + struct sdhci_host *host = mmc_priv(mmc);
2342     +
2343     + /* AMD requires custom HS200 tuning */
2344     + if (host->timing == MMC_TIMING_MMC_HS200)
2345     + return amd_execute_tuning_hs200(host, opcode);
2346     +
2347     + /* Otherwise perform standard SDHCI tuning */
2348     + return sdhci_execute_tuning(mmc, opcode);
2349     +}
2350     +
2351     +static int amd_probe_slot(struct sdhci_pci_slot *slot)
2352     +{
2353     + struct mmc_host_ops *ops = &slot->host->mmc_host_ops;
2354     +
2355     + ops->execute_tuning = amd_execute_tuning;
2356     +
2357     + return 0;
2358     +}
2359     +
2360     static int amd_probe(struct sdhci_pci_chip *chip)
2361     {
2362     struct pci_dev *smbus_dev;
2363     @@ -1265,12 +1286,12 @@ static const struct sdhci_ops amd_sdhci_pci_ops = {
2364     .set_bus_width = sdhci_set_bus_width,
2365     .reset = sdhci_reset,
2366     .set_uhs_signaling = sdhci_set_uhs_signaling,
2367     - .platform_execute_tuning = amd_execute_tuning,
2368     };
2369    
2370     static const struct sdhci_pci_fixes sdhci_amd = {
2371     .probe = amd_probe,
2372     .ops = &amd_sdhci_pci_ops,
2373     + .probe_slot = amd_probe_slot,
2374     };
2375    
2376     static const struct pci_device_id pci_ids[] = {
2377     diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c
2378     index 3a6d49f07e22..de1562f27fdb 100644
2379     --- a/drivers/mmc/host/tmio_mmc_core.c
2380     +++ b/drivers/mmc/host/tmio_mmc_core.c
2381     @@ -911,7 +911,7 @@ static void tmio_mmc_finish_request(struct tmio_mmc_host *host)
2382     host->check_scc_error(host);
2383    
2384     /* If SET_BLOCK_COUNT, continue with main command */
2385     - if (host->mrq) {
2386     + if (host->mrq && !mrq->cmd->error) {
2387     tmio_process_mrq(host, mrq);
2388     return;
2389     }
2390     diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
2391     index b1fc28f63882..d0b63bbf46a7 100644
2392     --- a/drivers/mtd/ubi/block.c
2393     +++ b/drivers/mtd/ubi/block.c
2394     @@ -244,7 +244,7 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
2395     * in any case.
2396     */
2397     if (mode & FMODE_WRITE) {
2398     - ret = -EPERM;
2399     + ret = -EROFS;
2400     goto out_unlock;
2401     }
2402    
2403     diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
2404     index 842550b5712a..defb1cd8d2e1 100644
2405     --- a/drivers/mtd/ubi/build.c
2406     +++ b/drivers/mtd/ubi/build.c
2407     @@ -845,6 +845,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
2408     return -EINVAL;
2409     }
2410    
2411     + /*
2412     + * Both UBI and UBIFS have been designed for SLC NAND and NOR flashes.
2413     + * MLC NAND is different and needs special care, otherwise UBI or UBIFS
2414     + * will die soon and you will lose all your data.
2415     + */
2416     + if (mtd->type == MTD_MLCNANDFLASH) {
2417     + pr_err("ubi: refuse attaching mtd%d - MLC NAND is not supported\n",
2418     + mtd->index);
2419     + return -EINVAL;
2420     + }
2421     +
2422     if (ubi_num == UBI_DEV_NUM_AUTO) {
2423     /* Search for an empty slot in the @ubi_devices array */
2424     for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
2425     diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
2426     index 4f0bd6b4422a..69dd21679a30 100644
2427     --- a/drivers/mtd/ubi/fastmap-wl.c
2428     +++ b/drivers/mtd/ubi/fastmap-wl.c
2429     @@ -362,7 +362,6 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
2430     {
2431     int i;
2432    
2433     - flush_work(&ubi->fm_work);
2434     return_unused_pool_pebs(ubi, &ubi->fm_pool);
2435     return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
2436    
2437     diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
2438     index c8e7b54a538a..73da5e63a609 100644
2439     --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
2440     +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c
2441     @@ -53,6 +53,7 @@
2442     #include <linux/stringify.h>
2443     #include "iwl-config.h"
2444     #include "iwl-agn-hw.h"
2445     +#include "fw/file.h"
2446    
2447     /* Highest firmware API version supported */
2448     #define IWL9000_UCODE_API_MAX 34
2449     @@ -264,6 +265,67 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = {
2450     .integrated = true,
2451     .soc_latency = 5000,
2452     };
2453     +
2454     +const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = {
2455     + .name = "Intel(R) Dual Band Wireless AC 9460",
2456     + .fw_name_pre = IWL9000A_FW_PRE,
2457     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
2458     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
2459     + IWL_DEVICE_9000,
2460     + .ht_params = &iwl9000_ht_params,
2461     + .nvm_ver = IWL9000_NVM_VERSION,
2462     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
2463     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
2464     + .integrated = true,
2465     + .soc_latency = 5000,
2466     + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
2467     +};
2468     +
2469     +const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = {
2470     + .name = "Intel(R) Dual Band Wireless AC 9461",
2471     + .fw_name_pre = IWL9000A_FW_PRE,
2472     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
2473     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
2474     + IWL_DEVICE_9000,
2475     + .ht_params = &iwl9000_ht_params,
2476     + .nvm_ver = IWL9000_NVM_VERSION,
2477     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
2478     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
2479     + .integrated = true,
2480     + .soc_latency = 5000,
2481     + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
2482     +};
2483     +
2484     +const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = {
2485     + .name = "Intel(R) Dual Band Wireless AC 9462",
2486     + .fw_name_pre = IWL9000A_FW_PRE,
2487     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
2488     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
2489     + IWL_DEVICE_9000,
2490     + .ht_params = &iwl9000_ht_params,
2491     + .nvm_ver = IWL9000_NVM_VERSION,
2492     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
2493     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
2494     + .integrated = true,
2495     + .soc_latency = 5000,
2496     + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
2497     +};
2498     +
2499     +const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = {
2500     + .name = "Intel(R) Dual Band Wireless AC 9560",
2501     + .fw_name_pre = IWL9000A_FW_PRE,
2502     + .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
2503     + .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
2504     + IWL_DEVICE_9000,
2505     + .ht_params = &iwl9000_ht_params,
2506     + .nvm_ver = IWL9000_NVM_VERSION,
2507     + .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
2508     + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
2509     + .integrated = true,
2510     + .soc_latency = 5000,
2511     + .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK
2512     +};
2513     +
2514     MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
2515     MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
2516     MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
2517     diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h
2518     index e988e4c371c4..1b3ad8ef0c79 100644
2519     --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h
2520     +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h
2521     @@ -434,6 +434,7 @@ enum iwl_fw_phy_cfg {
2522     FW_PHY_CFG_TX_CHAIN = 0xf << FW_PHY_CFG_TX_CHAIN_POS,
2523     FW_PHY_CFG_RX_CHAIN_POS = 20,
2524     FW_PHY_CFG_RX_CHAIN = 0xf << FW_PHY_CFG_RX_CHAIN_POS,
2525     + FW_PHY_CFG_SHARED_CLK = BIT(31),
2526     };
2527    
2528     #define IWL_UCODE_MAX_CS 1
2529     diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
2530     index e226179c32fa..85fe1a928adc 100644
2531     --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h
2532     +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h
2533     @@ -394,6 +394,7 @@ struct iwl_cfg {
2534     u8 max_vht_ampdu_exponent;
2535     u8 ucode_api_max;
2536     u8 ucode_api_min;
2537     + u32 extra_phy_cfg_flags;
2538     };
2539    
2540     /*
2541     @@ -476,6 +477,10 @@ extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
2542     extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
2543     extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
2544     extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
2545     +extern const struct iwl_cfg iwl9460_2ac_cfg_shared_clk;
2546     +extern const struct iwl_cfg iwl9461_2ac_cfg_shared_clk;
2547     +extern const struct iwl_cfg iwl9462_2ac_cfg_shared_clk;
2548     +extern const struct iwl_cfg iwl9560_2ac_cfg_shared_clk;
2549     extern const struct iwl_cfg iwla000_2ac_cfg_hr;
2550     extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
2551     extern const struct iwl_cfg iwla000_2ac_cfg_jf;
2552     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2553     index 83485493a79a..b71a9d11a50f 100644
2554     --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2555     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2556     @@ -435,6 +435,10 @@ static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
2557    
2558     /* Set parameters */
2559     phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
2560     +
2561     + /* set flags extra PHY configuration flags from the device's cfg */
2562     + phy_cfg_cmd.phy_cfg |= cpu_to_le32(mvm->cfg->extra_phy_cfg_flags);
2563     +
2564     phy_cfg_cmd.calib_control.event_trigger =
2565     mvm->fw->default_calib[ucode_type].event_trigger;
2566     phy_cfg_cmd.calib_control.flow_trigger =
2567     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2568     index 0f7bd37bf172..9a8605abb00a 100644
2569     --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2570     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c
2571     @@ -8,6 +8,7 @@
2572     * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
2573     * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
2574     * Copyright(c) 2016-2017 Intel Deutschland GmbH
2575     + * Copyright(c) 2018 Intel Corporation
2576     *
2577     * This program is free software; you can redistribute it and/or modify
2578     * it under the terms of version 2 of the GNU General Public License as
2579     @@ -36,6 +37,7 @@
2580     * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
2581     * All rights reserved.
2582     * Copyright(c) 2017 Intel Deutschland GmbH
2583     + * Copyright(c) 2018 Intel Corporation
2584     *
2585     * Redistribution and use in source and binary forms, with or without
2586     * modification, are permitted provided that the following conditions
2587     @@ -515,9 +517,9 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2588     {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)},
2589    
2590     /* 9000 Series */
2591     - {IWL_PCI_DEVICE(0x2526, 0x0000, iwl9260_2ac_cfg)},
2592     {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)},
2593     {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)},
2594     + {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)},
2595     {IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
2596     {IWL_PCI_DEVICE(0x2526, 0x0034, iwl9560_2ac_cfg)},
2597     {IWL_PCI_DEVICE(0x2526, 0x0038, iwl9560_2ac_cfg)},
2598     @@ -542,11 +544,15 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2599     {IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
2600     {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
2601     {IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
2602     + {IWL_PCI_DEVICE(0x2526, 0x2030, iwl9560_2ac_cfg_soc)},
2603     + {IWL_PCI_DEVICE(0x2526, 0x2034, iwl9560_2ac_cfg_soc)},
2604     {IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
2605     {IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
2606     + {IWL_PCI_DEVICE(0x2526, 0x4034, iwl9560_2ac_cfg_soc)},
2607     {IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
2608     - {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
2609     + {IWL_PCI_DEVICE(0x2526, 0x4234, iwl9560_2ac_cfg_soc)},
2610     {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
2611     + {IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
2612     {IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
2613     {IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
2614     {IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
2615     @@ -567,38 +573,146 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2616     {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
2617     {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
2618     {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
2619     + {IWL_PCI_DEVICE(0x2720, 0x1010, iwl9260_2ac_cfg)},
2620     + {IWL_PCI_DEVICE(0x2720, 0x1030, iwl9560_2ac_cfg_soc)},
2621     + {IWL_PCI_DEVICE(0x2720, 0x1210, iwl9260_2ac_cfg)},
2622     + {IWL_PCI_DEVICE(0x2720, 0x2030, iwl9560_2ac_cfg_soc)},
2623     + {IWL_PCI_DEVICE(0x2720, 0x2034, iwl9560_2ac_cfg_soc)},
2624     {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
2625     + {IWL_PCI_DEVICE(0x2720, 0x4034, iwl9560_2ac_cfg_soc)},
2626     {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
2627     + {IWL_PCI_DEVICE(0x2720, 0x4234, iwl9560_2ac_cfg_soc)},
2628     + {IWL_PCI_DEVICE(0x2720, 0x42A4, iwl9462_2ac_cfg_soc)},
2629     + {IWL_PCI_DEVICE(0x30DC, 0x0030, iwl9560_2ac_cfg_soc)},
2630     + {IWL_PCI_DEVICE(0x30DC, 0x0034, iwl9560_2ac_cfg_soc)},
2631     + {IWL_PCI_DEVICE(0x30DC, 0x0038, iwl9560_2ac_cfg_soc)},
2632     + {IWL_PCI_DEVICE(0x30DC, 0x003C, iwl9560_2ac_cfg_soc)},
2633     {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
2634     {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
2635     {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
2636     {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
2637     + {IWL_PCI_DEVICE(0x30DC, 0x0230, iwl9560_2ac_cfg_soc)},
2638     + {IWL_PCI_DEVICE(0x30DC, 0x0234, iwl9560_2ac_cfg_soc)},
2639     + {IWL_PCI_DEVICE(0x30DC, 0x0238, iwl9560_2ac_cfg_soc)},
2640     + {IWL_PCI_DEVICE(0x30DC, 0x023C, iwl9560_2ac_cfg_soc)},
2641     {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
2642     {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
2643     {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
2644     {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
2645     - {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
2646     - {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
2647     - {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
2648     - {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
2649     - {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
2650     - {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
2651     - {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
2652     - {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
2653     - {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
2654     - {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
2655     - {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
2656     - {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
2657     - {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
2658     - {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
2659     - {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
2660     - {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
2661     - {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
2662     - {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
2663     - {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
2664     + {IWL_PCI_DEVICE(0x30DC, 0x1010, iwl9260_2ac_cfg)},
2665     + {IWL_PCI_DEVICE(0x30DC, 0x1030, iwl9560_2ac_cfg_soc)},
2666     + {IWL_PCI_DEVICE(0x30DC, 0x1210, iwl9260_2ac_cfg)},
2667     + {IWL_PCI_DEVICE(0x30DC, 0x2030, iwl9560_2ac_cfg_soc)},
2668     + {IWL_PCI_DEVICE(0x30DC, 0x2034, iwl9560_2ac_cfg_soc)},
2669     + {IWL_PCI_DEVICE(0x30DC, 0x4030, iwl9560_2ac_cfg_soc)},
2670     + {IWL_PCI_DEVICE(0x30DC, 0x4034, iwl9560_2ac_cfg_soc)},
2671     + {IWL_PCI_DEVICE(0x30DC, 0x40A4, iwl9462_2ac_cfg_soc)},
2672     + {IWL_PCI_DEVICE(0x30DC, 0x4234, iwl9560_2ac_cfg_soc)},
2673     + {IWL_PCI_DEVICE(0x30DC, 0x42A4, iwl9462_2ac_cfg_soc)},
2674     + {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_shared_clk)},
2675     + {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_shared_clk)},
2676     + {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_shared_clk)},
2677     + {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_shared_clk)},
2678     + {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_shared_clk)},
2679     + {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_shared_clk)},
2680     + {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_shared_clk)},
2681     + {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_shared_clk)},
2682     + {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_shared_clk)},
2683     + {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_shared_clk)},
2684     + {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_shared_clk)},
2685     + {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_shared_clk)},
2686     + {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_shared_clk)},
2687     + {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_shared_clk)},
2688     + {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_shared_clk)},
2689     + {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_shared_clk)},
2690     + {IWL_PCI_DEVICE(0x31DC, 0x1010, iwl9260_2ac_cfg)},
2691     + {IWL_PCI_DEVICE(0x31DC, 0x1030, iwl9560_2ac_cfg_shared_clk)},
2692     + {IWL_PCI_DEVICE(0x31DC, 0x1210, iwl9260_2ac_cfg)},
2693     + {IWL_PCI_DEVICE(0x31DC, 0x2030, iwl9560_2ac_cfg_shared_clk)},
2694     + {IWL_PCI_DEVICE(0x31DC, 0x2034, iwl9560_2ac_cfg_shared_clk)},
2695     + {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_shared_clk)},
2696     + {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_shared_clk)},
2697     + {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_shared_clk)},
2698     + {IWL_PCI_DEVICE(0x31DC, 0x4234, iwl9560_2ac_cfg_shared_clk)},
2699     + {IWL_PCI_DEVICE(0x31DC, 0x42A4, iwl9462_2ac_cfg_shared_clk)},
2700     {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
2701     {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
2702     + {IWL_PCI_DEVICE(0x34F0, 0x0038, iwl9560_2ac_cfg_soc)},
2703     + {IWL_PCI_DEVICE(0x34F0, 0x003C, iwl9560_2ac_cfg_soc)},
2704     + {IWL_PCI_DEVICE(0x34F0, 0x0060, iwl9461_2ac_cfg_soc)},
2705     + {IWL_PCI_DEVICE(0x34F0, 0x0064, iwl9461_2ac_cfg_soc)},
2706     + {IWL_PCI_DEVICE(0x34F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2707     + {IWL_PCI_DEVICE(0x34F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2708     + {IWL_PCI_DEVICE(0x34F0, 0x0230, iwl9560_2ac_cfg_soc)},
2709     + {IWL_PCI_DEVICE(0x34F0, 0x0234, iwl9560_2ac_cfg_soc)},
2710     + {IWL_PCI_DEVICE(0x34F0, 0x0238, iwl9560_2ac_cfg_soc)},
2711     + {IWL_PCI_DEVICE(0x34F0, 0x023C, iwl9560_2ac_cfg_soc)},
2712     + {IWL_PCI_DEVICE(0x34F0, 0x0260, iwl9461_2ac_cfg_soc)},
2713     + {IWL_PCI_DEVICE(0x34F0, 0x0264, iwl9461_2ac_cfg_soc)},
2714     + {IWL_PCI_DEVICE(0x34F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2715     {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2716     + {IWL_PCI_DEVICE(0x34F0, 0x1010, iwl9260_2ac_cfg)},
2717     + {IWL_PCI_DEVICE(0x34F0, 0x1030, iwl9560_2ac_cfg_soc)},
2718     + {IWL_PCI_DEVICE(0x34F0, 0x1210, iwl9260_2ac_cfg)},
2719     + {IWL_PCI_DEVICE(0x34F0, 0x2030, iwl9560_2ac_cfg_soc)},
2720     + {IWL_PCI_DEVICE(0x34F0, 0x2034, iwl9560_2ac_cfg_soc)},
2721     + {IWL_PCI_DEVICE(0x34F0, 0x4030, iwl9560_2ac_cfg_soc)},
2722     + {IWL_PCI_DEVICE(0x34F0, 0x4034, iwl9560_2ac_cfg_soc)},
2723     + {IWL_PCI_DEVICE(0x34F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2724     + {IWL_PCI_DEVICE(0x34F0, 0x4234, iwl9560_2ac_cfg_soc)},
2725     + {IWL_PCI_DEVICE(0x34F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2726     + {IWL_PCI_DEVICE(0x3DF0, 0x0030, iwl9560_2ac_cfg_soc)},
2727     + {IWL_PCI_DEVICE(0x3DF0, 0x0034, iwl9560_2ac_cfg_soc)},
2728     + {IWL_PCI_DEVICE(0x3DF0, 0x0038, iwl9560_2ac_cfg_soc)},
2729     + {IWL_PCI_DEVICE(0x3DF0, 0x003C, iwl9560_2ac_cfg_soc)},
2730     + {IWL_PCI_DEVICE(0x3DF0, 0x0060, iwl9461_2ac_cfg_soc)},
2731     + {IWL_PCI_DEVICE(0x3DF0, 0x0064, iwl9461_2ac_cfg_soc)},
2732     + {IWL_PCI_DEVICE(0x3DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
2733     + {IWL_PCI_DEVICE(0x3DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
2734     + {IWL_PCI_DEVICE(0x3DF0, 0x0230, iwl9560_2ac_cfg_soc)},
2735     + {IWL_PCI_DEVICE(0x3DF0, 0x0234, iwl9560_2ac_cfg_soc)},
2736     + {IWL_PCI_DEVICE(0x3DF0, 0x0238, iwl9560_2ac_cfg_soc)},
2737     + {IWL_PCI_DEVICE(0x3DF0, 0x023C, iwl9560_2ac_cfg_soc)},
2738     + {IWL_PCI_DEVICE(0x3DF0, 0x0260, iwl9461_2ac_cfg_soc)},
2739     + {IWL_PCI_DEVICE(0x3DF0, 0x0264, iwl9461_2ac_cfg_soc)},
2740     + {IWL_PCI_DEVICE(0x3DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
2741     + {IWL_PCI_DEVICE(0x3DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
2742     + {IWL_PCI_DEVICE(0x3DF0, 0x1010, iwl9260_2ac_cfg)},
2743     + {IWL_PCI_DEVICE(0x3DF0, 0x1030, iwl9560_2ac_cfg_soc)},
2744     + {IWL_PCI_DEVICE(0x3DF0, 0x1210, iwl9260_2ac_cfg)},
2745     + {IWL_PCI_DEVICE(0x3DF0, 0x2030, iwl9560_2ac_cfg_soc)},
2746     + {IWL_PCI_DEVICE(0x3DF0, 0x2034, iwl9560_2ac_cfg_soc)},
2747     + {IWL_PCI_DEVICE(0x3DF0, 0x4030, iwl9560_2ac_cfg_soc)},
2748     + {IWL_PCI_DEVICE(0x3DF0, 0x4034, iwl9560_2ac_cfg_soc)},
2749     + {IWL_PCI_DEVICE(0x3DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
2750     + {IWL_PCI_DEVICE(0x3DF0, 0x4234, iwl9560_2ac_cfg_soc)},
2751     + {IWL_PCI_DEVICE(0x3DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
2752     + {IWL_PCI_DEVICE(0x43F0, 0x0030, iwl9560_2ac_cfg_soc)},
2753     + {IWL_PCI_DEVICE(0x43F0, 0x0034, iwl9560_2ac_cfg_soc)},
2754     + {IWL_PCI_DEVICE(0x43F0, 0x0038, iwl9560_2ac_cfg_soc)},
2755     + {IWL_PCI_DEVICE(0x43F0, 0x003C, iwl9560_2ac_cfg_soc)},
2756     + {IWL_PCI_DEVICE(0x43F0, 0x0060, iwl9461_2ac_cfg_soc)},
2757     + {IWL_PCI_DEVICE(0x43F0, 0x0064, iwl9461_2ac_cfg_soc)},
2758     + {IWL_PCI_DEVICE(0x43F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2759     + {IWL_PCI_DEVICE(0x43F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2760     + {IWL_PCI_DEVICE(0x43F0, 0x0230, iwl9560_2ac_cfg_soc)},
2761     + {IWL_PCI_DEVICE(0x43F0, 0x0234, iwl9560_2ac_cfg_soc)},
2762     + {IWL_PCI_DEVICE(0x43F0, 0x0238, iwl9560_2ac_cfg_soc)},
2763     + {IWL_PCI_DEVICE(0x43F0, 0x023C, iwl9560_2ac_cfg_soc)},
2764     + {IWL_PCI_DEVICE(0x43F0, 0x0260, iwl9461_2ac_cfg_soc)},
2765     + {IWL_PCI_DEVICE(0x43F0, 0x0264, iwl9461_2ac_cfg_soc)},
2766     + {IWL_PCI_DEVICE(0x43F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2767     + {IWL_PCI_DEVICE(0x43F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2768     + {IWL_PCI_DEVICE(0x43F0, 0x1010, iwl9260_2ac_cfg)},
2769     + {IWL_PCI_DEVICE(0x43F0, 0x1030, iwl9560_2ac_cfg_soc)},
2770     + {IWL_PCI_DEVICE(0x43F0, 0x1210, iwl9260_2ac_cfg)},
2771     + {IWL_PCI_DEVICE(0x43F0, 0x2030, iwl9560_2ac_cfg_soc)},
2772     + {IWL_PCI_DEVICE(0x43F0, 0x2034, iwl9560_2ac_cfg_soc)},
2773     + {IWL_PCI_DEVICE(0x43F0, 0x4030, iwl9560_2ac_cfg_soc)},
2774     + {IWL_PCI_DEVICE(0x43F0, 0x4034, iwl9560_2ac_cfg_soc)},
2775     + {IWL_PCI_DEVICE(0x43F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2776     + {IWL_PCI_DEVICE(0x43F0, 0x4234, iwl9560_2ac_cfg_soc)},
2777     + {IWL_PCI_DEVICE(0x43F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2778     {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
2779     {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
2780     {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
2781     @@ -624,11 +738,44 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2782     {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
2783     {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
2784     {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
2785     + {IWL_PCI_DEVICE(0x9DF0, 0x1010, iwl9260_2ac_cfg)},
2786     + {IWL_PCI_DEVICE(0x9DF0, 0x1030, iwl9560_2ac_cfg_soc)},
2787     + {IWL_PCI_DEVICE(0x9DF0, 0x1210, iwl9260_2ac_cfg)},
2788     {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
2789     + {IWL_PCI_DEVICE(0x9DF0, 0x2030, iwl9560_2ac_cfg_soc)},
2790     + {IWL_PCI_DEVICE(0x9DF0, 0x2034, iwl9560_2ac_cfg_soc)},
2791     {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
2792     {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
2793     {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
2794     {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
2795     + {IWL_PCI_DEVICE(0x9DF0, 0x4234, iwl9560_2ac_cfg_soc)},
2796     + {IWL_PCI_DEVICE(0x9DF0, 0x42A4, iwl9462_2ac_cfg_soc)},
2797     + {IWL_PCI_DEVICE(0xA0F0, 0x0030, iwl9560_2ac_cfg_soc)},
2798     + {IWL_PCI_DEVICE(0xA0F0, 0x0034, iwl9560_2ac_cfg_soc)},
2799     + {IWL_PCI_DEVICE(0xA0F0, 0x0038, iwl9560_2ac_cfg_soc)},
2800     + {IWL_PCI_DEVICE(0xA0F0, 0x003C, iwl9560_2ac_cfg_soc)},
2801     + {IWL_PCI_DEVICE(0xA0F0, 0x0060, iwl9461_2ac_cfg_soc)},
2802     + {IWL_PCI_DEVICE(0xA0F0, 0x0064, iwl9461_2ac_cfg_soc)},
2803     + {IWL_PCI_DEVICE(0xA0F0, 0x00A0, iwl9462_2ac_cfg_soc)},
2804     + {IWL_PCI_DEVICE(0xA0F0, 0x00A4, iwl9462_2ac_cfg_soc)},
2805     + {IWL_PCI_DEVICE(0xA0F0, 0x0230, iwl9560_2ac_cfg_soc)},
2806     + {IWL_PCI_DEVICE(0xA0F0, 0x0234, iwl9560_2ac_cfg_soc)},
2807     + {IWL_PCI_DEVICE(0xA0F0, 0x0238, iwl9560_2ac_cfg_soc)},
2808     + {IWL_PCI_DEVICE(0xA0F0, 0x023C, iwl9560_2ac_cfg_soc)},
2809     + {IWL_PCI_DEVICE(0xA0F0, 0x0260, iwl9461_2ac_cfg_soc)},
2810     + {IWL_PCI_DEVICE(0xA0F0, 0x0264, iwl9461_2ac_cfg_soc)},
2811     + {IWL_PCI_DEVICE(0xA0F0, 0x02A0, iwl9462_2ac_cfg_soc)},
2812     + {IWL_PCI_DEVICE(0xA0F0, 0x02A4, iwl9462_2ac_cfg_soc)},
2813     + {IWL_PCI_DEVICE(0xA0F0, 0x1010, iwl9260_2ac_cfg)},
2814     + {IWL_PCI_DEVICE(0xA0F0, 0x1030, iwl9560_2ac_cfg_soc)},
2815     + {IWL_PCI_DEVICE(0xA0F0, 0x1210, iwl9260_2ac_cfg)},
2816     + {IWL_PCI_DEVICE(0xA0F0, 0x2030, iwl9560_2ac_cfg_soc)},
2817     + {IWL_PCI_DEVICE(0xA0F0, 0x2034, iwl9560_2ac_cfg_soc)},
2818     + {IWL_PCI_DEVICE(0xA0F0, 0x4030, iwl9560_2ac_cfg_soc)},
2819     + {IWL_PCI_DEVICE(0xA0F0, 0x4034, iwl9560_2ac_cfg_soc)},
2820     + {IWL_PCI_DEVICE(0xA0F0, 0x40A4, iwl9462_2ac_cfg_soc)},
2821     + {IWL_PCI_DEVICE(0xA0F0, 0x4234, iwl9560_2ac_cfg_soc)},
2822     + {IWL_PCI_DEVICE(0xA0F0, 0x42A4, iwl9462_2ac_cfg_soc)},
2823     {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
2824     {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
2825     {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
2826     @@ -645,10 +792,16 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
2827     {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
2828     {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
2829     {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
2830     + {IWL_PCI_DEVICE(0xA370, 0x1010, iwl9260_2ac_cfg)},
2831     {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
2832     + {IWL_PCI_DEVICE(0xA370, 0x1210, iwl9260_2ac_cfg)},
2833     + {IWL_PCI_DEVICE(0xA370, 0x2030, iwl9560_2ac_cfg_soc)},
2834     + {IWL_PCI_DEVICE(0xA370, 0x2034, iwl9560_2ac_cfg_soc)},
2835     {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
2836     {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
2837     {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
2838     + {IWL_PCI_DEVICE(0xA370, 0x4234, iwl9560_2ac_cfg_soc)},
2839     + {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
2840    
2841     /* a000 Series */
2842     {IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
2843     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2844     index c980cdbd6e53..a9ba9fe263ca 100644
2845     --- a/drivers/net/xen-netfront.c
2846     +++ b/drivers/net/xen-netfront.c
2847     @@ -2008,7 +2008,10 @@ static void netback_changed(struct xenbus_device *dev,
2848     case XenbusStateInitialised:
2849     case XenbusStateReconfiguring:
2850     case XenbusStateReconfigured:
2851     + break;
2852     +
2853     case XenbusStateUnknown:
2854     + wake_up_all(&module_unload_q);
2855     break;
2856    
2857     case XenbusStateInitWait:
2858     @@ -2139,7 +2142,9 @@ static int xennet_remove(struct xenbus_device *dev)
2859     xenbus_switch_state(dev, XenbusStateClosing);
2860     wait_event(module_unload_q,
2861     xenbus_read_driver_state(dev->otherend) ==
2862     - XenbusStateClosing);
2863     + XenbusStateClosing ||
2864     + xenbus_read_driver_state(dev->otherend) ==
2865     + XenbusStateUnknown);
2866    
2867     xenbus_switch_state(dev, XenbusStateClosed);
2868     wait_event(module_unload_q,
2869     diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
2870     index 98466d762c8f..0939f064054d 100644
2871     --- a/drivers/nvdimm/dimm.c
2872     +++ b/drivers/nvdimm/dimm.c
2873     @@ -65,9 +65,11 @@ static int nvdimm_probe(struct device *dev)
2874     ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);
2875     nd_label_copy(ndd, to_next_namespace_index(ndd),
2876     to_current_namespace_index(ndd));
2877     - rc = nd_label_reserve_dpa(ndd);
2878     - if (ndd->ns_current >= 0)
2879     - nvdimm_set_aliasing(dev);
2880     + if (ndd->ns_current >= 0) {
2881     + rc = nd_label_reserve_dpa(ndd);
2882     + if (rc == 0)
2883     + nvdimm_set_aliasing(dev);
2884     + }
2885     nvdimm_clear_locked(dev);
2886     nvdimm_bus_unlock(dev);
2887    
2888     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
2889     index 0af988739a06..228bafa4d322 100644
2890     --- a/drivers/nvdimm/namespace_devs.c
2891     +++ b/drivers/nvdimm/namespace_devs.c
2892     @@ -1926,7 +1926,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
2893     }
2894    
2895     if (i < nd_region->ndr_mappings) {
2896     - struct nvdimm_drvdata *ndd = to_ndd(&nd_region->mapping[i]);
2897     + struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;
2898    
2899     /*
2900     * Give up if we don't find an instance of a uuid at each
2901     @@ -1934,7 +1934,7 @@ struct device *create_namespace_pmem(struct nd_region *nd_region,
2902     * find a dimm with two instances of the same uuid.
2903     */
2904     dev_err(&nd_region->dev, "%s missing label for %pUb\n",
2905     - dev_name(ndd->dev), nd_label->uuid);
2906     + nvdimm_name(nvdimm), nd_label->uuid);
2907     rc = -EINVAL;
2908     goto err;
2909     }
2910     diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
2911     index 5ed2dcaa8e27..711875afdd70 100644
2912     --- a/drivers/pci/hotplug/acpiphp_glue.c
2913     +++ b/drivers/pci/hotplug/acpiphp_glue.c
2914     @@ -558,6 +558,7 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
2915     {
2916     unsigned long long sta = 0;
2917     struct acpiphp_func *func;
2918     + u32 dvid;
2919    
2920     list_for_each_entry(func, &slot->funcs, sibling) {
2921     if (func->flags & FUNC_HAS_STA) {
2922     @@ -568,19 +569,27 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
2923     if (ACPI_SUCCESS(status) && sta)
2924     break;
2925     } else {
2926     - u32 dvid;
2927     -
2928     - pci_bus_read_config_dword(slot->bus,
2929     - PCI_DEVFN(slot->device,
2930     - func->function),
2931     - PCI_VENDOR_ID, &dvid);
2932     - if (dvid != 0xffffffff) {
2933     + if (pci_bus_read_dev_vendor_id(slot->bus,
2934     + PCI_DEVFN(slot->device, func->function),
2935     + &dvid, 0)) {
2936     sta = ACPI_STA_ALL;
2937     break;
2938     }
2939     }
2940     }
2941    
2942     + if (!sta) {
2943     + /*
2944     + * Check for the slot itself since it may be that the
2945     + * ACPI slot is a device below PCIe upstream port so in
2946     + * that case it may not even be reachable yet.
2947     + */
2948     + if (pci_bus_read_dev_vendor_id(slot->bus,
2949     + PCI_DEVFN(slot->device, 0), &dvid, 0)) {
2950     + sta = ACPI_STA_ALL;
2951     + }
2952     + }
2953     +
2954     return (unsigned int)sta;
2955     }
2956    
2957     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2958     index 05fadcc4f9d2..5c5a8af66829 100644
2959     --- a/drivers/pci/quirks.c
2960     +++ b/drivers/pci/quirks.c
2961     @@ -4806,9 +4806,13 @@ static void quirk_no_ext_tags(struct pci_dev *pdev)
2962    
2963     pci_walk_bus(bridge->bus, pci_configure_extended_tags, NULL);
2964     }
2965     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0132, quirk_no_ext_tags);
2966     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0140, quirk_no_ext_tags);
2967     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0141, quirk_no_ext_tags);
2968     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0142, quirk_no_ext_tags);
2969     DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0144, quirk_no_ext_tags);
2970     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0420, quirk_no_ext_tags);
2971     +DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
2972    
2973     #ifdef CONFIG_PCI_ATS
2974     /*
2975     diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
2976     index 1161e11fb3cf..afedb8cd1990 100644
2977     --- a/drivers/phy/allwinner/phy-sun4i-usb.c
2978     +++ b/drivers/phy/allwinner/phy-sun4i-usb.c
2979     @@ -410,11 +410,13 @@ static bool sun4i_usb_phy0_poll(struct sun4i_usb_phy_data *data)
2980     return true;
2981    
2982     /*
2983     - * The A31 companion pmic (axp221) does not generate vbus change
2984     - * interrupts when the board is driving vbus, so we must poll
2985     + * The A31/A23/A33 companion pmics (AXP221/AXP223) do not
2986     + * generate vbus change interrupts when the board is driving
2987     + * vbus using the N_VBUSEN pin on the pmic, so we must poll
2988     * when using the pmic for vbus-det _and_ we're driving vbus.
2989     */
2990     - if (data->cfg->type == sun6i_a31_phy &&
2991     + if ((data->cfg->type == sun6i_a31_phy ||
2992     + data->cfg->type == sun8i_a33_phy) &&
2993     data->vbus_power_supply && data->phys[0].regulator_on)
2994     return true;
2995    
2996     @@ -885,7 +887,7 @@ static const struct sun4i_usb_phy_cfg sun7i_a20_cfg = {
2997    
2998     static const struct sun4i_usb_phy_cfg sun8i_a23_cfg = {
2999     .num_phys = 2,
3000     - .type = sun4i_a10_phy,
3001     + .type = sun6i_a31_phy,
3002     .disc_thresh = 3,
3003     .phyctl_offset = REG_PHYCTL_A10,
3004     .dedicated_clocks = true,
3005     diff --git a/drivers/pwm/pwm-rcar.c b/drivers/pwm/pwm-rcar.c
3006     index 1c85ecc9e7ac..0fcf94ffad32 100644
3007     --- a/drivers/pwm/pwm-rcar.c
3008     +++ b/drivers/pwm/pwm-rcar.c
3009     @@ -156,8 +156,12 @@ static int rcar_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
3010     if (div < 0)
3011     return div;
3012    
3013     - /* Let the core driver set pwm->period if disabled and duty_ns == 0 */
3014     - if (!pwm_is_enabled(pwm) && !duty_ns)
3015     + /*
3016     + * Let the core driver set pwm->period if disabled and duty_ns == 0.
3017     + * However, this driver should prevent setting a new duty_ns if the
3018     + * current duty_cycle is not set.
3019     + */
3020     + if (!pwm_is_enabled(pwm) && !duty_ns && !pwm->state.duty_cycle)
3021     return 0;
3022    
3023     rcar_pwm_update(rp, RCAR_PWMCR_SYNC, RCAR_PWMCR_SYNC, RCAR_PWMCR);
3024     diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
3025     index e1ce8b1b5090..fb2a8b1e7979 100644
3026     --- a/drivers/soc/mediatek/mtk-scpsys.c
3027     +++ b/drivers/soc/mediatek/mtk-scpsys.c
3028     @@ -892,7 +892,7 @@ static int scpsys_probe(struct platform_device *pdev)
3029    
3030     pd_data = &scp->pd_data;
3031    
3032     - for (i = 0, sd = soc->subdomains ; i < soc->num_subdomains ; i++) {
3033     + for (i = 0, sd = soc->subdomains; i < soc->num_subdomains; i++, sd++) {
3034     ret = pm_genpd_add_subdomain(pd_data->domains[sd->origin],
3035     pd_data->domains[sd->subdomain]);
3036     if (ret && IS_ENABLED(CONFIG_PM))
3037     diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
3038     index 669470971023..047875861df1 100644
3039     --- a/drivers/spi/spi-atmel.c
3040     +++ b/drivers/spi/spi-atmel.c
3041     @@ -1489,6 +1489,11 @@ static void atmel_spi_init(struct atmel_spi *as)
3042     {
3043     spi_writel(as, CR, SPI_BIT(SWRST));
3044     spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
3045     +
3046     + /* It is recommended to enable FIFOs first thing after reset */
3047     + if (as->fifo_size)
3048     + spi_writel(as, CR, SPI_BIT(FIFOEN));
3049     +
3050     if (as->caps.has_wdrbt) {
3051     spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
3052     | SPI_BIT(MSTR));
3053     @@ -1499,9 +1504,6 @@ static void atmel_spi_init(struct atmel_spi *as)
3054     if (as->use_pdc)
3055     spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
3056     spi_writel(as, CR, SPI_BIT(SPIEN));
3057     -
3058     - if (as->fifo_size)
3059     - spi_writel(as, CR, SPI_BIT(FIFOEN));
3060     }
3061    
3062     static int atmel_spi_probe(struct platform_device *pdev)
3063     diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
3064     index 3ff0ee88c467..84dfef4bd6ae 100644
3065     --- a/drivers/spi/spi.c
3066     +++ b/drivers/spi/spi.c
3067     @@ -779,8 +779,14 @@ static int spi_map_buf(struct spi_controller *ctlr, struct device *dev,
3068     for (i = 0; i < sgs; i++) {
3069    
3070     if (vmalloced_buf || kmap_buf) {
3071     - min = min_t(size_t,
3072     - len, desc_len - offset_in_page(buf));
3073     + /*
3074     + * Next scatterlist entry size is the minimum between
3075     + * the desc_len and the remaining buffer length that
3076     + * fits in a page.
3077     + */
3078     + min = min_t(size_t, desc_len,
3079     + min_t(size_t, len,
3080     + PAGE_SIZE - offset_in_page(buf)));
3081     if (vmalloced_buf)
3082     vm_page = vmalloc_to_page(buf);
3083     else
3084     @@ -2252,12 +2258,6 @@ void spi_unregister_controller(struct spi_controller *ctlr)
3085     mutex_lock(&board_lock);
3086     found = idr_find(&spi_master_idr, id);
3087     mutex_unlock(&board_lock);
3088     - if (found != ctlr) {
3089     - dev_dbg(&ctlr->dev,
3090     - "attempting to delete unregistered controller [%s]\n",
3091     - dev_name(&ctlr->dev));
3092     - return;
3093     - }
3094     if (ctlr->queued) {
3095     if (spi_destroy_queue(ctlr))
3096     dev_err(&ctlr->dev, "queue remove failed\n");
3097     @@ -2270,7 +2270,8 @@ void spi_unregister_controller(struct spi_controller *ctlr)
3098     device_unregister(&ctlr->dev);
3099     /* free bus id */
3100     mutex_lock(&board_lock);
3101     - idr_remove(&spi_master_idr, id);
3102     + if (found == ctlr)
3103     + idr_remove(&spi_master_idr, id);
3104     mutex_unlock(&board_lock);
3105     }
3106     EXPORT_SYMBOL_GPL(spi_unregister_controller);
3107     diff --git a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
3108     index d8cfed358d55..f1d8cc5a2730 100644
3109     --- a/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
3110     +++ b/drivers/staging/media/atomisp/pci/atomisp2/atomisp_fops.c
3111     @@ -1285,7 +1285,10 @@ const struct v4l2_file_operations atomisp_fops = {
3112     .mmap = atomisp_mmap,
3113     .unlocked_ioctl = video_ioctl2,
3114     #ifdef CONFIG_COMPAT
3115     + /*
3116     + * There are problems with this code. Disable this for now.
3117     .compat_ioctl32 = atomisp_compat_ioctl32,
3118     + */
3119     #endif
3120     .poll = atomisp_poll,
3121     };
3122     @@ -1297,7 +1300,10 @@ const struct v4l2_file_operations atomisp_file_fops = {
3123     .mmap = atomisp_file_mmap,
3124     .unlocked_ioctl = video_ioctl2,
3125     #ifdef CONFIG_COMPAT
3126     + /*
3127     + * There are problems with this code. Disable this for now.
3128     .compat_ioctl32 = atomisp_compat_ioctl32,
3129     + */
3130     #endif
3131     .poll = atomisp_poll,
3132     };
3133     diff --git a/drivers/staging/media/lirc/lirc_zilog.c b/drivers/staging/media/lirc/lirc_zilog.c
3134     index 71af13bd0ebd..e35e1b2160e3 100644
3135     --- a/drivers/staging/media/lirc/lirc_zilog.c
3136     +++ b/drivers/staging/media/lirc/lirc_zilog.c
3137     @@ -288,7 +288,7 @@ static void release_ir_tx(struct kref *ref)
3138     struct IR_tx *tx = container_of(ref, struct IR_tx, ref);
3139     struct IR *ir = tx->ir;
3140    
3141     - ir->l.features &= ~LIRC_CAN_SEND_LIRCCODE;
3142     + ir->l.features &= ~LIRC_CAN_SEND_PULSE;
3143     /* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */
3144     ir->tx = NULL;
3145     kfree(tx);
3146     @@ -1228,6 +1228,7 @@ static unsigned int poll(struct file *filep, poll_table *wait)
3147    
3148     dev_dbg(ir->l.dev, "%s result = %s\n", __func__,
3149     ret ? "POLLIN|POLLRDNORM" : "none");
3150     + put_ir_rx(rx, false);
3151     return ret;
3152     }
3153    
3154     @@ -1267,14 +1268,14 @@ static long ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3155     if (!(features & LIRC_CAN_SEND_MASK))
3156     return -ENOTTY;
3157    
3158     - result = put_user(LIRC_MODE_LIRCCODE, uptr);
3159     + result = put_user(LIRC_MODE_PULSE, uptr);
3160     break;
3161     case LIRC_SET_SEND_MODE:
3162     if (!(features & LIRC_CAN_SEND_MASK))
3163     return -ENOTTY;
3164    
3165     result = get_user(mode, uptr);
3166     - if (!result && mode != LIRC_MODE_LIRCCODE)
3167     + if (!result && mode != LIRC_MODE_PULSE)
3168     return -EINVAL;
3169     break;
3170     default:
3171     @@ -1512,7 +1513,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
3172     kref_init(&tx->ref);
3173     ir->tx = tx;
3174    
3175     - ir->l.features |= LIRC_CAN_SEND_LIRCCODE;
3176     + ir->l.features |= LIRC_CAN_SEND_PULSE;
3177     mutex_init(&tx->client_lock);
3178     tx->c = client;
3179     tx->need_boot = 1;
3180     diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
3181     index 4798b4b1fd77..41c6154ae856 100644
3182     --- a/drivers/thermal/imx_thermal.c
3183     +++ b/drivers/thermal/imx_thermal.c
3184     @@ -601,6 +601,9 @@ static int imx_thermal_probe(struct platform_device *pdev)
3185     regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
3186     regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
3187    
3188     + data->irq_enabled = true;
3189     + data->mode = THERMAL_DEVICE_ENABLED;
3190     +
3191     ret = devm_request_threaded_irq(&pdev->dev, data->irq,
3192     imx_thermal_alarm_irq, imx_thermal_alarm_irq_thread,
3193     0, "imx_thermal", data);
3194     @@ -613,9 +616,6 @@ static int imx_thermal_probe(struct platform_device *pdev)
3195     return ret;
3196     }
3197    
3198     - data->irq_enabled = true;
3199     - data->mode = THERMAL_DEVICE_ENABLED;
3200     -
3201     return 0;
3202     }
3203    
3204     diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
3205     index 53250fc057e1..91830b1bdcaf 100644
3206     --- a/drivers/thunderbolt/icm.c
3207     +++ b/drivers/thunderbolt/icm.c
3208     @@ -539,14 +539,14 @@ static bool icm_ar_is_supported(struct tb *tb)
3209     static int icm_ar_get_mode(struct tb *tb)
3210     {
3211     struct tb_nhi *nhi = tb->nhi;
3212     - int retries = 5;
3213     + int retries = 60;
3214     u32 val;
3215    
3216     do {
3217     val = ioread32(nhi->iobase + REG_FW_STS);
3218     if (val & REG_FW_STS_NVM_AUTH_DONE)
3219     break;
3220     - msleep(30);
3221     + msleep(50);
3222     } while (--retries);
3223    
3224     if (!retries) {
3225     @@ -720,6 +720,9 @@ static int icm_firmware_reset(struct tb *tb, struct tb_nhi *nhi)
3226     struct icm *icm = tb_priv(tb);
3227     u32 val;
3228    
3229     + if (!icm->upstream_port)
3230     + return -ENODEV;
3231     +
3232     /* Put ARC to wait for CIO reset event to happen */
3233     val = ioread32(nhi->iobase + REG_FW_STS);
3234     val |= REG_FW_STS_CIO_RESET_REQ;
3235     @@ -859,6 +862,9 @@ static int icm_firmware_init(struct tb *tb)
3236     break;
3237    
3238     default:
3239     + if (ret < 0)
3240     + return ret;
3241     +
3242     tb_err(tb, "ICM firmware is in wrong mode: %u\n", ret);
3243     return -ENODEV;
3244     }
3245     diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c
3246     index 05af126a2435..16c607075ede 100644
3247     --- a/drivers/thunderbolt/nhi.c
3248     +++ b/drivers/thunderbolt/nhi.c
3249     @@ -846,6 +846,7 @@ static const struct dev_pm_ops nhi_pm_ops = {
3250     * we just disable hotplug, the
3251     * pci-tunnels stay alive.
3252     */
3253     + .thaw_noirq = nhi_resume_noirq,
3254     .restore_noirq = nhi_resume_noirq,
3255     .suspend = nhi_suspend,
3256     .freeze = nhi_suspend,
3257     diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
3258     index 3953d17202a8..8bd137109980 100644
3259     --- a/drivers/thunderbolt/switch.c
3260     +++ b/drivers/thunderbolt/switch.c
3261     @@ -716,6 +716,13 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
3262     if (sw->authorized)
3263     goto unlock;
3264    
3265     + /*
3266     + * Make sure there is no PCIe rescan ongoing when a new PCIe
3267     + * tunnel is created. Otherwise the PCIe rescan code might find
3268     + * the new tunnel too early.
3269     + */
3270     + pci_lock_rescan_remove();
3271     +
3272     switch (val) {
3273     /* Approve switch */
3274     case 1:
3275     @@ -735,6 +742,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
3276     break;
3277     }
3278    
3279     + pci_unlock_rescan_remove();
3280     +
3281     if (!ret) {
3282     sw->authorized = val;
3283     /* Notify status change to the userspace */
3284     diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
3285     index faf50df81622..1c70541a1467 100644
3286     --- a/drivers/tty/n_tty.c
3287     +++ b/drivers/tty/n_tty.c
3288     @@ -2182,6 +2182,12 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
3289     }
3290     if (tty_hung_up_p(file))
3291     break;
3292     + /*
3293     + * Abort readers for ttys which never actually
3294     + * get hung up. See __tty_hangup().
3295     + */
3296     + if (test_bit(TTY_HUPPING, &tty->flags))
3297     + break;
3298     if (!timeout)
3299     break;
3300     if (file->f_flags & O_NONBLOCK) {
3301     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3302     index 7e77bd2118ad..52627478ab61 100644
3303     --- a/drivers/tty/tty_io.c
3304     +++ b/drivers/tty/tty_io.c
3305     @@ -585,6 +585,14 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
3306     return;
3307     }
3308    
3309     + /*
3310     + * Some console devices aren't actually hung up for technical and
3311     + * historical reasons, which can lead to indefinite interruptible
3312     + * sleep in n_tty_read(). The following explicitly tells
3313     + * n_tty_read() to abort readers.
3314     + */
3315     + set_bit(TTY_HUPPING, &tty->flags);
3316     +
3317     /* inuse_filps is protected by the single tty lock,
3318     this really needs to change if we want to flush the
3319     workqueue with the lock held */
3320     @@ -639,6 +647,7 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session)
3321     * from the ldisc side, which is now guaranteed.
3322     */
3323     set_bit(TTY_HUPPED, &tty->flags);
3324     + clear_bit(TTY_HUPPING, &tty->flags);
3325     tty_unlock(tty);
3326    
3327     if (f)
3328     diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c
3329     index bd3e0c5a6db2..212289c55b6f 100644
3330     --- a/drivers/usb/core/generic.c
3331     +++ b/drivers/usb/core/generic.c
3332     @@ -210,8 +210,13 @@ static int generic_suspend(struct usb_device *udev, pm_message_t msg)
3333     if (!udev->parent)
3334     rc = hcd_bus_suspend(udev, msg);
3335    
3336     - /* Non-root devices don't need to do anything for FREEZE or PRETHAW */
3337     - else if (msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
3338     + /*
3339     + * Non-root USB2 devices don't need to do anything for FREEZE
3340     + * or PRETHAW. USB3 devices don't support global suspend and
3341     + * need to be selectively suspended.
3342     + */
3343     + else if ((msg.event == PM_EVENT_FREEZE || msg.event == PM_EVENT_PRETHAW)
3344     + && (udev->speed < USB_SPEED_SUPER))
3345     rc = 0;
3346     else
3347     rc = usb_port_suspend(udev, msg);
3348     diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
3349     index 3219d8157f5b..c3f3f1a89b0f 100644
3350     --- a/drivers/usb/dwc3/core.c
3351     +++ b/drivers/usb/dwc3/core.c
3352     @@ -128,6 +128,9 @@ static void __dwc3_set_mode(struct work_struct *work)
3353     if (dwc->dr_mode != USB_DR_MODE_OTG)
3354     return;
3355    
3356     + if (dwc->desired_dr_role == DWC3_GCTL_PRTCAP_OTG)
3357     + return;
3358     +
3359     switch (dwc->current_dr_role) {
3360     case DWC3_GCTL_PRTCAP_HOST:
3361     dwc3_host_exit(dwc);
3362     diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
3363     index 54343fbd85ee..bc5e91d4fac8 100644
3364     --- a/drivers/usb/dwc3/dwc3-pci.c
3365     +++ b/drivers/usb/dwc3/dwc3-pci.c
3366     @@ -212,7 +212,7 @@ static int dwc3_pci_probe(struct pci_dev *pci,
3367     ret = platform_device_add_resources(dwc->dwc3, res, ARRAY_SIZE(res));
3368     if (ret) {
3369     dev_err(dev, "couldn't add resources to dwc3 device\n");
3370     - return ret;
3371     + goto err;
3372     }
3373    
3374     dwc->pci = pci;
3375     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3376     index 0ebdb313bb00..fe75e969f5ac 100644
3377     --- a/drivers/usb/dwc3/gadget.c
3378     +++ b/drivers/usb/dwc3/gadget.c
3379     @@ -174,18 +174,8 @@ static void dwc3_ep_inc_deq(struct dwc3_ep *dep)
3380     dwc3_ep_inc_trb(&dep->trb_dequeue);
3381     }
3382    
3383     -/**
3384     - * dwc3_gadget_giveback - call struct usb_request's ->complete callback
3385     - * @dep: The endpoint to whom the request belongs to
3386     - * @req: The request we're giving back
3387     - * @status: completion code for the request
3388     - *
3389     - * Must be called with controller's lock held and interrupts disabled. This
3390     - * function will unmap @req and call its ->complete() callback to notify upper
3391     - * layers that it has completed.
3392     - */
3393     -void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
3394     - int status)
3395     +void dwc3_gadget_del_and_unmap_request(struct dwc3_ep *dep,
3396     + struct dwc3_request *req, int status)
3397     {
3398     struct dwc3 *dwc = dep->dwc;
3399    
3400     @@ -198,18 +188,35 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
3401    
3402     if (req->trb)
3403     usb_gadget_unmap_request_by_dev(dwc->sysdev,
3404     - &req->request, req->direction);
3405     + &req->request, req->direction);
3406    
3407     req->trb = NULL;
3408     -
3409     trace_dwc3_gadget_giveback(req);
3410    
3411     + if (dep->number > 1)
3412     + pm_runtime_put(dwc->dev);
3413     +}
3414     +
3415     +/**
3416     + * dwc3_gadget_giveback - call struct usb_request's ->complete callback
3417     + * @dep: The endpoint to whom the request belongs to
3418     + * @req: The request we're giving back
3419     + * @status: completion code for the request
3420     + *
3421     + * Must be called with controller's lock held and interrupts disabled. This
3422     + * function will unmap @req and call its ->complete() callback to notify upper
3423     + * layers that it has completed.
3424     + */
3425     +void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
3426     + int status)
3427     +{
3428     + struct dwc3 *dwc = dep->dwc;
3429     +
3430     + dwc3_gadget_del_and_unmap_request(dep, req, status);
3431     +
3432     spin_unlock(&dwc->lock);
3433     usb_gadget_giveback_request(&dep->endpoint, &req->request);
3434     spin_lock(&dwc->lock);
3435     -
3436     - if (dep->number > 1)
3437     - pm_runtime_put(dwc->dev);
3438     }
3439    
3440     /**
3441     @@ -1233,7 +1240,7 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param)
3442     if (req->trb)
3443     memset(req->trb, 0, sizeof(struct dwc3_trb));
3444     dep->queued_requests--;
3445     - dwc3_gadget_giveback(dep, req, ret);
3446     + dwc3_gadget_del_and_unmap_request(dep, req, ret);
3447     return ret;
3448     }
3449    
3450     diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
3451     index 5d3d7941d2c2..71cf552b8828 100644
3452     --- a/drivers/usb/gadget/function/f_midi.c
3453     +++ b/drivers/usb/gadget/function/f_midi.c
3454     @@ -405,7 +405,8 @@ static int f_midi_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
3455     if (err) {
3456     ERROR(midi, "%s: couldn't enqueue request: %d\n",
3457     midi->out_ep->name, err);
3458     - free_ep_req(midi->out_ep, req);
3459     + if (req->buf != NULL)
3460     + free_ep_req(midi->out_ep, req);
3461     return err;
3462     }
3463     }
3464     diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
3465     index 7d53a4773d1a..2f03334c6874 100644
3466     --- a/drivers/usb/gadget/u_f.h
3467     +++ b/drivers/usb/gadget/u_f.h
3468     @@ -64,7 +64,9 @@ struct usb_request *alloc_ep_req(struct usb_ep *ep, size_t len);
3469     /* Frees a usb_request previously allocated by alloc_ep_req() */
3470     static inline void free_ep_req(struct usb_ep *ep, struct usb_request *req)
3471     {
3472     + WARN_ON(req->buf == NULL);
3473     kfree(req->buf);
3474     + req->buf = NULL;
3475     usb_ep_free_request(ep, req);
3476     }
3477    
3478     diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
3479     index 794bb4958383..5bab09294a8a 100644
3480     --- a/drivers/usb/gadget/udc/core.c
3481     +++ b/drivers/usb/gadget/udc/core.c
3482     @@ -249,6 +249,9 @@ EXPORT_SYMBOL_GPL(usb_ep_free_request);
3483     * arranges to poll once per interval, and the gadget driver usually will
3484     * have queued some data to transfer at that time.
3485     *
3486     + * Note that @req's ->complete() callback must never be called from
3487     + * within usb_ep_queue() as that can create deadlock situations.
3488     + *
3489     * Returns zero, or a negative error code. Endpoints that are not enabled
3490     * report errors; errors will also be
3491     * reported when the usb peripheral is disconnected.
3492     diff --git a/drivers/usb/musb/musb_gadget_ep0.c b/drivers/usb/musb/musb_gadget_ep0.c
3493     index 844a309fe895..e85b9c2a4910 100644
3494     --- a/drivers/usb/musb/musb_gadget_ep0.c
3495     +++ b/drivers/usb/musb/musb_gadget_ep0.c
3496     @@ -114,15 +114,19 @@ static int service_tx_status_request(
3497     }
3498    
3499     is_in = epnum & USB_DIR_IN;
3500     - if (is_in) {
3501     - epnum &= 0x0f;
3502     + epnum &= 0x0f;
3503     + if (epnum >= MUSB_C_NUM_EPS) {
3504     + handled = -EINVAL;
3505     + break;
3506     + }
3507     +
3508     + if (is_in)
3509     ep = &musb->endpoints[epnum].ep_in;
3510     - } else {
3511     + else
3512     ep = &musb->endpoints[epnum].ep_out;
3513     - }
3514     regs = musb->endpoints[epnum].regs;
3515    
3516     - if (epnum >= MUSB_C_NUM_EPS || !ep->desc) {
3517     + if (!ep->desc) {
3518     handled = -EINVAL;
3519     break;
3520     }
3521     diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
3522     index 91335e6de88a..115a36f6f403 100644
3523     --- a/drivers/vfio/pci/vfio_pci_config.c
3524     +++ b/drivers/vfio/pci/vfio_pci_config.c
3525     @@ -808,6 +808,7 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
3526     {
3527     __le16 *ctrl = (__le16 *)(vdev->vconfig + pos -
3528     offset + PCI_EXP_DEVCTL);
3529     + int readrq = le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ;
3530    
3531     count = vfio_default_config_write(vdev, pos, count, perm, offset, val);
3532     if (count < 0)
3533     @@ -833,6 +834,27 @@ static int vfio_exp_config_write(struct vfio_pci_device *vdev, int pos,
3534     pci_try_reset_function(vdev->pdev);
3535     }
3536    
3537     + /*
3538     + * MPS is virtualized to the user, writes do not change the physical
3539     + * register since determining a proper MPS value requires a system wide
3540     + * device view. The MRRS is largely independent of MPS, but since the
3541     + * user does not have that system-wide view, they might set a safe, but
3542     + * inefficiently low value. Here we allow writes through to hardware,
3543     + * but we set the floor to the physical device MPS setting, so that
3544     + * we can at least use full TLPs, as defined by the MPS value.
3545     + *
3546     + * NB, if any devices actually depend on an artificially low MRRS
3547     + * setting, this will need to be revisited, perhaps with a quirk
3548     + * through pcie_set_readrq().
3549     + */
3550     + if (readrq != (le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ)) {
3551     + readrq = 128 <<
3552     + ((le16_to_cpu(*ctrl) & PCI_EXP_DEVCTL_READRQ) >> 12);
3553     + readrq = max(readrq, pcie_get_mps(vdev->pdev));
3554     +
3555     + pcie_set_readrq(vdev->pdev, readrq);
3556     + }
3557     +
3558     return count;
3559     }
3560    
3561     @@ -851,11 +873,12 @@ static int __init init_pci_cap_exp_perm(struct perm_bits *perm)
3562     * Allow writes to device control fields, except devctl_phantom,
3563     * which could confuse IOMMU, MPS, which can break communication
3564     * with other physical devices, and the ARI bit in devctl2, which
3565     - * is set at probe time. FLR gets virtualized via our writefn.
3566     + * is set at probe time. FLR and MRRS get virtualized via our
3567     + * writefn.
3568     */
3569     p_setw(perm, PCI_EXP_DEVCTL,
3570     - PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD,
3571     - ~PCI_EXP_DEVCTL_PHANTOM);
3572     + PCI_EXP_DEVCTL_BCR_FLR | PCI_EXP_DEVCTL_PAYLOAD |
3573     + PCI_EXP_DEVCTL_READRQ, ~PCI_EXP_DEVCTL_PHANTOM);
3574     p_setw(perm, PCI_EXP_DEVCTL2, NO_VIRT, ~PCI_EXP_DEVCTL2_ARI);
3575     return 0;
3576     }
3577     diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c
3578     index 8658dba21768..e682bf046e50 100644
3579     --- a/drivers/watchdog/f71808e_wdt.c
3580     +++ b/drivers/watchdog/f71808e_wdt.c
3581     @@ -496,7 +496,7 @@ static bool watchdog_is_running(void)
3582    
3583     is_running = (superio_inb(watchdog.sioaddr, SIO_REG_ENABLE) & BIT(0))
3584     && (superio_inb(watchdog.sioaddr, F71808FG_REG_WDT_CONF)
3585     - & F71808FG_FLAG_WD_EN);
3586     + & BIT(F71808FG_FLAG_WD_EN));
3587    
3588     superio_exit(watchdog.sioaddr);
3589    
3590     diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c
3591     index 82e8f6edfb48..b12e37f27530 100644
3592     --- a/fs/autofs4/root.c
3593     +++ b/fs/autofs4/root.c
3594     @@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir,
3595    
3596     autofs4_del_active(dentry);
3597    
3598     - inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555);
3599     + inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode);
3600     if (!inode)
3601     return -ENOMEM;
3602     d_add(dentry, inode);
3603     diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
3604     index d5b2e12b5d02..cb0f1fbe836d 100644
3605     --- a/fs/cifs/Kconfig
3606     +++ b/fs/cifs/Kconfig
3607     @@ -190,6 +190,7 @@ config CIFS_NFSD_EXPORT
3608     config CIFS_SMB311
3609     bool "SMB3.1.1 network file system support (Experimental)"
3610     depends on CIFS
3611     + select CRYPTO_SHA512
3612    
3613     help
3614     This enables experimental support for the newest, SMB3.1.1, dialect.
3615     diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
3616     index f2b0a7f124da..a6ef088e057b 100644
3617     --- a/fs/cifs/cifsencrypt.c
3618     +++ b/fs/cifs/cifsencrypt.c
3619     @@ -36,37 +36,6 @@
3620     #include <crypto/skcipher.h>
3621     #include <crypto/aead.h>
3622    
3623     -static int
3624     -cifs_crypto_shash_md5_allocate(struct TCP_Server_Info *server)
3625     -{
3626     - int rc;
3627     - unsigned int size;
3628     -
3629     - if (server->secmech.sdescmd5 != NULL)
3630     - return 0; /* already allocated */
3631     -
3632     - server->secmech.md5 = crypto_alloc_shash("md5", 0, 0);
3633     - if (IS_ERR(server->secmech.md5)) {
3634     - cifs_dbg(VFS, "could not allocate crypto md5\n");
3635     - rc = PTR_ERR(server->secmech.md5);
3636     - server->secmech.md5 = NULL;
3637     - return rc;
3638     - }
3639     -
3640     - size = sizeof(struct shash_desc) +
3641     - crypto_shash_descsize(server->secmech.md5);
3642     - server->secmech.sdescmd5 = kmalloc(size, GFP_KERNEL);
3643     - if (!server->secmech.sdescmd5) {
3644     - crypto_free_shash(server->secmech.md5);
3645     - server->secmech.md5 = NULL;
3646     - return -ENOMEM;
3647     - }
3648     - server->secmech.sdescmd5->shash.tfm = server->secmech.md5;
3649     - server->secmech.sdescmd5->shash.flags = 0x0;
3650     -
3651     - return 0;
3652     -}
3653     -
3654     int __cifs_calc_signature(struct smb_rqst *rqst,
3655     struct TCP_Server_Info *server, char *signature,
3656     struct shash_desc *shash)
3657     @@ -132,13 +101,10 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
3658     if (!rqst->rq_iov || !signature || !server)
3659     return -EINVAL;
3660    
3661     - if (!server->secmech.sdescmd5) {
3662     - rc = cifs_crypto_shash_md5_allocate(server);
3663     - if (rc) {
3664     - cifs_dbg(VFS, "%s: Can't alloc md5 crypto\n", __func__);
3665     - return -1;
3666     - }
3667     - }
3668     + rc = cifs_alloc_hash("md5", &server->secmech.md5,
3669     + &server->secmech.sdescmd5);
3670     + if (rc)
3671     + return -1;
3672    
3673     rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
3674     if (rc) {
3675     @@ -663,37 +629,6 @@ CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash)
3676     return rc;
3677     }
3678    
3679     -static int crypto_hmacmd5_alloc(struct TCP_Server_Info *server)
3680     -{
3681     - int rc;
3682     - unsigned int size;
3683     -
3684     - /* check if already allocated */
3685     - if (server->secmech.sdeschmacmd5)
3686     - return 0;
3687     -
3688     - server->secmech.hmacmd5 = crypto_alloc_shash("hmac(md5)", 0, 0);
3689     - if (IS_ERR(server->secmech.hmacmd5)) {
3690     - cifs_dbg(VFS, "could not allocate crypto hmacmd5\n");
3691     - rc = PTR_ERR(server->secmech.hmacmd5);
3692     - server->secmech.hmacmd5 = NULL;
3693     - return rc;
3694     - }
3695     -
3696     - size = sizeof(struct shash_desc) +
3697     - crypto_shash_descsize(server->secmech.hmacmd5);
3698     - server->secmech.sdeschmacmd5 = kmalloc(size, GFP_KERNEL);
3699     - if (!server->secmech.sdeschmacmd5) {
3700     - crypto_free_shash(server->secmech.hmacmd5);
3701     - server->secmech.hmacmd5 = NULL;
3702     - return -ENOMEM;
3703     - }
3704     - server->secmech.sdeschmacmd5->shash.tfm = server->secmech.hmacmd5;
3705     - server->secmech.sdeschmacmd5->shash.flags = 0x0;
3706     -
3707     - return 0;
3708     -}
3709     -
3710     int
3711     setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
3712     {
3713     @@ -757,9 +692,10 @@ setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp)
3714    
3715     mutex_lock(&ses->server->srv_mutex);
3716    
3717     - rc = crypto_hmacmd5_alloc(ses->server);
3718     + rc = cifs_alloc_hash("hmac(md5)",
3719     + &ses->server->secmech.hmacmd5,
3720     + &ses->server->secmech.sdeschmacmd5);
3721     if (rc) {
3722     - cifs_dbg(VFS, "could not crypto alloc hmacmd5 rc %d\n", rc);
3723     goto unlock;
3724     }
3725    
3726     @@ -893,6 +829,11 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
3727     server->secmech.md5 = NULL;
3728     }
3729    
3730     + if (server->secmech.sha512) {
3731     + crypto_free_shash(server->secmech.sha512);
3732     + server->secmech.sha512 = NULL;
3733     + }
3734     +
3735     if (server->secmech.hmacmd5) {
3736     crypto_free_shash(server->secmech.hmacmd5);
3737     server->secmech.hmacmd5 = NULL;
3738     @@ -916,4 +857,6 @@ cifs_crypto_secmech_release(struct TCP_Server_Info *server)
3739     server->secmech.sdeschmacmd5 = NULL;
3740     kfree(server->secmech.sdescmd5);
3741     server->secmech.sdescmd5 = NULL;
3742     + kfree(server->secmech.sdescsha512);
3743     + server->secmech.sdescsha512 = NULL;
3744     }
3745     diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
3746     index 8c8b75d33f31..dbcd2e066066 100644
3747     --- a/fs/cifs/cifsfs.c
3748     +++ b/fs/cifs/cifsfs.c
3749     @@ -1476,6 +1476,7 @@ MODULE_SOFTDEP("pre: nls");
3750     MODULE_SOFTDEP("pre: aes");
3751     MODULE_SOFTDEP("pre: cmac");
3752     MODULE_SOFTDEP("pre: sha256");
3753     +MODULE_SOFTDEP("pre: sha512");
3754     MODULE_SOFTDEP("pre: aead2");
3755     MODULE_SOFTDEP("pre: ccm");
3756     module_init(init_cifs)
3757     diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
3758     index e185b2853eab..33d6eb58ce34 100644
3759     --- a/fs/cifs/cifsglob.h
3760     +++ b/fs/cifs/cifsglob.h
3761     @@ -130,10 +130,12 @@ struct cifs_secmech {
3762     struct crypto_shash *md5; /* md5 hash function */
3763     struct crypto_shash *hmacsha256; /* hmac-sha256 hash function */
3764     struct crypto_shash *cmacaes; /* block-cipher based MAC function */
3765     + struct crypto_shash *sha512; /* sha512 hash function */
3766     struct sdesc *sdeschmacmd5; /* ctxt to generate ntlmv2 hash, CR1 */
3767     struct sdesc *sdescmd5; /* ctxt to generate cifs/smb signature */
3768     struct sdesc *sdeschmacsha256; /* ctxt to generate smb2 signature */
3769     struct sdesc *sdesccmacaes; /* ctxt to generate smb3 signature */
3770     + struct sdesc *sdescsha512; /* ctxt to generate smb3.11 signing key */
3771     struct crypto_aead *ccmaesencrypt; /* smb3 encryption aead */
3772     struct crypto_aead *ccmaesdecrypt; /* smb3 decryption aead */
3773     };
3774     @@ -1449,6 +1451,7 @@ struct dfs_info3_param {
3775     #define CIFS_FATTR_NEED_REVAL 0x4
3776     #define CIFS_FATTR_INO_COLLISION 0x8
3777     #define CIFS_FATTR_UNKNOWN_NLINK 0x10
3778     +#define CIFS_FATTR_FAKE_ROOT_INO 0x20
3779    
3780     struct cifs_fattr {
3781     u32 cf_flags;
3782     diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
3783     index 4143c9dec463..762d513a5087 100644
3784     --- a/fs/cifs/cifsproto.h
3785     +++ b/fs/cifs/cifsproto.h
3786     @@ -538,4 +538,9 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
3787     struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
3788     void cifs_aio_ctx_release(struct kref *refcount);
3789     int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
3790     +
3791     +int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
3792     + struct sdesc **sdesc);
3793     +void cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc);
3794     +
3795     #endif /* _CIFSPROTO_H */
3796     diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
3797     index 7c732cb44164..0c7b7e2a0919 100644
3798     --- a/fs/cifs/inode.c
3799     +++ b/fs/cifs/inode.c
3800     @@ -707,6 +707,18 @@ cifs_get_file_info(struct file *filp)
3801     return rc;
3802     }
3803    
3804     +/* Simple function to return a 64 bit hash of string. Rarely called */
3805     +static __u64 simple_hashstr(const char *str)
3806     +{
3807     + const __u64 hash_mult = 1125899906842597L; /* a big enough prime */
3808     + __u64 hash = 0;
3809     +
3810     + while (*str)
3811     + hash = (hash + (__u64) *str++) * hash_mult;
3812     +
3813     + return hash;
3814     +}
3815     +
3816     int
3817     cifs_get_inode_info(struct inode **inode, const char *full_path,
3818     FILE_ALL_INFO *data, struct super_block *sb, int xid,
3819     @@ -816,6 +828,14 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
3820     tmprc);
3821     fattr.cf_uniqueid = iunique(sb, ROOT_I);
3822     cifs_autodisable_serverino(cifs_sb);
3823     + } else if ((fattr.cf_uniqueid == 0) &&
3824     + strlen(full_path) == 0) {
3825     + /* some servers ret bad root ino ie 0 */
3826     + cifs_dbg(FYI, "Invalid (0) inodenum\n");
3827     + fattr.cf_flags |=
3828     + CIFS_FATTR_FAKE_ROOT_INO;
3829     + fattr.cf_uniqueid =
3830     + simple_hashstr(tcon->treeName);
3831     }
3832     }
3833     } else
3834     @@ -832,6 +852,16 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
3835     &fattr.cf_uniqueid, data);
3836     if (tmprc)
3837     fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
3838     + else if ((fattr.cf_uniqueid == 0) &&
3839     + strlen(full_path) == 0) {
3840     + /*
3841     + * Reuse existing root inode num since
3842     + * inum zero for root causes ls of . and .. to
3843     + * not be returned
3844     + */
3845     + cifs_dbg(FYI, "Srv ret 0 inode num for root\n");
3846     + fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
3847     + }
3848     } else
3849     fattr.cf_uniqueid = CIFS_I(*inode)->uniqueid;
3850     }
3851     @@ -893,6 +923,9 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
3852     }
3853    
3854     cgii_exit:
3855     + if ((*inode) && ((*inode)->i_ino == 0))
3856     + cifs_dbg(FYI, "inode number of zero returned\n");
3857     +
3858     kfree(buf);
3859     cifs_put_tlink(tlink);
3860     return rc;
3861     diff --git a/fs/cifs/link.c b/fs/cifs/link.c
3862     index 60b5a11ee11b..889a840172eb 100644
3863     --- a/fs/cifs/link.c
3864     +++ b/fs/cifs/link.c
3865     @@ -50,25 +50,12 @@ static int
3866     symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
3867     {
3868     int rc;
3869     - unsigned int size;
3870     - struct crypto_shash *md5;
3871     - struct sdesc *sdescmd5;
3872     -
3873     - md5 = crypto_alloc_shash("md5", 0, 0);
3874     - if (IS_ERR(md5)) {
3875     - rc = PTR_ERR(md5);
3876     - cifs_dbg(VFS, "%s: Crypto md5 allocation error %d\n",
3877     - __func__, rc);
3878     - return rc;
3879     - }
3880     - size = sizeof(struct shash_desc) + crypto_shash_descsize(md5);
3881     - sdescmd5 = kmalloc(size, GFP_KERNEL);
3882     - if (!sdescmd5) {
3883     - rc = -ENOMEM;
3884     + struct crypto_shash *md5 = NULL;
3885     + struct sdesc *sdescmd5 = NULL;
3886     +
3887     + rc = cifs_alloc_hash("md5", &md5, &sdescmd5);
3888     + if (rc)
3889     goto symlink_hash_err;
3890     - }
3891     - sdescmd5->shash.tfm = md5;
3892     - sdescmd5->shash.flags = 0x0;
3893    
3894     rc = crypto_shash_init(&sdescmd5->shash);
3895     if (rc) {
3896     @@ -85,9 +72,7 @@ symlink_hash(unsigned int link_len, const char *link_str, u8 *md5_hash)
3897     cifs_dbg(VFS, "%s: Could not generate md5 hash\n", __func__);
3898    
3899     symlink_hash_err:
3900     - crypto_free_shash(md5);
3901     - kfree(sdescmd5);
3902     -
3903     + cifs_free_hash(&md5, &sdescmd5);
3904     return rc;
3905     }
3906    
3907     diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
3908     index a0dbced4a45c..460084a8eac5 100644
3909     --- a/fs/cifs/misc.c
3910     +++ b/fs/cifs/misc.c
3911     @@ -848,3 +848,57 @@ setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
3912     iov_iter_bvec(&ctx->iter, ITER_BVEC | rw, ctx->bv, npages, ctx->len);
3913     return 0;
3914     }
3915     +
3916     +/**
3917     + * cifs_alloc_hash - allocate hash and hash context together
3918     + *
3919     + * The caller has to make sure @sdesc is initialized to either NULL or
3920     + * a valid context. Both can be freed via cifs_free_hash().
3921     + */
3922     +int
3923     +cifs_alloc_hash(const char *name,
3924     + struct crypto_shash **shash, struct sdesc **sdesc)
3925     +{
3926     + int rc = 0;
3927     + size_t size;
3928     +
3929     + if (*sdesc != NULL)
3930     + return 0;
3931     +
3932     + *shash = crypto_alloc_shash(name, 0, 0);
3933     + if (IS_ERR(*shash)) {
3934     + cifs_dbg(VFS, "could not allocate crypto %s\n", name);
3935     + rc = PTR_ERR(*shash);
3936     + *shash = NULL;
3937     + *sdesc = NULL;
3938     + return rc;
3939     + }
3940     +
3941     + size = sizeof(struct shash_desc) + crypto_shash_descsize(*shash);
3942     + *sdesc = kmalloc(size, GFP_KERNEL);
3943     + if (*sdesc == NULL) {
3944     + cifs_dbg(VFS, "no memory left to allocate crypto %s\n", name);
3945     + crypto_free_shash(*shash);
3946     + *shash = NULL;
3947     + return -ENOMEM;
3948     + }
3949     +
3950     + (*sdesc)->shash.tfm = *shash;
3951     + (*sdesc)->shash.flags = 0x0;
3952     + return 0;
3953     +}
3954     +
3955     +/**
3956     + * cifs_free_hash - free hash and hash context together
3957     + *
3958     + * Freeing a NULL hash or context is safe.
3959     + */
3960     +void
3961     +cifs_free_hash(struct crypto_shash **shash, struct sdesc **sdesc)
3962     +{
3963     + kfree(*sdesc);
3964     + *sdesc = NULL;
3965     + if (*shash)
3966     + crypto_free_shash(*shash);
3967     + *shash = NULL;
3968     +}
3969     diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
3970     index bdb963d0ba32..eef875da7c0b 100644
3971     --- a/fs/cifs/smb2ops.c
3972     +++ b/fs/cifs/smb2ops.c
3973     @@ -2060,6 +2060,15 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, struct smb_rqst *old_rq)
3974     inc_rfc1001_len(tr_hdr, orig_len);
3975     }
3976    
3977     +/* We can not use the normal sg_set_buf() as we will sometimes pass a
3978     + * stack object as buf.
3979     + */
3980     +static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
3981     + unsigned int buflen)
3982     +{
3983     + sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
3984     +}
3985     +
3986     static struct scatterlist *
3987     init_sg(struct smb_rqst *rqst, u8 *sign)
3988     {
3989     @@ -2074,16 +2083,16 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
3990     return NULL;
3991    
3992     sg_init_table(sg, sg_len);
3993     - sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len);
3994     + smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 24, assoc_data_len);
3995     for (i = 1; i < rqst->rq_nvec; i++)
3996     - sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
3997     + smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
3998     rqst->rq_iov[i].iov_len);
3999     for (j = 0; i < sg_len - 1; i++, j++) {
4000     unsigned int len = (j < rqst->rq_npages - 1) ? rqst->rq_pagesz
4001     : rqst->rq_tailsz;
4002     sg_set_page(&sg[i], rqst->rq_pages[j], len, 0);
4003     }
4004     - sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
4005     + smb2_sg_set_buf(&sg[sg_len - 1], sign, SMB2_SIGNATURE_SIZE);
4006     return sg;
4007     }
4008    
4009     diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
4010     index 66af1f8a13cc..49779d952cd5 100644
4011     --- a/fs/cifs/smb2pdu.c
4012     +++ b/fs/cifs/smb2pdu.c
4013     @@ -1773,8 +1773,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
4014     rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
4015     &name_len,
4016     tcon->treeName, path);
4017     - if (rc)
4018     + if (rc) {
4019     + cifs_small_buf_release(req);
4020     return rc;
4021     + }
4022     req->NameLength = cpu_to_le16(name_len * 2);
4023     uni_path_len = copy_size;
4024     path = copy_path;
4025     @@ -1785,8 +1787,10 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
4026     if (uni_path_len % 8 != 0) {
4027     copy_size = roundup(uni_path_len, 8);
4028     copy_path = kzalloc(copy_size, GFP_KERNEL);
4029     - if (!copy_path)
4030     + if (!copy_path) {
4031     + cifs_small_buf_release(req);
4032     return -ENOMEM;
4033     + }
4034     memcpy((char *)copy_path, (const char *)path,
4035     uni_path_len);
4036     uni_path_len = copy_size;
4037     diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
4038     index e9ab5227e7a8..6fe4898f6975 100644
4039     --- a/fs/cifs/smb2proto.h
4040     +++ b/fs/cifs/smb2proto.h
4041     @@ -203,4 +203,7 @@ extern int smb3_validate_negotiate(const unsigned int, struct cifs_tcon *);
4042    
4043     extern enum securityEnum smb2_select_sectype(struct TCP_Server_Info *,
4044     enum securityEnum);
4045     +#ifdef CONFIG_CIFS_SMB311
4046     +extern int smb311_crypto_shash_allocate(struct TCP_Server_Info *server);
4047     +#endif
4048     #endif /* _SMB2PROTO_H */
4049     diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
4050     index 99493946e2f9..bf49cb73b9e6 100644
4051     --- a/fs/cifs/smb2transport.c
4052     +++ b/fs/cifs/smb2transport.c
4053     @@ -43,77 +43,62 @@
4054     static int
4055     smb2_crypto_shash_allocate(struct TCP_Server_Info *server)
4056     {
4057     - int rc;
4058     - unsigned int size;
4059     + return cifs_alloc_hash("hmac(sha256)",
4060     + &server->secmech.hmacsha256,
4061     + &server->secmech.sdeschmacsha256);
4062     +}
4063    
4064     - if (server->secmech.sdeschmacsha256 != NULL)
4065     - return 0; /* already allocated */
4066     +static int
4067     +smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
4068     +{
4069     + struct cifs_secmech *p = &server->secmech;
4070     + int rc;
4071    
4072     - server->secmech.hmacsha256 = crypto_alloc_shash("hmac(sha256)", 0, 0);
4073     - if (IS_ERR(server->secmech.hmacsha256)) {
4074     - cifs_dbg(VFS, "could not allocate crypto hmacsha256\n");
4075     - rc = PTR_ERR(server->secmech.hmacsha256);
4076     - server->secmech.hmacsha256 = NULL;
4077     - return rc;
4078     - }
4079     + rc = cifs_alloc_hash("hmac(sha256)",
4080     + &p->hmacsha256,
4081     + &p->sdeschmacsha256);
4082     + if (rc)
4083     + goto err;
4084    
4085     - size = sizeof(struct shash_desc) +
4086     - crypto_shash_descsize(server->secmech.hmacsha256);
4087     - server->secmech.sdeschmacsha256 = kmalloc(size, GFP_KERNEL);
4088     - if (!server->secmech.sdeschmacsha256) {
4089     - crypto_free_shash(server->secmech.hmacsha256);
4090     - server->secmech.hmacsha256 = NULL;
4091     - return -ENOMEM;
4092     - }
4093     - server->secmech.sdeschmacsha256->shash.tfm = server->secmech.hmacsha256;
4094     - server->secmech.sdeschmacsha256->shash.flags = 0x0;
4095     + rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
4096     + if (rc)
4097     + goto err;
4098    
4099     return 0;
4100     +err:
4101     + cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
4102     + return rc;
4103     }
4104    
4105     -static int
4106     -smb3_crypto_shash_allocate(struct TCP_Server_Info *server)
4107     +#ifdef CONFIG_CIFS_SMB311
4108     +int
4109     +smb311_crypto_shash_allocate(struct TCP_Server_Info *server)
4110     {
4111     - unsigned int size;
4112     - int rc;
4113     -
4114     - if (server->secmech.sdesccmacaes != NULL)
4115     - return 0; /* already allocated */
4116     + struct cifs_secmech *p = &server->secmech;
4117     + int rc = 0;
4118    
4119     - rc = smb2_crypto_shash_allocate(server);
4120     + rc = cifs_alloc_hash("hmac(sha256)",
4121     + &p->hmacsha256,
4122     + &p->sdeschmacsha256);
4123     if (rc)
4124     return rc;
4125    
4126     - server->secmech.cmacaes = crypto_alloc_shash("cmac(aes)", 0, 0);
4127     - if (IS_ERR(server->secmech.cmacaes)) {
4128     - cifs_dbg(VFS, "could not allocate crypto cmac-aes");
4129     - kfree(server->secmech.sdeschmacsha256);
4130     - server->secmech.sdeschmacsha256 = NULL;
4131     - crypto_free_shash(server->secmech.hmacsha256);
4132     - server->secmech.hmacsha256 = NULL;
4133     - rc = PTR_ERR(server->secmech.cmacaes);
4134     - server->secmech.cmacaes = NULL;
4135     - return rc;
4136     - }
4137     + rc = cifs_alloc_hash("cmac(aes)", &p->cmacaes, &p->sdesccmacaes);
4138     + if (rc)
4139     + goto err;
4140    
4141     - size = sizeof(struct shash_desc) +
4142     - crypto_shash_descsize(server->secmech.cmacaes);
4143     - server->secmech.sdesccmacaes = kmalloc(size, GFP_KERNEL);
4144     - if (!server->secmech.sdesccmacaes) {
4145     - cifs_dbg(VFS, "%s: Can't alloc cmacaes\n", __func__);
4146     - kfree(server->secmech.sdeschmacsha256);
4147     - server->secmech.sdeschmacsha256 = NULL;
4148     - crypto_free_shash(server->secmech.hmacsha256);
4149     - crypto_free_shash(server->secmech.cmacaes);
4150     - server->secmech.hmacsha256 = NULL;
4151     - server->secmech.cmacaes = NULL;
4152     - return -ENOMEM;
4153     - }
4154     - server->secmech.sdesccmacaes->shash.tfm = server->secmech.cmacaes;
4155     - server->secmech.sdesccmacaes->shash.flags = 0x0;
4156     + rc = cifs_alloc_hash("sha512", &p->sha512, &p->sdescsha512);
4157     + if (rc)
4158     + goto err;
4159    
4160     return 0;
4161     +
4162     +err:
4163     + cifs_free_hash(&p->cmacaes, &p->sdesccmacaes);
4164     + cifs_free_hash(&p->hmacsha256, &p->sdeschmacsha256);
4165     + return rc;
4166     }
4167     +#endif
4168    
4169     static struct cifs_ses *
4170     smb2_find_smb_ses_unlocked(struct TCP_Server_Info *server, __u64 ses_id)
4171     @@ -457,7 +442,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
4172     cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
4173     return rc;
4174     }
4175     -
4176     +
4177     rc = __cifs_calc_signature(rqst, server, sigptr,
4178     &server->secmech.sdesccmacaes->shash);
4179    
4180     diff --git a/fs/cifs/smbencrypt.c b/fs/cifs/smbencrypt.c
4181     index c12bffefa3c9..a0b80ac651a6 100644
4182     --- a/fs/cifs/smbencrypt.c
4183     +++ b/fs/cifs/smbencrypt.c
4184     @@ -121,25 +121,12 @@ int
4185     mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
4186     {
4187     int rc;
4188     - unsigned int size;
4189     - struct crypto_shash *md4;
4190     - struct sdesc *sdescmd4;
4191     -
4192     - md4 = crypto_alloc_shash("md4", 0, 0);
4193     - if (IS_ERR(md4)) {
4194     - rc = PTR_ERR(md4);
4195     - cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n",
4196     - __func__, rc);
4197     - return rc;
4198     - }
4199     - size = sizeof(struct shash_desc) + crypto_shash_descsize(md4);
4200     - sdescmd4 = kmalloc(size, GFP_KERNEL);
4201     - if (!sdescmd4) {
4202     - rc = -ENOMEM;
4203     + struct crypto_shash *md4 = NULL;
4204     + struct sdesc *sdescmd4 = NULL;
4205     +
4206     + rc = cifs_alloc_hash("md4", &md4, &sdescmd4);
4207     + if (rc)
4208     goto mdfour_err;
4209     - }
4210     - sdescmd4->shash.tfm = md4;
4211     - sdescmd4->shash.flags = 0x0;
4212    
4213     rc = crypto_shash_init(&sdescmd4->shash);
4214     if (rc) {
4215     @@ -156,9 +143,7 @@ mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len)
4216     cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__);
4217    
4218     mdfour_err:
4219     - crypto_free_shash(md4);
4220     - kfree(sdescmd4);
4221     -
4222     + cifs_free_hash(&md4, &sdescmd4);
4223     return rc;
4224     }
4225    
4226     diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
4227     index d5ddfb96c83c..db5be5e2e6f2 100644
4228     --- a/fs/ext4/balloc.c
4229     +++ b/fs/ext4/balloc.c
4230     @@ -243,8 +243,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
4231     */
4232     ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
4233     sb->s_blocksize * 8, bh->b_data);
4234     - ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
4235     - ext4_group_desc_csum_set(sb, block_group, gdp);
4236     return 0;
4237     }
4238    
4239     @@ -448,6 +446,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
4240     err = ext4_init_block_bitmap(sb, bh, block_group, desc);
4241     set_bitmap_uptodate(bh);
4242     set_buffer_uptodate(bh);
4243     + set_buffer_verified(bh);
4244     ext4_unlock_group(sb, block_group);
4245     unlock_buffer(bh);
4246     if (err) {
4247     diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
4248     index 2d593201cf7a..7c70b08d104c 100644
4249     --- a/fs/ext4/ext4_jbd2.c
4250     +++ b/fs/ext4/ext4_jbd2.c
4251     @@ -166,13 +166,6 @@ int __ext4_journal_get_write_access(const char *where, unsigned int line,
4252     might_sleep();
4253    
4254     if (ext4_handle_valid(handle)) {
4255     - struct super_block *sb;
4256     -
4257     - sb = handle->h_transaction->t_journal->j_private;
4258     - if (unlikely(ext4_forced_shutdown(EXT4_SB(sb)))) {
4259     - jbd2_journal_abort_handle(handle);
4260     - return -EIO;
4261     - }
4262     err = jbd2_journal_get_write_access(handle, bh);
4263     if (err)
4264     ext4_journal_abort_handle(where, line, __func__, bh,
4265     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
4266     index 207588dc803e..7ec55dd8db56 100644
4267     --- a/fs/ext4/ialloc.c
4268     +++ b/fs/ext4/ialloc.c
4269     @@ -66,44 +66,6 @@ void ext4_mark_bitmap_end(int start_bit, int end_bit, char *bitmap)
4270     memset(bitmap + (i >> 3), 0xff, (end_bit - i) >> 3);
4271     }
4272    
4273     -/* Initializes an uninitialized inode bitmap */
4274     -static int ext4_init_inode_bitmap(struct super_block *sb,
4275     - struct buffer_head *bh,
4276     - ext4_group_t block_group,
4277     - struct ext4_group_desc *gdp)
4278     -{
4279     - struct ext4_group_info *grp;
4280     - struct ext4_sb_info *sbi = EXT4_SB(sb);
4281     - J_ASSERT_BH(bh, buffer_locked(bh));
4282     -
4283     - /* If checksum is bad mark all blocks and inodes use to prevent
4284     - * allocation, essentially implementing a per-group read-only flag. */
4285     - if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
4286     - grp = ext4_get_group_info(sb, block_group);
4287     - if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
4288     - percpu_counter_sub(&sbi->s_freeclusters_counter,
4289     - grp->bb_free);
4290     - set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
4291     - if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
4292     - int count;
4293     - count = ext4_free_inodes_count(sb, gdp);
4294     - percpu_counter_sub(&sbi->s_freeinodes_counter,
4295     - count);
4296     - }
4297     - set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
4298     - return -EFSBADCRC;
4299     - }
4300     -
4301     - memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
4302     - ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
4303     - bh->b_data);
4304     - ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
4305     - EXT4_INODES_PER_GROUP(sb) / 8);
4306     - ext4_group_desc_csum_set(sb, block_group, gdp);
4307     -
4308     - return 0;
4309     -}
4310     -
4311     void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate)
4312     {
4313     if (uptodate) {
4314     @@ -187,17 +149,14 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
4315    
4316     ext4_lock_group(sb, block_group);
4317     if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
4318     - err = ext4_init_inode_bitmap(sb, bh, block_group, desc);
4319     + memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
4320     + ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
4321     + sb->s_blocksize * 8, bh->b_data);
4322     set_bitmap_uptodate(bh);
4323     set_buffer_uptodate(bh);
4324     set_buffer_verified(bh);
4325     ext4_unlock_group(sb, block_group);
4326     unlock_buffer(bh);
4327     - if (err) {
4328     - ext4_error(sb, "Failed to init inode bitmap for group "
4329     - "%u: %d", block_group, err);
4330     - goto out;
4331     - }
4332     return bh;
4333     }
4334     ext4_unlock_group(sb, block_group);
4335     diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
4336     index 0b9f3f284799..09014c3c4207 100644
4337     --- a/fs/ext4/inode.c
4338     +++ b/fs/ext4/inode.c
4339     @@ -3614,7 +3614,6 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
4340     {
4341     struct file *file = iocb->ki_filp;
4342     struct inode *inode = file->f_mapping->host;
4343     - struct ext4_inode_info *ei = EXT4_I(inode);
4344     ssize_t ret;
4345     loff_t offset = iocb->ki_pos;
4346     size_t count = iov_iter_count(iter);
4347     @@ -3638,7 +3637,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
4348     goto out;
4349     }
4350     orphan = 1;
4351     - ei->i_disksize = inode->i_size;
4352     + ext4_update_i_disksize(inode, inode->i_size);
4353     ext4_journal_stop(handle);
4354     }
4355    
4356     @@ -3746,7 +3745,7 @@ static ssize_t ext4_direct_IO_write(struct kiocb *iocb, struct iov_iter *iter)
4357     if (ret > 0) {
4358     loff_t end = offset + ret;
4359     if (end > inode->i_size) {
4360     - ei->i_disksize = end;
4361     + ext4_update_i_disksize(inode, end);
4362     i_size_write(inode, end);
4363     /*
4364     * We're going to return a positive `ret'
4365     @@ -4686,6 +4685,12 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4366     goto bad_inode;
4367     raw_inode = ext4_raw_inode(&iloc);
4368    
4369     + if ((ino == EXT4_ROOT_INO) && (raw_inode->i_links_count == 0)) {
4370     + EXT4_ERROR_INODE(inode, "root inode unallocated");
4371     + ret = -EFSCORRUPTED;
4372     + goto bad_inode;
4373     + }
4374     +
4375     if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4376     ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4377     if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4378     diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
4379     index d97f40396765..1eb68e626931 100644
4380     --- a/fs/ext4/ioctl.c
4381     +++ b/fs/ext4/ioctl.c
4382     @@ -493,15 +493,13 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
4383     set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
4384     if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
4385     (void) ext4_force_commit(sb);
4386     - jbd2_journal_abort(sbi->s_journal, 0);
4387     + jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
4388     }
4389     break;
4390     case EXT4_GOING_FLAGS_NOLOGFLUSH:
4391     set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
4392     - if (sbi->s_journal && !is_journal_aborted(sbi->s_journal)) {
4393     - msleep(100);
4394     - jbd2_journal_abort(sbi->s_journal, 0);
4395     - }
4396     + if (sbi->s_journal && !is_journal_aborted(sbi->s_journal))
4397     + jbd2_journal_abort(sbi->s_journal, -ESHUTDOWN);
4398     break;
4399     default:
4400     return -EINVAL;
4401     diff --git a/fs/ext4/super.c b/fs/ext4/super.c
4402     index 16d247f056e2..3a605c672649 100644
4403     --- a/fs/ext4/super.c
4404     +++ b/fs/ext4/super.c
4405     @@ -2330,6 +2330,8 @@ static int ext4_check_descriptors(struct super_block *sb,
4406     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
4407     "Block bitmap for group %u overlaps "
4408     "superblock", i);
4409     + if (!sb_rdonly(sb))
4410     + return 0;
4411     }
4412     if (block_bitmap < first_block || block_bitmap > last_block) {
4413     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
4414     @@ -2342,6 +2344,8 @@ static int ext4_check_descriptors(struct super_block *sb,
4415     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
4416     "Inode bitmap for group %u overlaps "
4417     "superblock", i);
4418     + if (!sb_rdonly(sb))
4419     + return 0;
4420     }
4421     if (inode_bitmap < first_block || inode_bitmap > last_block) {
4422     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
4423     @@ -2354,6 +2358,8 @@ static int ext4_check_descriptors(struct super_block *sb,
4424     ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
4425     "Inode table for group %u overlaps "
4426     "superblock", i);
4427     + if (!sb_rdonly(sb))
4428     + return 0;
4429     }
4430     if (inode_table < first_block ||
4431     inode_table + sbi->s_itb_per_group - 1 > last_block) {
4432     @@ -3490,15 +3496,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
4433     }
4434    
4435     /* Load the checksum driver */
4436     - if (ext4_has_feature_metadata_csum(sb) ||
4437     - ext4_has_feature_ea_inode(sb)) {
4438     - sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
4439     - if (IS_ERR(sbi->s_chksum_driver)) {
4440     - ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
4441     - ret = PTR_ERR(sbi->s_chksum_driver);
4442     - sbi->s_chksum_driver = NULL;
4443     - goto failed_mount;
4444     - }
4445     + sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
4446     + if (IS_ERR(sbi->s_chksum_driver)) {
4447     + ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
4448     + ret = PTR_ERR(sbi->s_chksum_driver);
4449     + sbi->s_chksum_driver = NULL;
4450     + goto failed_mount;
4451     }
4452    
4453     /* Check superblock checksum */
4454     diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
4455     index 218a7ba57819..1718354e6322 100644
4456     --- a/fs/ext4/xattr.c
4457     +++ b/fs/ext4/xattr.c
4458     @@ -194,10 +194,13 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
4459    
4460     /* Check the values */
4461     while (!IS_LAST_ENTRY(entry)) {
4462     - if (entry->e_value_size != 0 &&
4463     - entry->e_value_inum == 0) {
4464     + u32 size = le32_to_cpu(entry->e_value_size);
4465     +
4466     + if (size > EXT4_XATTR_SIZE_MAX)
4467     + return -EFSCORRUPTED;
4468     +
4469     + if (size != 0 && entry->e_value_inum == 0) {
4470     u16 offs = le16_to_cpu(entry->e_value_offs);
4471     - u32 size = le32_to_cpu(entry->e_value_size);
4472     void *value;
4473    
4474     /*
4475     @@ -221,25 +224,36 @@ ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
4476     }
4477    
4478     static inline int
4479     -ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
4480     +__ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
4481     + const char *function, unsigned int line)
4482     {
4483     - int error;
4484     + int error = -EFSCORRUPTED;
4485    
4486     if (buffer_verified(bh))
4487     return 0;
4488    
4489     if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
4490     BHDR(bh)->h_blocks != cpu_to_le32(1))
4491     - return -EFSCORRUPTED;
4492     + goto errout;
4493     + error = -EFSBADCRC;
4494     if (!ext4_xattr_block_csum_verify(inode, bh))
4495     - return -EFSBADCRC;
4496     + goto errout;
4497     error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
4498     bh->b_data);
4499     - if (!error)
4500     +errout:
4501     + if (error)
4502     + __ext4_error_inode(inode, function, line, 0,
4503     + "corrupted xattr block %llu",
4504     + (unsigned long long) bh->b_blocknr);
4505     + else
4506     set_buffer_verified(bh);
4507     return error;
4508     }
4509    
4510     +#define ext4_xattr_check_block(inode, bh) \
4511     + __ext4_xattr_check_block((inode), (bh), __func__, __LINE__)
4512     +
4513     +
4514     static int
4515     __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
4516     void *end, const char *function, unsigned int line)
4517     @@ -261,18 +275,22 @@ __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
4518     __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
4519    
4520     static int
4521     -ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
4522     - const char *name, int sorted)
4523     +xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
4524     + void *end, int name_index, const char *name, int sorted)
4525     {
4526     - struct ext4_xattr_entry *entry;
4527     + struct ext4_xattr_entry *entry, *next;
4528     size_t name_len;
4529     int cmp = 1;
4530    
4531     if (name == NULL)
4532     return -EINVAL;
4533     name_len = strlen(name);
4534     - entry = *pentry;
4535     - for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
4536     + for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
4537     + next = EXT4_XATTR_NEXT(entry);
4538     + if ((void *) next >= end) {
4539     + EXT4_ERROR_INODE(inode, "corrupted xattr entries");
4540     + return -EFSCORRUPTED;
4541     + }
4542     cmp = name_index - entry->e_name_index;
4543     if (!cmp)
4544     cmp = name_len - entry->e_name_len;
4545     @@ -494,6 +512,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
4546     struct buffer_head *bh = NULL;
4547     struct ext4_xattr_entry *entry;
4548     size_t size;
4549     + void *end;
4550     int error;
4551     struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
4552    
4553     @@ -510,20 +529,20 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
4554     goto cleanup;
4555     ea_bdebug(bh, "b_count=%d, refcount=%d",
4556     atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4557     - if (ext4_xattr_check_block(inode, bh)) {
4558     - EXT4_ERROR_INODE(inode, "bad block %llu",
4559     - EXT4_I(inode)->i_file_acl);
4560     - error = -EFSCORRUPTED;
4561     + error = ext4_xattr_check_block(inode, bh);
4562     + if (error)
4563     goto cleanup;
4564     - }
4565     ext4_xattr_block_cache_insert(ea_block_cache, bh);
4566     entry = BFIRST(bh);
4567     - error = ext4_xattr_find_entry(&entry, name_index, name, 1);
4568     + end = bh->b_data + bh->b_size;
4569     + error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
4570     if (error)
4571     goto cleanup;
4572     size = le32_to_cpu(entry->e_value_size);
4573     + error = -ERANGE;
4574     + if (unlikely(size > EXT4_XATTR_SIZE_MAX))
4575     + goto cleanup;
4576     if (buffer) {
4577     - error = -ERANGE;
4578     if (size > buffer_size)
4579     goto cleanup;
4580     if (entry->e_value_inum) {
4581     @@ -532,8 +551,12 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
4582     if (error)
4583     goto cleanup;
4584     } else {
4585     - memcpy(buffer, bh->b_data +
4586     - le16_to_cpu(entry->e_value_offs), size);
4587     + u16 offset = le16_to_cpu(entry->e_value_offs);
4588     + void *p = bh->b_data + offset;
4589     +
4590     + if (unlikely(p + size > end))
4591     + goto cleanup;
4592     + memcpy(buffer, p, size);
4593     }
4594     }
4595     error = size;
4596     @@ -567,12 +590,14 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
4597     if (error)
4598     goto cleanup;
4599     entry = IFIRST(header);
4600     - error = ext4_xattr_find_entry(&entry, name_index, name, 0);
4601     + error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
4602     if (error)
4603     goto cleanup;
4604     size = le32_to_cpu(entry->e_value_size);
4605     + error = -ERANGE;
4606     + if (unlikely(size > EXT4_XATTR_SIZE_MAX))
4607     + goto cleanup;
4608     if (buffer) {
4609     - error = -ERANGE;
4610     if (size > buffer_size)
4611     goto cleanup;
4612     if (entry->e_value_inum) {
4613     @@ -581,8 +606,12 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
4614     if (error)
4615     goto cleanup;
4616     } else {
4617     - memcpy(buffer, (void *)IFIRST(header) +
4618     - le16_to_cpu(entry->e_value_offs), size);
4619     + u16 offset = le16_to_cpu(entry->e_value_offs);
4620     + void *p = (void *)IFIRST(header) + offset;
4621     +
4622     + if (unlikely(p + size > end))
4623     + goto cleanup;
4624     + memcpy(buffer, p, size);
4625     }
4626     }
4627     error = size;
4628     @@ -675,12 +704,9 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
4629     goto cleanup;
4630     ea_bdebug(bh, "b_count=%d, refcount=%d",
4631     atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
4632     - if (ext4_xattr_check_block(inode, bh)) {
4633     - EXT4_ERROR_INODE(inode, "bad block %llu",
4634     - EXT4_I(inode)->i_file_acl);
4635     - error = -EFSCORRUPTED;
4636     + error = ext4_xattr_check_block(inode, bh);
4637     + if (error)
4638     goto cleanup;
4639     - }
4640     ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
4641     error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);
4642    
4643     @@ -807,10 +833,9 @@ int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
4644     goto out;
4645     }
4646    
4647     - if (ext4_xattr_check_block(inode, bh)) {
4648     - ret = -EFSCORRUPTED;
4649     + ret = ext4_xattr_check_block(inode, bh);
4650     + if (ret)
4651     goto out;
4652     - }
4653    
4654     for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
4655     entry = EXT4_XATTR_NEXT(entry))
4656     @@ -1792,19 +1817,16 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
4657     ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
4658     atomic_read(&(bs->bh->b_count)),
4659     le32_to_cpu(BHDR(bs->bh)->h_refcount));
4660     - if (ext4_xattr_check_block(inode, bs->bh)) {
4661     - EXT4_ERROR_INODE(inode, "bad block %llu",
4662     - EXT4_I(inode)->i_file_acl);
4663     - error = -EFSCORRUPTED;
4664     + error = ext4_xattr_check_block(inode, bs->bh);
4665     + if (error)
4666     goto cleanup;
4667     - }
4668     /* Find the named attribute. */
4669     bs->s.base = BHDR(bs->bh);
4670     bs->s.first = BFIRST(bs->bh);
4671     bs->s.end = bs->bh->b_data + bs->bh->b_size;
4672     bs->s.here = bs->s.first;
4673     - error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
4674     - i->name, 1);
4675     + error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
4676     + i->name_index, i->name, 1);
4677     if (error && error != -ENODATA)
4678     goto cleanup;
4679     bs->s.not_found = error;
4680     @@ -2163,8 +2185,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
4681     if (error)
4682     return error;
4683     /* Find the named attribute. */
4684     - error = ext4_xattr_find_entry(&is->s.here, i->name_index,
4685     - i->name, 0);
4686     + error = xattr_find_entry(inode, &is->s.here, is->s.end,
4687     + i->name_index, i->name, 0);
4688     if (error && error != -ENODATA)
4689     return error;
4690     is->s.not_found = error;
4691     @@ -2720,13 +2742,9 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
4692     error = -EIO;
4693     if (!bh)
4694     goto cleanup;
4695     - if (ext4_xattr_check_block(inode, bh)) {
4696     - EXT4_ERROR_INODE(inode, "bad block %llu",
4697     - EXT4_I(inode)->i_file_acl);
4698     - error = -EFSCORRUPTED;
4699     - brelse(bh);
4700     + error = ext4_xattr_check_block(inode, bh);
4701     + if (error)
4702     goto cleanup;
4703     - }
4704     base = BHDR(bh);
4705     end = bh->b_data + bh->b_size;
4706     min_offs = end - base;
4707     @@ -2883,11 +2901,8 @@ int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
4708     goto cleanup;
4709     }
4710     error = ext4_xattr_check_block(inode, bh);
4711     - if (error) {
4712     - EXT4_ERROR_INODE(inode, "bad block %llu (error %d)",
4713     - EXT4_I(inode)->i_file_acl, error);
4714     + if (error)
4715     goto cleanup;
4716     - }
4717    
4718     if (ext4_has_feature_ea_inode(inode->i_sb)) {
4719     for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
4720     diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
4721     index f8cc07588ac9..262b9314119f 100644
4722     --- a/fs/ext4/xattr.h
4723     +++ b/fs/ext4/xattr.h
4724     @@ -70,6 +70,17 @@ struct ext4_xattr_entry {
4725     EXT4_I(inode)->i_extra_isize))
4726     #define IFIRST(hdr) ((struct ext4_xattr_entry *)((hdr)+1))
4727    
4728     +/*
4729     + * XATTR_SIZE_MAX is currently 64k, but for the purposes of checking
4730     + * for file system consistency errors, we use a somewhat bigger value.
4731     + * This allows XATTR_SIZE_MAX to grow in the future, but by using this
4732     + * instead of INT_MAX for certain consistency checks, we don't need to
4733     + * worry about arithmetic overflows. (Actually XATTR_SIZE_MAX is
4734     + * defined in include/uapi/linux/limits.h, so changing it is
4735     + * not going to be trivial....)
4736     + */
4737     +#define EXT4_XATTR_SIZE_MAX (1 << 24)
4738     +
4739     /*
4740     * The minimum size of EA value when you start storing it in an external inode
4741     * size of block - size of header - size of 1 entry - 4 null bytes
4742     diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
4743     index 245c430a2e41..8a7ef9378bf6 100644
4744     --- a/fs/fs-writeback.c
4745     +++ b/fs/fs-writeback.c
4746     @@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits)
4747     */
4748     if (inode && inode_to_wb_is_valid(inode)) {
4749     struct bdi_writeback *wb;
4750     - bool locked, congested;
4751     + struct wb_lock_cookie lock_cookie = {};
4752     + bool congested;
4753    
4754     - wb = unlocked_inode_to_wb_begin(inode, &locked);
4755     + wb = unlocked_inode_to_wb_begin(inode, &lock_cookie);
4756     congested = wb_congested(wb, cong_bits);
4757     - unlocked_inode_to_wb_end(inode, locked);
4758     + unlocked_inode_to_wb_end(inode, &lock_cookie);
4759     return congested;
4760     }
4761    
4762     diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
4763     index 7d5ef3bf3f3e..61d48f0c41a1 100644
4764     --- a/fs/jbd2/journal.c
4765     +++ b/fs/jbd2/journal.c
4766     @@ -961,7 +961,7 @@ int __jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block)
4767     }
4768    
4769     /*
4770     - * This is a variaon of __jbd2_update_log_tail which checks for validity of
4771     + * This is a variation of __jbd2_update_log_tail which checks for validity of
4772     * provided log tail and locks j_checkpoint_mutex. So it is safe against races
4773     * with other threads updating log tail.
4774     */
4775     @@ -1404,6 +1404,9 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
4776     journal_superblock_t *sb = journal->j_superblock;
4777     int ret;
4778    
4779     + if (is_journal_aborted(journal))
4780     + return -EIO;
4781     +
4782     BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
4783     jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
4784     tail_block, tail_tid);
4785     @@ -1470,12 +1473,15 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
4786     void jbd2_journal_update_sb_errno(journal_t *journal)
4787     {
4788     journal_superblock_t *sb = journal->j_superblock;
4789     + int errcode;
4790    
4791     read_lock(&journal->j_state_lock);
4792     - jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
4793     - journal->j_errno);
4794     - sb->s_errno = cpu_to_be32(journal->j_errno);
4795     + errcode = journal->j_errno;
4796     read_unlock(&journal->j_state_lock);
4797     + if (errcode == -ESHUTDOWN)
4798     + errcode = 0;
4799     + jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
4800     + sb->s_errno = cpu_to_be32(errcode);
4801    
4802     jbd2_write_superblock(journal, REQ_SYNC | REQ_FUA);
4803     }
4804     @@ -2092,12 +2098,22 @@ void __jbd2_journal_abort_hard(journal_t *journal)
4805     * but don't do any other IO. */
4806     static void __journal_abort_soft (journal_t *journal, int errno)
4807     {
4808     - if (journal->j_flags & JBD2_ABORT)
4809     - return;
4810     + int old_errno;
4811    
4812     - if (!journal->j_errno)
4813     + write_lock(&journal->j_state_lock);
4814     + old_errno = journal->j_errno;
4815     + if (!journal->j_errno || errno == -ESHUTDOWN)
4816     journal->j_errno = errno;
4817    
4818     + if (journal->j_flags & JBD2_ABORT) {
4819     + write_unlock(&journal->j_state_lock);
4820     + if (!old_errno && old_errno != -ESHUTDOWN &&
4821     + errno == -ESHUTDOWN)
4822     + jbd2_journal_update_sb_errno(journal);
4823     + return;
4824     + }
4825     + write_unlock(&journal->j_state_lock);
4826     +
4827     __jbd2_journal_abort_hard(journal);
4828    
4829     if (errno) {
4830     diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
4831     index 153f1c6eb169..33e01de576d2 100644
4832     --- a/fs/jffs2/super.c
4833     +++ b/fs/jffs2/super.c
4834     @@ -342,7 +342,7 @@ static void jffs2_put_super (struct super_block *sb)
4835     static void jffs2_kill_sb(struct super_block *sb)
4836     {
4837     struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
4838     - if (!sb_rdonly(sb))
4839     + if (c && !sb_rdonly(sb))
4840     jffs2_stop_garbage_collect_thread(c);
4841     kill_mtd_super(sb);
4842     kfree(c);
4843     diff --git a/fs/namespace.c b/fs/namespace.c
4844     index adae9ffce91d..62b17aff1908 100644
4845     --- a/fs/namespace.c
4846     +++ b/fs/namespace.c
4847     @@ -1089,7 +1089,8 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
4848     goto out_free;
4849     }
4850    
4851     - mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);
4852     + mnt->mnt.mnt_flags = old->mnt.mnt_flags;
4853     + mnt->mnt.mnt_flags &= ~(MNT_WRITE_HOLD|MNT_MARKED|MNT_INTERNAL);
4854     /* Don't allow unprivileged users to change mount flags */
4855     if (flag & CL_UNPRIVILEGED) {
4856     mnt->mnt.mnt_flags |= MNT_LOCK_ATIME;
4857     diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
4858     index 3c7053207297..bd520450c37a 100644
4859     --- a/fs/notify/fanotify/fanotify.c
4860     +++ b/fs/notify/fanotify/fanotify.c
4861     @@ -90,7 +90,7 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
4862     u32 event_mask,
4863     const void *data, int data_type)
4864     {
4865     - __u32 marks_mask, marks_ignored_mask;
4866     + __u32 marks_mask = 0, marks_ignored_mask = 0;
4867     const struct path *path = data;
4868    
4869     pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p"
4870     @@ -106,24 +106,20 @@ static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,
4871     !d_can_lookup(path->dentry))
4872     return false;
4873    
4874     - if (inode_mark && vfsmnt_mark) {
4875     - marks_mask = (vfsmnt_mark->mask | inode_mark->mask);
4876     - marks_ignored_mask = (vfsmnt_mark->ignored_mask | inode_mark->ignored_mask);
4877     - } else if (inode_mark) {
4878     - /*
4879     - * if the event is for a child and this inode doesn't care about
4880     - * events on the child, don't send it!
4881     - */
4882     - if ((event_mask & FS_EVENT_ON_CHILD) &&
4883     - !(inode_mark->mask & FS_EVENT_ON_CHILD))
4884     - return false;
4885     - marks_mask = inode_mark->mask;
4886     - marks_ignored_mask = inode_mark->ignored_mask;
4887     - } else if (vfsmnt_mark) {
4888     - marks_mask = vfsmnt_mark->mask;
4889     - marks_ignored_mask = vfsmnt_mark->ignored_mask;
4890     - } else {
4891     - BUG();
4892     + /*
4893     + * if the event is for a child and this inode doesn't care about
4894     + * events on the child, don't send it!
4895     + */
4896     + if (inode_mark &&
4897     + (!(event_mask & FS_EVENT_ON_CHILD) ||
4898     + (inode_mark->mask & FS_EVENT_ON_CHILD))) {
4899     + marks_mask |= inode_mark->mask;
4900     + marks_ignored_mask |= inode_mark->ignored_mask;
4901     + }
4902     +
4903     + if (vfsmnt_mark) {
4904     + marks_mask |= vfsmnt_mark->mask;
4905     + marks_ignored_mask |= vfsmnt_mark->ignored_mask;
4906     }
4907    
4908     if (d_is_dir(path->dentry) &&
4909     diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c
4910     index 47ebd9bfd1a1..1997ce49ab46 100644
4911     --- a/fs/orangefs/super.c
4912     +++ b/fs/orangefs/super.c
4913     @@ -594,6 +594,11 @@ void orangefs_kill_sb(struct super_block *sb)
4914     /* provided sb cleanup */
4915     kill_anon_super(sb);
4916    
4917     + if (!ORANGEFS_SB(sb)) {
4918     + mutex_lock(&orangefs_request_mutex);
4919     + mutex_unlock(&orangefs_request_mutex);
4920     + return;
4921     + }
4922     /*
4923     * issue the unmount to userspace to tell it to remove the
4924     * dynamic mount info it has for this superblock
4925     diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
4926     index 69ff280bdfe8..2be907231375 100644
4927     --- a/fs/reiserfs/journal.c
4928     +++ b/fs/reiserfs/journal.c
4929     @@ -2643,7 +2643,7 @@ static int journal_init_dev(struct super_block *super,
4930     if (IS_ERR(journal->j_dev_bd)) {
4931     result = PTR_ERR(journal->j_dev_bd);
4932     journal->j_dev_bd = NULL;
4933     - reiserfs_warning(super,
4934     + reiserfs_warning(super, "sh-457",
4935     "journal_init_dev: Cannot open '%s': %i",
4936     jdev_name, result);
4937     return result;
4938     diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
4939     index 5496b17b959c..e1cd3dcf5a03 100644
4940     --- a/fs/ubifs/super.c
4941     +++ b/fs/ubifs/super.c
4942     @@ -1739,8 +1739,11 @@ static void ubifs_remount_ro(struct ubifs_info *c)
4943    
4944     dbg_save_space_info(c);
4945    
4946     - for (i = 0; i < c->jhead_cnt; i++)
4947     - ubifs_wbuf_sync(&c->jheads[i].wbuf);
4948     + for (i = 0; i < c->jhead_cnt; i++) {
4949     + err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
4950     + if (err)
4951     + ubifs_ro_mode(c, err);
4952     + }
4953    
4954     c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
4955     c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
4956     @@ -1806,8 +1809,11 @@ static void ubifs_put_super(struct super_block *sb)
4957     int err;
4958    
4959     /* Synchronize write-buffers */
4960     - for (i = 0; i < c->jhead_cnt; i++)
4961     - ubifs_wbuf_sync(&c->jheads[i].wbuf);
4962     + for (i = 0; i < c->jhead_cnt; i++) {
4963     + err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
4964     + if (err)
4965     + ubifs_ro_mode(c, err);
4966     + }
4967    
4968     /*
4969     * We are being cleanly unmounted which means the
4970     diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
4971     index 695389a4fc23..3a3be23689b3 100644
4972     --- a/fs/udf/unicode.c
4973     +++ b/fs/udf/unicode.c
4974     @@ -28,6 +28,9 @@
4975    
4976     #include "udf_sb.h"
4977    
4978     +#define SURROGATE_MASK 0xfffff800
4979     +#define SURROGATE_PAIR 0x0000d800
4980     +
4981     static int udf_uni2char_utf8(wchar_t uni,
4982     unsigned char *out,
4983     int boundlen)
4984     @@ -37,6 +40,9 @@ static int udf_uni2char_utf8(wchar_t uni,
4985     if (boundlen <= 0)
4986     return -ENAMETOOLONG;
4987    
4988     + if ((uni & SURROGATE_MASK) == SURROGATE_PAIR)
4989     + return -EINVAL;
4990     +
4991     if (uni < 0x80) {
4992     out[u_len++] = (unsigned char)uni;
4993     } else if (uni < 0x800) {
4994     diff --git a/include/dt-bindings/clock/mt2701-clk.h b/include/dt-bindings/clock/mt2701-clk.h
4995     index 551f7600ab58..24e93dfcee9f 100644
4996     --- a/include/dt-bindings/clock/mt2701-clk.h
4997     +++ b/include/dt-bindings/clock/mt2701-clk.h
4998     @@ -176,7 +176,8 @@
4999     #define CLK_TOP_AUD_EXT1 156
5000     #define CLK_TOP_AUD_EXT2 157
5001     #define CLK_TOP_NFI1X_PAD 158
5002     -#define CLK_TOP_NR 159
5003     +#define CLK_TOP_AXISEL_D4 159
5004     +#define CLK_TOP_NR 160
5005    
5006     /* APMIXEDSYS */
5007    
5008     diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
5009     index fff4cfa0c21d..eac387a3bfef 100644
5010     --- a/include/linux/backing-dev-defs.h
5011     +++ b/include/linux/backing-dev-defs.h
5012     @@ -199,6 +199,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync)
5013     set_wb_congested(bdi->wb.congested, sync);
5014     }
5015    
5016     +struct wb_lock_cookie {
5017     + bool locked;
5018     + unsigned long flags;
5019     +};
5020     +
5021     #ifdef CONFIG_CGROUP_WRITEBACK
5022    
5023     /**
5024     diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
5025     index 16621579a3db..012adec97543 100644
5026     --- a/include/linux/backing-dev.h
5027     +++ b/include/linux/backing-dev.h
5028     @@ -342,7 +342,7 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
5029     /**
5030     * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction
5031     * @inode: target inode
5032     - * @lockedp: temp bool output param, to be passed to the end function
5033     + * @cookie: output param, to be passed to the end function
5034     *
5035     * The caller wants to access the wb associated with @inode but isn't
5036     * holding inode->i_lock, mapping->tree_lock or wb->list_lock. This
5037     @@ -350,12 +350,12 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
5038     * association doesn't change until the transaction is finished with
5039     * unlocked_inode_to_wb_end().
5040     *
5041     - * The caller must call unlocked_inode_to_wb_end() with *@lockdep
5042     - * afterwards and can't sleep during transaction. IRQ may or may not be
5043     - * disabled on return.
5044     + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and
5045     + * can't sleep during the transaction. IRQs may or may not be disabled on
5046     + * return.
5047     */
5048     static inline struct bdi_writeback *
5049     -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
5050     +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
5051     {
5052     rcu_read_lock();
5053    
5054     @@ -363,10 +363,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
5055     * Paired with store_release in inode_switch_wb_work_fn() and
5056     * ensures that we see the new wb if we see cleared I_WB_SWITCH.
5057     */
5058     - *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
5059     + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH;
5060    
5061     - if (unlikely(*lockedp))
5062     - spin_lock_irq(&inode->i_mapping->tree_lock);
5063     + if (unlikely(cookie->locked))
5064     + spin_lock_irqsave(&inode->i_mapping->tree_lock, cookie->flags);
5065    
5066     /*
5067     * Protected by either !I_WB_SWITCH + rcu_read_lock() or tree_lock.
5068     @@ -378,12 +378,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
5069     /**
5070     * unlocked_inode_to_wb_end - end inode wb access transaction
5071     * @inode: target inode
5072     - * @locked: *@lockedp from unlocked_inode_to_wb_begin()
5073     + * @cookie: @cookie from unlocked_inode_to_wb_begin()
5074     */
5075     -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
5076     +static inline void unlocked_inode_to_wb_end(struct inode *inode,
5077     + struct wb_lock_cookie *cookie)
5078     {
5079     - if (unlikely(locked))
5080     - spin_unlock_irq(&inode->i_mapping->tree_lock);
5081     + if (unlikely(cookie->locked))
5082     + spin_unlock_irqrestore(&inode->i_mapping->tree_lock, cookie->flags);
5083    
5084     rcu_read_unlock();
5085     }
5086     @@ -430,12 +431,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode)
5087     }
5088    
5089     static inline struct bdi_writeback *
5090     -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp)
5091     +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie)
5092     {
5093     return inode_to_wb(inode);
5094     }
5095    
5096     -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked)
5097     +static inline void unlocked_inode_to_wb_end(struct inode *inode,
5098     + struct wb_lock_cookie *cookie)
5099     {
5100     }
5101    
5102     diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
5103     index 1c8a8a2aedf7..91072b68dc38 100644
5104     --- a/include/linux/blk_types.h
5105     +++ b/include/linux/blk_types.h
5106     @@ -20,8 +20,13 @@ typedef void (bio_end_io_t) (struct bio *);
5107    
5108     /*
5109     * Block error status values. See block/blk-core:blk_errors for the details.
5110     + * Alpha cannot write a byte atomically, so we need to use 32-bit value.
5111     */
5112     +#if defined(CONFIG_ALPHA) && !defined(__alpha_bwx__)
5113     +typedef u32 __bitwise blk_status_t;
5114     +#else
5115     typedef u8 __bitwise blk_status_t;
5116     +#endif
5117     #define BLK_STS_OK 0
5118     #define BLK_STS_NOTSUPP ((__force blk_status_t)1)
5119     #define BLK_STS_TIMEOUT ((__force blk_status_t)2)
5120     diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
5121     index be3aef6839f6..070f85d92c15 100644
5122     --- a/include/linux/compiler-clang.h
5123     +++ b/include/linux/compiler-clang.h
5124     @@ -17,9 +17,6 @@
5125     */
5126     #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
5127    
5128     -#define randomized_struct_fields_start struct {
5129     -#define randomized_struct_fields_end };
5130     -
5131     /* Clang doesn't have a way to turn it off per-function, yet. */
5132     #ifdef __noretpoline
5133     #undef __noretpoline
5134     diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
5135     index a1ffbf25873f..b78b31af36f8 100644
5136     --- a/include/linux/compiler-gcc.h
5137     +++ b/include/linux/compiler-gcc.h
5138     @@ -229,6 +229,9 @@
5139     #ifdef RANDSTRUCT_PLUGIN
5140     #define __randomize_layout __attribute__((randomize_layout))
5141     #define __no_randomize_layout __attribute__((no_randomize_layout))
5142     +/* This anon struct can add padding, so only enable it under randstruct. */
5143     +#define randomized_struct_fields_start struct {
5144     +#define randomized_struct_fields_end } __randomize_layout;
5145     #endif
5146    
5147     #endif /* GCC_VERSION >= 40500 */
5148     @@ -243,15 +246,6 @@
5149     */
5150     #define __visible __attribute__((externally_visible))
5151    
5152     -/*
5153     - * RANDSTRUCT_PLUGIN wants to use an anonymous struct, but it is only
5154     - * possible since GCC 4.6. To provide as much build testing coverage
5155     - * as possible, this is used for all GCC 4.6+ builds, and not just on
5156     - * RANDSTRUCT_PLUGIN builds.
5157     - */
5158     -#define randomized_struct_fields_start struct {
5159     -#define randomized_struct_fields_end } __randomize_layout;
5160     -
5161     #endif /* GCC_VERSION >= 40600 */
5162    
5163    
5164     diff --git a/include/linux/hid.h b/include/linux/hid.h
5165     index ab05a86269dc..06e6e04e6c11 100644
5166     --- a/include/linux/hid.h
5167     +++ b/include/linux/hid.h
5168     @@ -512,6 +512,12 @@ enum hid_type {
5169     HID_TYPE_USBNONE
5170     };
5171    
5172     +enum hid_battery_status {
5173     + HID_BATTERY_UNKNOWN = 0,
5174     + HID_BATTERY_QUERIED, /* Kernel explicitly queried battery strength */
5175     + HID_BATTERY_REPORTED, /* Device sent unsolicited battery strength report */
5176     +};
5177     +
5178     struct hid_driver;
5179     struct hid_ll_driver;
5180    
5181     @@ -554,7 +560,8 @@ struct hid_device { /* device report descriptor */
5182     __s32 battery_max;
5183     __s32 battery_report_type;
5184     __s32 battery_report_id;
5185     - bool battery_reported;
5186     + enum hid_battery_status battery_status;
5187     + bool battery_avoid_query;
5188     #endif
5189    
5190     unsigned int status; /* see STAT flags above */
5191     @@ -839,7 +846,7 @@ extern int hidinput_connect(struct hid_device *hid, unsigned int force);
5192     extern void hidinput_disconnect(struct hid_device *);
5193    
5194     int hid_set_field(struct hid_field *, unsigned, __s32);
5195     -int hid_input_report(struct hid_device *, int type, u8 *, int, int);
5196     +int hid_input_report(struct hid_device *, int type, u8 *, u32, int);
5197     int hidinput_find_field(struct hid_device *hid, unsigned int type, unsigned int code, struct hid_field **field);
5198     struct hid_field *hidinput_get_led_field(struct hid_device *hid);
5199     unsigned int hidinput_count_leds(struct hid_device *hid);
5200     @@ -1086,13 +1093,13 @@ static inline void hid_hw_wait(struct hid_device *hdev)
5201     *
5202     * @report: the report we want to know the length
5203     */
5204     -static inline int hid_report_len(struct hid_report *report)
5205     +static inline u32 hid_report_len(struct hid_report *report)
5206     {
5207     /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
5208     return ((report->size - 1) >> 3) + 1 + (report->id > 0);
5209     }
5210    
5211     -int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
5212     +int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
5213     int interrupt);
5214    
5215     /* HID quirks API */
5216     diff --git a/include/linux/hmm.h b/include/linux/hmm.h
5217     index 96e69979f84d..8198faf16ed6 100644
5218     --- a/include/linux/hmm.h
5219     +++ b/include/linux/hmm.h
5220     @@ -498,23 +498,16 @@ struct hmm_device {
5221     struct hmm_device *hmm_device_new(void *drvdata);
5222     void hmm_device_put(struct hmm_device *hmm_device);
5223     #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */
5224     -#endif /* IS_ENABLED(CONFIG_HMM) */
5225    
5226     /* Below are for HMM internal use only! Not to be used by device driver! */
5227     -#if IS_ENABLED(CONFIG_HMM_MIRROR)
5228     void hmm_mm_destroy(struct mm_struct *mm);
5229    
5230     static inline void hmm_mm_init(struct mm_struct *mm)
5231     {
5232     mm->hmm = NULL;
5233     }
5234     -#else /* IS_ENABLED(CONFIG_HMM_MIRROR) */
5235     -static inline void hmm_mm_destroy(struct mm_struct *mm) {}
5236     -static inline void hmm_mm_init(struct mm_struct *mm) {}
5237     -#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
5238     -
5239     -
5240     #else /* IS_ENABLED(CONFIG_HMM) */
5241     static inline void hmm_mm_destroy(struct mm_struct *mm) {}
5242     static inline void hmm_mm_init(struct mm_struct *mm) {}
5243     +#endif /* IS_ENABLED(CONFIG_HMM) */
5244     #endif /* LINUX_HMM_H */
5245     diff --git a/include/linux/tty.h b/include/linux/tty.h
5246     index 0a6c71e0ad01..47f8af22f216 100644
5247     --- a/include/linux/tty.h
5248     +++ b/include/linux/tty.h
5249     @@ -364,6 +364,7 @@ struct tty_file_private {
5250     #define TTY_PTY_LOCK 16 /* pty private */
5251     #define TTY_NO_WRITE_SPLIT 17 /* Preserve write boundaries to driver */
5252     #define TTY_HUPPED 18 /* Post driver->hangup() */
5253     +#define TTY_HUPPING 19 /* Hangup in progress */
5254     #define TTY_LDISC_HALTED 22 /* Line discipline is halted */
5255    
5256     /* Values for tty->flow_change */
5257     diff --git a/include/sound/pcm_oss.h b/include/sound/pcm_oss.h
5258     index 760c969d885d..12bbf8c81112 100644
5259     --- a/include/sound/pcm_oss.h
5260     +++ b/include/sound/pcm_oss.h
5261     @@ -57,6 +57,7 @@ struct snd_pcm_oss_runtime {
5262     char *buffer; /* vmallocated period */
5263     size_t buffer_used; /* used length from period buffer */
5264     struct mutex params_lock;
5265     + atomic_t rw_ref; /* concurrent read/write accesses */
5266     #ifdef CONFIG_SND_PCM_OSS_PLUGINS
5267     struct snd_pcm_plugin *plugin_first;
5268     struct snd_pcm_plugin *plugin_last;
5269     diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
5270     index c34f4490d025..26ee91300e3e 100644
5271     --- a/include/uapi/linux/random.h
5272     +++ b/include/uapi/linux/random.h
5273     @@ -35,6 +35,9 @@
5274     /* Clear the entropy pool and associated counters. (Superuser only.) */
5275     #define RNDCLEARPOOL _IO( 'R', 0x06 )
5276    
5277     +/* Reseed CRNG. (Superuser only.) */
5278     +#define RNDRESEEDCRNG _IO( 'R', 0x07 )
5279     +
5280     struct rand_pool_info {
5281     int entropy_count;
5282     int buf_size;
5283     diff --git a/ipc/shm.c b/ipc/shm.c
5284     index b469e910f887..a9cce632ed48 100644
5285     --- a/ipc/shm.c
5286     +++ b/ipc/shm.c
5287     @@ -203,6 +203,12 @@ static int __shm_open(struct vm_area_struct *vma)
5288     if (IS_ERR(shp))
5289     return PTR_ERR(shp);
5290    
5291     + if (shp->shm_file != sfd->file) {
5292     + /* ID was reused */
5293     + shm_unlock(shp);
5294     + return -EINVAL;
5295     + }
5296     +
5297     shp->shm_atim = ktime_get_real_seconds();
5298     shp->shm_lprid = task_tgid_vnr(current);
5299     shp->shm_nattch++;
5300     @@ -431,8 +437,9 @@ static int shm_mmap(struct file *file, struct vm_area_struct *vma)
5301     int ret;
5302    
5303     /*
5304     - * In case of remap_file_pages() emulation, the file can represent
5305     - * removed IPC ID: propogate shm_lock() error to caller.
5306     + * In case of remap_file_pages() emulation, the file can represent an
5307     + * IPC ID that was removed, and possibly even reused by another shm
5308     + * segment already. Propagate this case as an error to caller.
5309     */
5310     ret = __shm_open(vma);
5311     if (ret)
5312     @@ -456,6 +463,7 @@ static int shm_release(struct inode *ino, struct file *file)
5313     struct shm_file_data *sfd = shm_file_data(file);
5314    
5315     put_ipc_ns(sfd->ns);
5316     + fput(sfd->file);
5317     shm_file_data(file) = NULL;
5318     kfree(sfd);
5319     return 0;
5320     @@ -1391,7 +1399,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg,
5321     file->f_mapping = shp->shm_file->f_mapping;
5322     sfd->id = shp->shm_perm.id;
5323     sfd->ns = get_ipc_ns(ns);
5324     - sfd->file = shp->shm_file;
5325     + /*
5326     + * We need to take a reference to the real shm file to prevent the
5327     + * pointer from becoming stale in cases where the lifetime of the outer
5328     + * file extends beyond that of the shm segment. It's not usually
5329     + * possible, but it can happen during remap_file_pages() emulation as
5330     + * that unmaps the memory, then does ->mmap() via file reference only.
5331     + * We'll deny the ->mmap() if the shm segment was since removed, but to
5332     + * detect shm ID reuse we need to compare the file pointers.
5333     + */
5334     + sfd->file = get_file(shp->shm_file);
5335     sfd->vm_ops = NULL;
5336    
5337     err = security_mmap_file(file, prot, flags);
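The comment block above states the intent; the reference discipline it introduces boils down to the hypothetical, heavily simplified sketch below: do_shmat() pins the real shm file, __shm_open() compares pointers to detect ID reuse, and shm_release() drops the pin.

	sfd->file = get_file(shp->shm_file);	/* attach: take a counted reference */

	/* later, when the outer file is re-mmapped (remap_file_pages() emulation): */
	if (shp->shm_file != sfd->file) {
		shm_unlock(shp);
		return -EINVAL;			/* the shm ID was recycled, refuse */
	}

	/* when the outer file finally goes away: */
	fput(sfd->file);			/* drop the reference taken at attach */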
5338     diff --git a/kernel/resource.c b/kernel/resource.c
5339     index 9b5f04404152..7ee3dd1ad2af 100644
5340     --- a/kernel/resource.c
5341     +++ b/kernel/resource.c
5342     @@ -633,7 +633,8 @@ static int __find_resource(struct resource *root, struct resource *old,
5343     alloc.start = constraint->alignf(constraint->alignf_data, &avail,
5344     size, constraint->align);
5345     alloc.end = alloc.start + size - 1;
5346     - if (resource_contains(&avail, &alloc)) {
5347     + if (alloc.start <= alloc.end &&
5348     + resource_contains(&avail, &alloc)) {
5349     new->start = alloc.start;
5350     new->end = alloc.end;
5351     return 0;
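The added alloc.start <= alloc.end test guards against unsigned wraparound: when the aligned start returned by constraint->alignf() lies near the top of the address space, start + size - 1 can wrap and resource_contains() would then be asked about a nonsensical range. A small stand-alone illustration in plain user-space C (not kernel code):

#include <stdio.h>
#include <limits.h>

int main(void)
{
	unsigned long start = ULONG_MAX - 10;	/* candidate start near the top */
	unsigned long size  = 100;
	unsigned long end   = start + size - 1;	/* wraps around to a tiny value */

	printf("start=%#lx end=%#lx sane=%d\n", start, end, start <= end);
	return 0;
}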
5352     diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
5353     index 39c221454186..36f018b15392 100644
5354     --- a/kernel/trace/ring_buffer.c
5355     +++ b/kernel/trace/ring_buffer.c
5356     @@ -1136,6 +1136,11 @@ static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)
5357     struct buffer_page *bpage, *tmp;
5358     long i;
5359    
5360     + /* Check if the available memory is there first */
5361     + i = si_mem_available();
5362     + if (i < nr_pages)
5363     + return -ENOMEM;
5364     +
5365     for (i = 0; i < nr_pages; i++) {
5366     struct page *page;
5367     /*
5368     diff --git a/mm/filemap.c b/mm/filemap.c
5369     index 594d73fef8b4..e2e738cc08b1 100644
5370     --- a/mm/filemap.c
5371     +++ b/mm/filemap.c
5372     @@ -688,7 +688,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
5373     VM_BUG_ON_PAGE(!PageLocked(new), new);
5374     VM_BUG_ON_PAGE(new->mapping, new);
5375    
5376     - error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);
5377     + error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK);
5378     if (!error) {
5379     struct address_space *mapping = old->mapping;
5380     void (*freepage)(struct page *);
5381     @@ -744,7 +744,7 @@ static int __add_to_page_cache_locked(struct page *page,
5382     return error;
5383     }
5384    
5385     - error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM);
5386     + error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);
5387     if (error) {
5388     if (!huge)
5389     mem_cgroup_cancel_charge(page, memcg, false);
5390     @@ -1486,8 +1486,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
5391     if (fgp_flags & FGP_ACCESSED)
5392     __SetPageReferenced(page);
5393    
5394     - err = add_to_page_cache_lru(page, mapping, offset,
5395     - gfp_mask & GFP_RECLAIM_MASK);
5396     + err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
5397     if (unlikely(err)) {
5398     put_page(page);
5399     page = NULL;
5400     @@ -2275,7 +2274,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
5401     if (!page)
5402     return -ENOMEM;
5403    
5404     - ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL);
5405     + ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
5406     if (ret == 0)
5407     ret = mapping->a_ops->readpage(file, page);
5408     else if (ret == -EEXIST)
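The common thread in these filemap hunks is where the gfp mask gets filtered: the slab-backed radix-tree preloads now only ever see the reclaim-related bits (gfp_mask & GFP_RECLAIM_MASK), while callers such as pagecache_get_page() and page_cache_read() stop applying their own ad-hoc masks and pass gfp_mask through untouched. A hypothetical condensed form of the idiom:

	/* preloading allocates from slab, so strip everything but reclaim flags: */
	error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK);

	/* callers hand their mask through and let the page-cache core filter once: */
	err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);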
5409     diff --git a/mm/hmm.c b/mm/hmm.c
5410     index a88a847bccba..81ff1dbbf8a8 100644
5411     --- a/mm/hmm.c
5412     +++ b/mm/hmm.c
5413     @@ -277,7 +277,8 @@ static int hmm_pfns_bad(unsigned long addr,
5414     unsigned long end,
5415     struct mm_walk *walk)
5416     {
5417     - struct hmm_range *range = walk->private;
5418     + struct hmm_vma_walk *hmm_vma_walk = walk->private;
5419     + struct hmm_range *range = hmm_vma_walk->range;
5420     hmm_pfn_t *pfns = range->pfns;
5421     unsigned long i;
5422    
5423     diff --git a/mm/ksm.c b/mm/ksm.c
5424     index 6cb60f46cce5..5b6be9eeb095 100644
5425     --- a/mm/ksm.c
5426     +++ b/mm/ksm.c
5427     @@ -1133,6 +1133,13 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
5428     } else {
5429     newpte = pte_mkspecial(pfn_pte(page_to_pfn(kpage),
5430     vma->vm_page_prot));
5431     + /*
5432     + * We're replacing an anonymous page with a zero page, which is
5433     + * not anonymous. We need to do proper accounting otherwise we
5434     + * will get wrong values in /proc, and a BUG message in dmesg
5435     + * when tearing down the mm.
5436     + */
5437     + dec_mm_counter(mm, MM_ANONPAGES);
5438     }
5439    
5440     flush_cache_page(vma, addr, pte_pfn(*ptep));
5441     diff --git a/mm/page-writeback.c b/mm/page-writeback.c
5442     index 0b9c5cbe8eba..3175ac850a53 100644
5443     --- a/mm/page-writeback.c
5444     +++ b/mm/page-writeback.c
5445     @@ -2518,13 +2518,13 @@ void account_page_redirty(struct page *page)
5446     if (mapping && mapping_cap_account_dirty(mapping)) {
5447     struct inode *inode = mapping->host;
5448     struct bdi_writeback *wb;
5449     - bool locked;
5450     + struct wb_lock_cookie cookie = {};
5451    
5452     - wb = unlocked_inode_to_wb_begin(inode, &locked);
5453     + wb = unlocked_inode_to_wb_begin(inode, &cookie);
5454     current->nr_dirtied--;
5455     dec_node_page_state(page, NR_DIRTIED);
5456     dec_wb_stat(wb, WB_DIRTIED);
5457     - unlocked_inode_to_wb_end(inode, locked);
5458     + unlocked_inode_to_wb_end(inode, &cookie);
5459     }
5460     }
5461     EXPORT_SYMBOL(account_page_redirty);
5462     @@ -2630,15 +2630,15 @@ void cancel_dirty_page(struct page *page)
5463     if (mapping_cap_account_dirty(mapping)) {
5464     struct inode *inode = mapping->host;
5465     struct bdi_writeback *wb;
5466     - bool locked;
5467     + struct wb_lock_cookie cookie = {};
5468    
5469     lock_page_memcg(page);
5470     - wb = unlocked_inode_to_wb_begin(inode, &locked);
5471     + wb = unlocked_inode_to_wb_begin(inode, &cookie);
5472    
5473     if (TestClearPageDirty(page))
5474     account_page_cleaned(page, mapping, wb);
5475    
5476     - unlocked_inode_to_wb_end(inode, locked);
5477     + unlocked_inode_to_wb_end(inode, &cookie);
5478     unlock_page_memcg(page);
5479     } else {
5480     ClearPageDirty(page);
5481     @@ -2670,7 +2670,7 @@ int clear_page_dirty_for_io(struct page *page)
5482     if (mapping && mapping_cap_account_dirty(mapping)) {
5483     struct inode *inode = mapping->host;
5484     struct bdi_writeback *wb;
5485     - bool locked;
5486     + struct wb_lock_cookie cookie = {};
5487    
5488     /*
5489     * Yes, Virginia, this is indeed insane.
5490     @@ -2707,14 +2707,14 @@ int clear_page_dirty_for_io(struct page *page)
5491     * always locked coming in here, so we get the desired
5492     * exclusion.
5493     */
5494     - wb = unlocked_inode_to_wb_begin(inode, &locked);
5495     + wb = unlocked_inode_to_wb_begin(inode, &cookie);
5496     if (TestClearPageDirty(page)) {
5497     dec_lruvec_page_state(page, NR_FILE_DIRTY);
5498     dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
5499     dec_wb_stat(wb, WB_RECLAIMABLE);
5500     ret = 1;
5501     }
5502     - unlocked_inode_to_wb_end(inode, locked);
5503     + unlocked_inode_to_wb_end(inode, &cookie);
5504     return ret;
5505     }
5506     return TestClearPageDirty(page);
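All three writeback sites switch from a plain bool to a struct wb_lock_cookie handed to unlocked_inode_to_wb_begin()/unlocked_inode_to_wb_end(), so the end helper can restore exactly the state the begin helper saved (the upstream motivation appears to be preserving the caller's interrupt state, which a single bool cannot carry). The pattern, schematically and hypothetically simplified:

	struct wb_lock_cookie cookie = {};
	struct bdi_writeback *wb;

	wb = unlocked_inode_to_wb_begin(inode, &cookie);	/* may lock, records what it did */
	/* ... update writeback statistics against wb ... */
	unlocked_inode_to_wb_end(inode, &cookie);		/* undoes exactly that */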
5507     diff --git a/mm/slab.c b/mm/slab.c
5508     index 966839a1ac2c..1bfc3d847a0a 100644
5509     --- a/mm/slab.c
5510     +++ b/mm/slab.c
5511     @@ -4080,7 +4080,8 @@ static void cache_reap(struct work_struct *w)
5512     next_reap_node();
5513     out:
5514     /* Set up the next iteration */
5515     - schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_AC));
5516     + schedule_delayed_work_on(smp_processor_id(), work,
5517     + round_jiffies_relative(REAPTIMEOUT_AC));
5518     }
5519    
5520     #ifdef CONFIG_SLABINFO
5521     diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
5522     index dbb016434ace..de92fc1fc3be 100644
5523     --- a/net/dsa/tag_brcm.c
5524     +++ b/net/dsa/tag_brcm.c
5525     @@ -121,6 +121,9 @@ static struct sk_buff *brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
5526     if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
5527     return NULL;
5528    
5529     + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
5530     + return NULL;
5531     +
5532     /* Remove Broadcom tag and update checksum */
5533     skb_pull_rcsum(skb, BRCM_TAG_LEN);
5534    
5535     diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
5536     index fbf9ca954773..b3008a9bacf3 100644
5537     --- a/net/dsa/tag_dsa.c
5538     +++ b/net/dsa/tag_dsa.c
5539     @@ -107,6 +107,9 @@ static struct sk_buff *dsa_rcv(struct sk_buff *skb, struct net_device *dev,
5540     if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
5541     return NULL;
5542    
5543     + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
5544     + return NULL;
5545     +
5546     /*
5547     * Convert the DSA header to an 802.1q header if the 'tagged'
5548     * bit in the DSA header is set. If the 'tagged' bit is clear,
5549     diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
5550     index 76367ba1b2e2..c86b6d90576d 100644
5551     --- a/net/dsa/tag_edsa.c
5552     +++ b/net/dsa/tag_edsa.c
5553     @@ -120,6 +120,9 @@ static struct sk_buff *edsa_rcv(struct sk_buff *skb, struct net_device *dev,
5554     if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
5555     return NULL;
5556    
5557     + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
5558     + return NULL;
5559     +
5560     /*
5561     * If the 'tagged' bit is set, convert the DSA tag to a 802.1q
5562     * tag and delete the ethertype part. If the 'tagged' bit is
5563     diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c
5564     index 010ca0a336c4..6c894692b9cd 100644
5565     --- a/net/dsa/tag_ksz.c
5566     +++ b/net/dsa/tag_ksz.c
5567     @@ -92,6 +92,9 @@ static struct sk_buff *ksz_rcv(struct sk_buff *skb, struct net_device *dev,
5568     if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
5569     return NULL;
5570    
5571     + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
5572     + return NULL;
5573     +
5574     pskb_trim_rcsum(skb, skb->len - KSZ_EGRESS_TAG_LEN);
5575    
5576     skb->dev = ds->ports[source_port].netdev;
5577     diff --git a/net/dsa/tag_lan9303.c b/net/dsa/tag_lan9303.c
5578     index 0b9826105e42..2d1603009e16 100644
5579     --- a/net/dsa/tag_lan9303.c
5580     +++ b/net/dsa/tag_lan9303.c
5581     @@ -108,6 +108,9 @@ static struct sk_buff *lan9303_rcv(struct sk_buff *skb, struct net_device *dev,
5582     return NULL;
5583     }
5584    
5585     + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
5586     + return NULL;
5587     +
5588     if (!ds->ports[source_port].netdev) {
5589     dev_warn_ratelimited(&dev->dev, "Dropping packet due to invalid netdev or device\n");
5590     return NULL;
5591     diff --git a/net/dsa/tag_mtk.c b/net/dsa/tag_mtk.c
5592     index ec8ee5f43255..5c471854412d 100644
5593     --- a/net/dsa/tag_mtk.c
5594     +++ b/net/dsa/tag_mtk.c
5595     @@ -81,6 +81,9 @@ static struct sk_buff *mtk_tag_rcv(struct sk_buff *skb, struct net_device *dev,
5596     if (!ds->ports[port].netdev)
5597     return NULL;
5598    
5599     + if (unlikely(ds->cpu_port_mask & BIT(port)))
5600     + return NULL;
5601     +
5602     skb->dev = ds->ports[port].netdev;
5603    
5604     return skb;
5605     diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
5606     index 1d4c70711c0f..b8c05f1cf47d 100644
5607     --- a/net/dsa/tag_qca.c
5608     +++ b/net/dsa/tag_qca.c
5609     @@ -104,6 +104,9 @@ static struct sk_buff *qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
5610     if (!ds->ports[port].netdev)
5611     return NULL;
5612    
5613     + if (unlikely(ds->cpu_port_mask & BIT(port)))
5614     + return NULL;
5615     +
5616     /* Update skb & forward the frame accordingly */
5617     skb->dev = ds->ports[port].netdev;
5618    
5619     diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
5620     index d2fd4923aa3e..fcc9aa72877d 100644
5621     --- a/net/dsa/tag_trailer.c
5622     +++ b/net/dsa/tag_trailer.c
5623     @@ -76,6 +76,9 @@ static struct sk_buff *trailer_rcv(struct sk_buff *skb, struct net_device *dev,
5624     if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
5625     return NULL;
5626    
5627     + if (unlikely(ds->cpu_port_mask & BIT(source_port)))
5628     + return NULL;
5629     +
5630     pskb_trim_rcsum(skb, skb->len - 4);
5631    
5632     skb->dev = ds->ports[source_port].netdev;
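Every DSA tagger's rcv hook gains the same guard: if the port index decoded from the hardware tag refers to a CPU port, the frame is dropped before ds->ports[...].netdev is used, since CPU ports have no slave net device behind them. The distilled form, as repeated across the hunks above:

	if (unlikely(ds->cpu_port_mask & BIT(source_port)))
		return NULL;	/* rcv hooks drop a frame by returning NULL */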
5633     diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
5634     index 61a504fb1ae2..34f94052c519 100644
5635     --- a/net/sunrpc/rpc_pipe.c
5636     +++ b/net/sunrpc/rpc_pipe.c
5637     @@ -1375,6 +1375,7 @@ rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
5638     struct dentry *clnt_dir = pipe_dentry->d_parent;
5639     struct dentry *gssd_dir = clnt_dir->d_parent;
5640    
5641     + dget(pipe_dentry);
5642     __rpc_rmpipe(d_inode(clnt_dir), pipe_dentry);
5643     __rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
5644     __rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
5645     diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
5646     index d6e9a18fd821..b4f954e6d2db 100644
5647     --- a/sound/core/oss/pcm_oss.c
5648     +++ b/sound/core/oss/pcm_oss.c
5649     @@ -823,8 +823,25 @@ static int choose_rate(struct snd_pcm_substream *substream,
5650     return snd_pcm_hw_param_near(substream, params, SNDRV_PCM_HW_PARAM_RATE, best_rate, NULL);
5651     }
5652    
5653     -static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
5654     - bool trylock)
5655     +/* parameter locking: returns immediately if tried during streaming */
5656     +static int lock_params(struct snd_pcm_runtime *runtime)
5657     +{
5658     + if (mutex_lock_interruptible(&runtime->oss.params_lock))
5659     + return -ERESTARTSYS;
5660     + if (atomic_read(&runtime->oss.rw_ref)) {
5661     + mutex_unlock(&runtime->oss.params_lock);
5662     + return -EBUSY;
5663     + }
5664     + return 0;
5665     +}
5666     +
5667     +static void unlock_params(struct snd_pcm_runtime *runtime)
5668     +{
5669     + mutex_unlock(&runtime->oss.params_lock);
5670     +}
5671     +
5672     +/* call with params_lock held */
5673     +static int snd_pcm_oss_change_params_locked(struct snd_pcm_substream *substream)
5674     {
5675     struct snd_pcm_runtime *runtime = substream->runtime;
5676     struct snd_pcm_hw_params *params, *sparams;
5677     @@ -838,11 +855,8 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
5678     const struct snd_mask *sformat_mask;
5679     struct snd_mask mask;
5680    
5681     - if (trylock) {
5682     - if (!(mutex_trylock(&runtime->oss.params_lock)))
5683     - return -EAGAIN;
5684     - } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
5685     - return -EINTR;
5686     + if (!runtime->oss.params)
5687     + return 0;
5688     sw_params = kzalloc(sizeof(*sw_params), GFP_KERNEL);
5689     params = kmalloc(sizeof(*params), GFP_KERNEL);
5690     sparams = kmalloc(sizeof(*sparams), GFP_KERNEL);
5691     @@ -1068,6 +1082,23 @@ static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
5692     kfree(sw_params);
5693     kfree(params);
5694     kfree(sparams);
5695     + return err;
5696     +}
5697     +
5698     +/* this one takes the lock by itself */
5699     +static int snd_pcm_oss_change_params(struct snd_pcm_substream *substream,
5700     + bool trylock)
5701     +{
5702     + struct snd_pcm_runtime *runtime = substream->runtime;
5703     + int err;
5704     +
5705     + if (trylock) {
5706     + if (!(mutex_trylock(&runtime->oss.params_lock)))
5707     + return -EAGAIN;
5708     + } else if (mutex_lock_interruptible(&runtime->oss.params_lock))
5709     + return -ERESTARTSYS;
5710     +
5711     + err = snd_pcm_oss_change_params_locked(substream);
5712     mutex_unlock(&runtime->oss.params_lock);
5713     return err;
5714     }
5715     @@ -1096,6 +1127,10 @@ static int snd_pcm_oss_get_active_substream(struct snd_pcm_oss_file *pcm_oss_fil
5716     return 0;
5717     }
5718    
5719     +/* call with params_lock held */
5720     +/* NOTE: this always call PREPARE unconditionally no matter whether
5721     + * runtime->oss.prepare is set or not
5722     + */
5723     static int snd_pcm_oss_prepare(struct snd_pcm_substream *substream)
5724     {
5725     int err;
5726     @@ -1120,14 +1155,35 @@ static int snd_pcm_oss_make_ready(struct snd_pcm_substream *substream)
5727     struct snd_pcm_runtime *runtime;
5728     int err;
5729    
5730     - if (substream == NULL)
5731     - return 0;
5732     runtime = substream->runtime;
5733     if (runtime->oss.params) {
5734     err = snd_pcm_oss_change_params(substream, false);
5735     if (err < 0)
5736     return err;
5737     }
5738     + if (runtime->oss.prepare) {
5739     + if (mutex_lock_interruptible(&runtime->oss.params_lock))
5740     + return -ERESTARTSYS;
5741     + err = snd_pcm_oss_prepare(substream);
5742     + mutex_unlock(&runtime->oss.params_lock);
5743     + if (err < 0)
5744     + return err;
5745     + }
5746     + return 0;
5747     +}
5748     +
5749     +/* call with params_lock held */
5750     +static int snd_pcm_oss_make_ready_locked(struct snd_pcm_substream *substream)
5751     +{
5752     + struct snd_pcm_runtime *runtime;
5753     + int err;
5754     +
5755     + runtime = substream->runtime;
5756     + if (runtime->oss.params) {
5757     + err = snd_pcm_oss_change_params_locked(substream);
5758     + if (err < 0)
5759     + return err;
5760     + }
5761     if (runtime->oss.prepare) {
5762     err = snd_pcm_oss_prepare(substream);
5763     if (err < 0)
5764     @@ -1332,13 +1388,15 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
5765     if (atomic_read(&substream->mmap_count))
5766     return -ENXIO;
5767    
5768     - if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
5769     - return tmp;
5770     + atomic_inc(&runtime->oss.rw_ref);
5771     while (bytes > 0) {
5772     if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
5773     tmp = -ERESTARTSYS;
5774     break;
5775     }
5776     + tmp = snd_pcm_oss_make_ready_locked(substream);
5777     + if (tmp < 0)
5778     + goto err;
5779     if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
5780     tmp = bytes;
5781     if (tmp + runtime->oss.buffer_used > runtime->oss.period_bytes)
5782     @@ -1394,6 +1452,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
5783     }
5784     tmp = 0;
5785     }
5786     + atomic_dec(&runtime->oss.rw_ref);
5787     return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
5788     }
5789    
5790     @@ -1439,13 +1498,15 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
5791     if (atomic_read(&substream->mmap_count))
5792     return -ENXIO;
5793    
5794     - if ((tmp = snd_pcm_oss_make_ready(substream)) < 0)
5795     - return tmp;
5796     + atomic_inc(&runtime->oss.rw_ref);
5797     while (bytes > 0) {
5798     if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
5799     tmp = -ERESTARTSYS;
5800     break;
5801     }
5802     + tmp = snd_pcm_oss_make_ready_locked(substream);
5803     + if (tmp < 0)
5804     + goto err;
5805     if (bytes < runtime->oss.period_bytes || runtime->oss.buffer_used > 0) {
5806     if (runtime->oss.buffer_used == 0) {
5807     tmp = snd_pcm_oss_read2(substream, runtime->oss.buffer, runtime->oss.period_bytes, 1);
5808     @@ -1486,6 +1547,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
5809     }
5810     tmp = 0;
5811     }
5812     + atomic_dec(&runtime->oss.rw_ref);
5813     return xfer > 0 ? (snd_pcm_sframes_t)xfer : tmp;
5814     }
5815    
5816     @@ -1501,10 +1563,12 @@ static int snd_pcm_oss_reset(struct snd_pcm_oss_file *pcm_oss_file)
5817     continue;
5818     runtime = substream->runtime;
5819     snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
5820     + mutex_lock(&runtime->oss.params_lock);
5821     runtime->oss.prepare = 1;
5822     runtime->oss.buffer_used = 0;
5823     runtime->oss.prev_hw_ptr_period = 0;
5824     runtime->oss.period_ptr = 0;
5825     + mutex_unlock(&runtime->oss.params_lock);
5826     }
5827     return 0;
5828     }
5829     @@ -1590,9 +1654,13 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
5830     goto __direct;
5831     if ((err = snd_pcm_oss_make_ready(substream)) < 0)
5832     return err;
5833     + atomic_inc(&runtime->oss.rw_ref);
5834     + if (mutex_lock_interruptible(&runtime->oss.params_lock)) {
5835     + atomic_dec(&runtime->oss.rw_ref);
5836     + return -ERESTARTSYS;
5837     + }
5838     format = snd_pcm_oss_format_from(runtime->oss.format);
5839     width = snd_pcm_format_physical_width(format);
5840     - mutex_lock(&runtime->oss.params_lock);
5841     if (runtime->oss.buffer_used > 0) {
5842     #ifdef OSS_DEBUG
5843     pcm_dbg(substream->pcm, "sync: buffer_used\n");
5844     @@ -1602,10 +1670,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
5845     runtime->oss.buffer + runtime->oss.buffer_used,
5846     size);
5847     err = snd_pcm_oss_sync1(substream, runtime->oss.period_bytes);
5848     - if (err < 0) {
5849     - mutex_unlock(&runtime->oss.params_lock);
5850     - return err;
5851     - }
5852     + if (err < 0)
5853     + goto unlock;
5854     } else if (runtime->oss.period_ptr > 0) {
5855     #ifdef OSS_DEBUG
5856     pcm_dbg(substream->pcm, "sync: period_ptr\n");
5857     @@ -1615,10 +1681,8 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
5858     runtime->oss.buffer,
5859     size * 8 / width);
5860     err = snd_pcm_oss_sync1(substream, size);
5861     - if (err < 0) {
5862     - mutex_unlock(&runtime->oss.params_lock);
5863     - return err;
5864     - }
5865     + if (err < 0)
5866     + goto unlock;
5867     }
5868     /*
5869     * The ALSA's period might be a bit large than OSS one.
5870     @@ -1632,7 +1696,11 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
5871     else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED)
5872     snd_pcm_lib_writev(substream, NULL, size);
5873     }
5874     +unlock:
5875     mutex_unlock(&runtime->oss.params_lock);
5876     + atomic_dec(&runtime->oss.rw_ref);
5877     + if (err < 0)
5878     + return err;
5879     /*
5880     * finish sync: drain the buffer
5881     */
5882     @@ -1643,7 +1711,9 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
5883     substream->f_flags = saved_f_flags;
5884     if (err < 0)
5885     return err;
5886     + mutex_lock(&runtime->oss.params_lock);
5887     runtime->oss.prepare = 1;
5888     + mutex_unlock(&runtime->oss.params_lock);
5889     }
5890    
5891     substream = pcm_oss_file->streams[SNDRV_PCM_STREAM_CAPTURE];
5892     @@ -1654,8 +1724,10 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
5893     err = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
5894     if (err < 0)
5895     return err;
5896     + mutex_lock(&runtime->oss.params_lock);
5897     runtime->oss.buffer_used = 0;
5898     runtime->oss.prepare = 1;
5899     + mutex_unlock(&runtime->oss.params_lock);
5900     }
5901     return 0;
5902     }
5903     @@ -1667,6 +1739,8 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
5904     for (idx = 1; idx >= 0; --idx) {
5905     struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
5906     struct snd_pcm_runtime *runtime;
5907     + int err;
5908     +
5909     if (substream == NULL)
5910     continue;
5911     runtime = substream->runtime;
5912     @@ -1674,10 +1748,14 @@ static int snd_pcm_oss_set_rate(struct snd_pcm_oss_file *pcm_oss_file, int rate)
5913     rate = 1000;
5914     else if (rate > 192000)
5915     rate = 192000;
5916     + err = lock_params(runtime);
5917     + if (err < 0)
5918     + return err;
5919     if (runtime->oss.rate != rate) {
5920     runtime->oss.params = 1;
5921     runtime->oss.rate = rate;
5922     }
5923     + unlock_params(runtime);
5924     }
5925     return snd_pcm_oss_get_rate(pcm_oss_file);
5926     }
5927     @@ -1702,13 +1780,19 @@ static int snd_pcm_oss_set_channels(struct snd_pcm_oss_file *pcm_oss_file, unsig
5928     for (idx = 1; idx >= 0; --idx) {
5929     struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
5930     struct snd_pcm_runtime *runtime;
5931     + int err;
5932     +
5933     if (substream == NULL)
5934     continue;
5935     runtime = substream->runtime;
5936     + err = lock_params(runtime);
5937     + if (err < 0)
5938     + return err;
5939     if (runtime->oss.channels != channels) {
5940     runtime->oss.params = 1;
5941     runtime->oss.channels = channels;
5942     }
5943     + unlock_params(runtime);
5944     }
5945     return snd_pcm_oss_get_channels(pcm_oss_file);
5946     }
5947     @@ -1781,6 +1865,7 @@ static int snd_pcm_oss_get_formats(struct snd_pcm_oss_file *pcm_oss_file)
5948     static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int format)
5949     {
5950     int formats, idx;
5951     + int err;
5952    
5953     if (format != AFMT_QUERY) {
5954     formats = snd_pcm_oss_get_formats(pcm_oss_file);
5955     @@ -1794,10 +1879,14 @@ static int snd_pcm_oss_set_format(struct snd_pcm_oss_file *pcm_oss_file, int for
5956     if (substream == NULL)
5957     continue;
5958     runtime = substream->runtime;
5959     + err = lock_params(runtime);
5960     + if (err < 0)
5961     + return err;
5962     if (runtime->oss.format != format) {
5963     runtime->oss.params = 1;
5964     runtime->oss.format = format;
5965     }
5966     + unlock_params(runtime);
5967     }
5968     }
5969     return snd_pcm_oss_get_format(pcm_oss_file);
5970     @@ -1817,8 +1906,6 @@ static int snd_pcm_oss_set_subdivide1(struct snd_pcm_substream *substream, int s
5971     {
5972     struct snd_pcm_runtime *runtime;
5973    
5974     - if (substream == NULL)
5975     - return 0;
5976     runtime = substream->runtime;
5977     if (subdivide == 0) {
5978     subdivide = runtime->oss.subdivision;
5979     @@ -1842,9 +1929,17 @@ static int snd_pcm_oss_set_subdivide(struct snd_pcm_oss_file *pcm_oss_file, int
5980    
5981     for (idx = 1; idx >= 0; --idx) {
5982     struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
5983     + struct snd_pcm_runtime *runtime;
5984     +
5985     if (substream == NULL)
5986     continue;
5987     - if ((err = snd_pcm_oss_set_subdivide1(substream, subdivide)) < 0)
5988     + runtime = substream->runtime;
5989     + err = lock_params(runtime);
5990     + if (err < 0)
5991     + return err;
5992     + err = snd_pcm_oss_set_subdivide1(substream, subdivide);
5993     + unlock_params(runtime);
5994     + if (err < 0)
5995     return err;
5996     }
5997     return err;
5998     @@ -1854,8 +1949,6 @@ static int snd_pcm_oss_set_fragment1(struct snd_pcm_substream *substream, unsign
5999     {
6000     struct snd_pcm_runtime *runtime;
6001    
6002     - if (substream == NULL)
6003     - return 0;
6004     runtime = substream->runtime;
6005     if (runtime->oss.subdivision || runtime->oss.fragshift)
6006     return -EINVAL;
6007     @@ -1875,9 +1968,17 @@ static int snd_pcm_oss_set_fragment(struct snd_pcm_oss_file *pcm_oss_file, unsig
6008    
6009     for (idx = 1; idx >= 0; --idx) {
6010     struct snd_pcm_substream *substream = pcm_oss_file->streams[idx];
6011     + struct snd_pcm_runtime *runtime;
6012     +
6013     if (substream == NULL)
6014     continue;
6015     - if ((err = snd_pcm_oss_set_fragment1(substream, val)) < 0)
6016     + runtime = substream->runtime;
6017     + err = lock_params(runtime);
6018     + if (err < 0)
6019     + return err;
6020     + err = snd_pcm_oss_set_fragment1(substream, val);
6021     + unlock_params(runtime);
6022     + if (err < 0)
6023     return err;
6024     }
6025     return err;
6026     @@ -1961,6 +2062,9 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
6027     }
6028     if (psubstream) {
6029     runtime = psubstream->runtime;
6030     + cmd = 0;
6031     + if (mutex_lock_interruptible(&runtime->oss.params_lock))
6032     + return -ERESTARTSYS;
6033     if (trigger & PCM_ENABLE_OUTPUT) {
6034     if (runtime->oss.trigger)
6035     goto _skip1;
6036     @@ -1978,13 +2082,19 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
6037     cmd = SNDRV_PCM_IOCTL_DROP;
6038     runtime->oss.prepare = 1;
6039     }
6040     - err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
6041     - if (err < 0)
6042     - return err;
6043     - }
6044     _skip1:
6045     + mutex_unlock(&runtime->oss.params_lock);
6046     + if (cmd) {
6047     + err = snd_pcm_kernel_ioctl(psubstream, cmd, NULL);
6048     + if (err < 0)
6049     + return err;
6050     + }
6051     + }
6052     if (csubstream) {
6053     runtime = csubstream->runtime;
6054     + cmd = 0;
6055     + if (mutex_lock_interruptible(&runtime->oss.params_lock))
6056     + return -ERESTARTSYS;
6057     if (trigger & PCM_ENABLE_INPUT) {
6058     if (runtime->oss.trigger)
6059     goto _skip2;
6060     @@ -1999,11 +2109,14 @@ static int snd_pcm_oss_set_trigger(struct snd_pcm_oss_file *pcm_oss_file, int tr
6061     cmd = SNDRV_PCM_IOCTL_DROP;
6062     runtime->oss.prepare = 1;
6063     }
6064     - err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
6065     - if (err < 0)
6066     - return err;
6067     - }
6068     _skip2:
6069     + mutex_unlock(&runtime->oss.params_lock);
6070     + if (cmd) {
6071     + err = snd_pcm_kernel_ioctl(csubstream, cmd, NULL);
6072     + if (err < 0)
6073     + return err;
6074     + }
6075     + }
6076     return 0;
6077     }
6078    
6079     @@ -2255,6 +2368,7 @@ static void snd_pcm_oss_init_substream(struct snd_pcm_substream *substream,
6080     runtime->oss.maxfrags = 0;
6081     runtime->oss.subdivision = 0;
6082     substream->pcm_release = snd_pcm_oss_release_substream;
6083     + atomic_set(&runtime->oss.rw_ref, 0);
6084     }
6085    
6086     static int snd_pcm_oss_release_file(struct snd_pcm_oss_file *pcm_oss_file)
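Taken together, the pcm_oss.c hunks implement one locking protocol: read()/write() paths hold a count in runtime->oss.rw_ref for the duration of a transfer, and every ioctl that wants to change hw parameters must take params_lock and back off with -EBUSY while that count is non-zero; the parameter-changing work itself moves into *_locked() variants that assume the lock is already held. A condensed, hypothetical sketch of the two sides:

	/* ioctl side (see lock_params()/unlock_params() above): */
	if (mutex_lock_interruptible(&runtime->oss.params_lock))
		return -ERESTARTSYS;
	if (atomic_read(&runtime->oss.rw_ref)) {
		mutex_unlock(&runtime->oss.params_lock);
		return -EBUSY;			/* a transfer is in flight */
	}
	/* ... change runtime->oss.* and mark runtime->oss.params = 1 ... */
	mutex_unlock(&runtime->oss.params_lock);

	/* read/write side: */
	atomic_inc(&runtime->oss.rw_ref);
	/* ... per chunk: take params_lock, snd_pcm_oss_make_ready_locked(), transfer ... */
	atomic_dec(&runtime->oss.rw_ref);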
6087     diff --git a/sound/core/pcm.c b/sound/core/pcm.c
6088     index 7fea724d093a..e8dc1a5afe66 100644
6089     --- a/sound/core/pcm.c
6090     +++ b/sound/core/pcm.c
6091     @@ -28,6 +28,7 @@
6092     #include <sound/core.h>
6093     #include <sound/minors.h>
6094     #include <sound/pcm.h>
6095     +#include <sound/timer.h>
6096     #include <sound/control.h>
6097     #include <sound/info.h>
6098    
6099     @@ -1050,8 +1051,13 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream)
6100     snd_free_pages((void*)runtime->control,
6101     PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control)));
6102     kfree(runtime->hw_constraints.rules);
6103     - kfree(runtime);
6104     + /* Avoid concurrent access to runtime via PCM timer interface */
6105     + if (substream->timer)
6106     + spin_lock_irq(&substream->timer->lock);
6107     substream->runtime = NULL;
6108     + if (substream->timer)
6109     + spin_unlock_irq(&substream->timer->lock);
6110     + kfree(runtime);
6111     put_pid(substream->pid);
6112     substream->pid = NULL;
6113     substream->pstr->substream_opened--;
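The snd_pcm_detach_substream() change reorders teardown so that the PCM timer code, which reads substream->runtime under substream->timer->lock, can never observe a pointer to freed memory: the pointer is unpublished under that lock first and only then freed. Schematically (hypothetical simplification of the hunk above):

	if (substream->timer)
		spin_lock_irq(&substream->timer->lock);
	substream->runtime = NULL;		/* unpublish under the readers' lock */
	if (substream->timer)
		spin_unlock_irq(&substream->timer->lock);
	kfree(runtime);				/* nothing can reach it any more */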
6114     diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c
6115     index f69764d7cdd7..e30e30ba6e39 100644
6116     --- a/sound/core/rawmidi_compat.c
6117     +++ b/sound/core/rawmidi_compat.c
6118     @@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
6119     struct snd_rawmidi_params params;
6120     unsigned int val;
6121    
6122     - if (rfile->output == NULL)
6123     - return -EINVAL;
6124     if (get_user(params.stream, &src->stream) ||
6125     get_user(params.buffer_size, &src->buffer_size) ||
6126     get_user(params.avail_min, &src->avail_min) ||
6127     @@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile,
6128     params.no_active_sensing = val;
6129     switch (params.stream) {
6130     case SNDRV_RAWMIDI_STREAM_OUTPUT:
6131     + if (!rfile->output)
6132     + return -EINVAL;
6133     return snd_rawmidi_output_params(rfile->output, &params);
6134     case SNDRV_RAWMIDI_STREAM_INPUT:
6135     + if (!rfile->input)
6136     + return -EINVAL;
6137     return snd_rawmidi_input_params(rfile->input, &params);
6138     }
6139     return -EINVAL;
6140     @@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile,
6141     int err;
6142     struct snd_rawmidi_status status;
6143    
6144     - if (rfile->output == NULL)
6145     - return -EINVAL;
6146     if (get_user(status.stream, &src->stream))
6147     return -EFAULT;
6148    
6149     switch (status.stream) {
6150     case SNDRV_RAWMIDI_STREAM_OUTPUT:
6151     + if (!rfile->output)
6152     + return -EINVAL;
6153     err = snd_rawmidi_output_status(rfile->output, &status);
6154     break;
6155     case SNDRV_RAWMIDI_STREAM_INPUT:
6156     + if (!rfile->input)
6157     + return -EINVAL;
6158     err = snd_rawmidi_input_status(rfile->input, &status);
6159     break;
6160     default:
6161     @@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile,
6162     int err;
6163     struct snd_rawmidi_status status;
6164    
6165     - if (rfile->output == NULL)
6166     - return -EINVAL;
6167     if (get_user(status.stream, &src->stream))
6168     return -EFAULT;
6169    
6170     switch (status.stream) {
6171     case SNDRV_RAWMIDI_STREAM_OUTPUT:
6172     + if (!rfile->output)
6173     + return -EINVAL;
6174     err = snd_rawmidi_output_status(rfile->output, &status);
6175     break;
6176     case SNDRV_RAWMIDI_STREAM_INPUT:
6177     + if (!rfile->input)
6178     + return -EINVAL;
6179     err = snd_rawmidi_input_status(rfile->input, &status);
6180     break;
6181     default:
6182     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
6183     index c507c69029e3..738e1fe90312 100644
6184     --- a/sound/pci/hda/hda_intel.c
6185     +++ b/sound/pci/hda/hda_intel.c
6186     @@ -1645,7 +1645,8 @@ static void azx_check_snoop_available(struct azx *chip)
6187     */
6188     u8 val;
6189     pci_read_config_byte(chip->pci, 0x42, &val);
6190     - if (!(val & 0x80) && chip->pci->revision == 0x30)
6191     + if (!(val & 0x80) && (chip->pci->revision == 0x30 ||
6192     + chip->pci->revision == 0x20))
6193     snoop = false;
6194     }
6195    
6196     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
6197     index e44a9758f2eb..b1b28c6928a7 100644
6198     --- a/sound/pci/hda/patch_realtek.c
6199     +++ b/sound/pci/hda/patch_realtek.c
6200     @@ -6225,6 +6225,8 @@ static const struct hda_fixup alc269_fixups[] = {
6201     { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */
6202     { }
6203     },
6204     + .chained = true,
6205     + .chain_id = ALC269_FIXUP_HEADSET_MIC
6206     },
6207     };
6208    
6209     @@ -6428,6 +6430,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
6210     SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6211     SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6212     SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6213     + SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6214     SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
6215     SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
6216     SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
6217     diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c
6218     index 9b341c23f62b..5e80867d09ef 100644
6219     --- a/sound/soc/codecs/ssm2602.c
6220     +++ b/sound/soc/codecs/ssm2602.c
6221     @@ -54,10 +54,17 @@ struct ssm2602_priv {
6222     * using 2 wire for device control, so we cache them instead.
6223     * There is no point in caching the reset register
6224     */
6225     -static const u16 ssm2602_reg[SSM2602_CACHEREGNUM] = {
6226     - 0x0097, 0x0097, 0x0079, 0x0079,
6227     - 0x000a, 0x0008, 0x009f, 0x000a,
6228     - 0x0000, 0x0000
6229     +static const struct reg_default ssm2602_reg[SSM2602_CACHEREGNUM] = {
6230     + { .reg = 0x00, .def = 0x0097 },
6231     + { .reg = 0x01, .def = 0x0097 },
6232     + { .reg = 0x02, .def = 0x0079 },
6233     + { .reg = 0x03, .def = 0x0079 },
6234     + { .reg = 0x04, .def = 0x000a },
6235     + { .reg = 0x05, .def = 0x0008 },
6236     + { .reg = 0x06, .def = 0x009f },
6237     + { .reg = 0x07, .def = 0x000a },
6238     + { .reg = 0x08, .def = 0x0000 },
6239     + { .reg = 0x09, .def = 0x0000 }
6240     };
6241    
6242    
6243     @@ -620,8 +627,8 @@ const struct regmap_config ssm2602_regmap_config = {
6244     .volatile_reg = ssm2602_register_volatile,
6245    
6246     .cache_type = REGCACHE_RBTREE,
6247     - .reg_defaults_raw = ssm2602_reg,
6248     - .num_reg_defaults_raw = ARRAY_SIZE(ssm2602_reg),
6249     + .reg_defaults = ssm2602_reg,
6250     + .num_reg_defaults = ARRAY_SIZE(ssm2602_reg),
6251     };
6252     EXPORT_SYMBOL_GPL(ssm2602_regmap_config);
6253    
6254     diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
6255     index 01a50413c66f..782c580b7aa3 100644
6256     --- a/sound/soc/soc-topology.c
6257     +++ b/sound/soc/soc-topology.c
6258     @@ -523,6 +523,7 @@ static void remove_widget(struct snd_soc_component *comp,
6259     kfree(se->dobj.control.dtexts[j]);
6260    
6261     kfree(se);
6262     + kfree(w->kcontrol_news[i].name);
6263     }
6264     kfree(w->kcontrol_news);
6265     } else {
6266     @@ -540,6 +541,7 @@ static void remove_widget(struct snd_soc_component *comp,
6267     */
6268     kfree((void *)kcontrol->private_value);
6269     snd_ctl_remove(card, kcontrol);
6270     + kfree(w->kcontrol_news[i].name);
6271     }
6272     kfree(w->kcontrol_news);
6273     }
6274     @@ -1233,7 +1235,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
6275     dev_dbg(tplg->dev, " adding DAPM widget mixer control %s at %d\n",
6276     mc->hdr.name, i);
6277    
6278     - kc[i].name = mc->hdr.name;
6279     + kc[i].name = kstrdup(mc->hdr.name, GFP_KERNEL);
6280     + if (kc[i].name == NULL)
6281     + goto err_str;
6282     kc[i].private_value = (long)sm;
6283     kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
6284     kc[i].access = mc->hdr.access;
6285     @@ -1278,8 +1282,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dmixer_create(
6286     err_str:
6287     kfree(sm);
6288     err:
6289     - for (--i; i >= 0; i--)
6290     + for (--i; i >= 0; i--) {
6291     kfree((void *)kc[i].private_value);
6292     + kfree(kc[i].name);
6293     + }
6294     kfree(kc);
6295     return NULL;
6296     }
6297     @@ -1310,7 +1316,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
6298     dev_dbg(tplg->dev, " adding DAPM widget enum control %s\n",
6299     ec->hdr.name);
6300    
6301     - kc[i].name = ec->hdr.name;
6302     + kc[i].name = kstrdup(ec->hdr.name, GFP_KERNEL);
6303     + if (kc[i].name == NULL)
6304     + goto err_se;
6305     kc[i].private_value = (long)se;
6306     kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
6307     kc[i].access = ec->hdr.access;
6308     @@ -1386,6 +1394,7 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_denum_create(
6309     kfree(se->dobj.control.dtexts[j]);
6310    
6311     kfree(se);
6312     + kfree(kc[i].name);
6313     }
6314     err:
6315     kfree(kc);
6316     @@ -1424,7 +1433,9 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
6317     "ASoC: adding bytes kcontrol %s with access 0x%x\n",
6318     be->hdr.name, be->hdr.access);
6319    
6320     - kc[i].name = be->hdr.name;
6321     + kc[i].name = kstrdup(be->hdr.name, GFP_KERNEL);
6322     + if (kc[i].name == NULL)
6323     + goto err;
6324     kc[i].private_value = (long)sbe;
6325     kc[i].iface = SNDRV_CTL_ELEM_IFACE_MIXER;
6326     kc[i].access = be->hdr.access;
6327     @@ -1454,8 +1465,10 @@ static struct snd_kcontrol_new *soc_tplg_dapm_widget_dbytes_create(
6328     return kc;
6329    
6330     err:
6331     - for (--i; i >= 0; i--)
6332     + for (--i; i >= 0; i--) {
6333     kfree((void *)kc[i].private_value);
6334     + kfree(kc[i].name);
6335     + }
6336    
6337     kfree(kc);
6338     return NULL;
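The topology hunks change the ownership of widget control names: instead of borrowing the pointer out of the parsed topology data, each name is kstrdup()ed, which in turn obliges every unwind path, both the error paths in the *_create() helpers and remove_widget(), to free the copy. A hypothetical condensed form of the rule (hdr_name stands in for mc->hdr.name, ec->hdr.name or be->hdr.name):

	kc[i].name = kstrdup(hdr_name, GFP_KERNEL);	/* own a private copy */
	if (kc[i].name == NULL)
		goto err;
	/* ... */
err:
	for (--i; i >= 0; i--) {
		kfree((void *)kc[i].private_value);
		kfree(kc[i].name);			/* matching free on unwind */
	}
	kfree(kc);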
6339     diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c
6340     index 1d3a23b02d68..b5d62b5f59ba 100644
6341     --- a/sound/usb/line6/midi.c
6342     +++ b/sound/usb/line6/midi.c
6343     @@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data,
6344     }
6345    
6346     usb_fill_int_urb(urb, line6->usbdev,
6347     - usb_sndbulkpipe(line6->usbdev,
6348     + usb_sndintpipe(line6->usbdev,
6349     line6->properties->ep_ctrl_w),
6350     transfer_buffer, length, midi_sent, line6,
6351     line6->interval);
6352     diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
6353     index 59ce2fb49821..5a11f4d3972c 100644
6354     --- a/virt/kvm/arm/vgic/vgic-its.c
6355     +++ b/virt/kvm/arm/vgic/vgic-its.c
6356     @@ -309,21 +309,24 @@ static int vgic_copy_lpi_list(struct kvm_vcpu *vcpu, u32 **intid_ptr)
6357     struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
6358     struct vgic_irq *irq;
6359     u32 *intids;
6360     - int irq_count = dist->lpi_list_count, i = 0;
6361     + int irq_count, i = 0;
6362    
6363     /*
6364     - * We use the current value of the list length, which may change
6365     - * after the kmalloc. We don't care, because the guest shouldn't
6366     - * change anything while the command handling is still running,
6367     - * and in the worst case we would miss a new IRQ, which one wouldn't
6368     - * expect to be covered by this command anyway.
6369     + * There is an obvious race between allocating the array and LPIs
6370     + * being mapped/unmapped. If we ended up here as a result of a
6371     + * command, we're safe (locks are held, preventing another
6372     + * command). If coming from another path (such as enabling LPIs),
6373     + * we must be careful not to overrun the array.
6374     */
6375     + irq_count = READ_ONCE(dist->lpi_list_count);
6376     intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
6377     if (!intids)
6378     return -ENOMEM;
6379    
6380     spin_lock(&dist->lpi_list_lock);
6381     list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
6382     + if (i == irq_count)
6383     + break;
6384     /* We don't need to "get" the IRQ, as we hold the list lock. */
6385     if (irq->target_vcpu != vcpu)
6386     continue;
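The rewritten comment spells out the race; the code follows a snapshot-and-clamp pattern: the list length is read once with READ_ONCE() to size the array, and the copy loop then refuses to write past that snapshot even if LPIs were mapped in the meantime. A hypothetical condensed sketch:

	irq_count = READ_ONCE(dist->lpi_list_count);	/* one snapshot */
	intids = kmalloc_array(irq_count, sizeof(intids[0]), GFP_KERNEL);
	if (!intids)
		return -ENOMEM;

	spin_lock(&dist->lpi_list_lock);
	list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
		if (i == irq_count)
			break;		/* array full: never overrun the snapshot */
		/* ... skip LPIs for other vcpus, store irq->intid into intids[i++] ... */
	}
	spin_unlock(&dist->lpi_list_lock);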