Magellan Linux

Annotation of /trunk/kernel-alx/patches-4.9/0168-4.9.69-all-fixes.patch



Revision 3053
Wed Dec 20 11:50:05 2017 UTC by niro
File size: 136581 bytes
-linux-4.9.69
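
This revision imports the upstream 4.9.68 to 4.9.69 stable update as one cumulative patch; the Makefile hunk below bumps SUBLEVEL from 68 to 69. As a hedged usage sketch (not part of the patch itself): the file uses a/ and b/ path prefixes, so it would typically be applied from the top of a vanilla 4.9 source tree with -p1. The directory name below is only an assumed example:

    cd linux-4.9                                        # assumed location of an unpacked 4.9.68 tree
    patch -p1 --dry-run < 0168-4.9.69-all-fixes.patch   # verify the patch applies cleanly first
    patch -p1 < 0168-4.9.69-all-fixes.patch             # apply; the tree then builds as 4.9.69
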
1 niro 3053 diff --git a/Documentation/devicetree/bindings/usb/usb-device.txt b/Documentation/devicetree/bindings/usb/usb-device.txt
2     index 1c35e7b665e1..03ab8f5eab40 100644
3     --- a/Documentation/devicetree/bindings/usb/usb-device.txt
4     +++ b/Documentation/devicetree/bindings/usb/usb-device.txt
5     @@ -11,7 +11,7 @@ Required properties:
6     be used, but a device adhering to this binding may leave out all except
7     for usbVID,PID.
8     - reg: the port number which this device is connecting to, the range
9     - is 1-31.
10     + is 1-255.
11    
12     Example:
13    
14     diff --git a/Makefile b/Makefile
15     index dfe17af517b2..8f2819bf8135 100644
16     --- a/Makefile
17     +++ b/Makefile
18     @@ -1,6 +1,6 @@
19     VERSION = 4
20     PATCHLEVEL = 9
21     -SUBLEVEL = 68
22     +SUBLEVEL = 69
23     EXTRAVERSION =
24     NAME = Roaring Lionus
25    
26     diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
27     index 68b06f9c65de..12f99fd2e3b2 100644
28     --- a/arch/arm/include/asm/assembler.h
29     +++ b/arch/arm/include/asm/assembler.h
30     @@ -516,4 +516,22 @@ THUMB( orr \reg , \reg , #PSR_T_BIT )
31     #endif
32     .endm
33    
34     + .macro bug, msg, line
35     +#ifdef CONFIG_THUMB2_KERNEL
36     +1: .inst 0xde02
37     +#else
38     +1: .inst 0xe7f001f2
39     +#endif
40     +#ifdef CONFIG_DEBUG_BUGVERBOSE
41     + .pushsection .rodata.str, "aMS", %progbits, 1
42     +2: .asciz "\msg"
43     + .popsection
44     + .pushsection __bug_table, "aw"
45     + .align 2
46     + .word 1b, 2b
47     + .hword \line
48     + .popsection
49     +#endif
50     + .endm
51     +
52     #endif /* __ASM_ASSEMBLER_H__ */
53     diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h
54     index e22089fb44dc..98d6de177b7a 100644
55     --- a/arch/arm/include/asm/kvm_arm.h
56     +++ b/arch/arm/include/asm/kvm_arm.h
57     @@ -161,8 +161,7 @@
58     #else
59     #define VTTBR_X (5 - KVM_T0SZ)
60     #endif
61     -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
62     -#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
63     +#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
64     #define VTTBR_VMID_SHIFT _AC(48, ULL)
65     #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
66    
67     @@ -209,6 +208,7 @@
68     #define HSR_EC_IABT_HYP (0x21)
69     #define HSR_EC_DABT (0x24)
70     #define HSR_EC_DABT_HYP (0x25)
71     +#define HSR_EC_MAX (0x3f)
72    
73     #define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
74    
75     diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
76     index 1f59ea051bab..b7e0125c0bbf 100644
77     --- a/arch/arm/include/asm/uaccess.h
78     +++ b/arch/arm/include/asm/uaccess.h
79     @@ -478,11 +478,10 @@ extern unsigned long __must_check
80     arm_copy_from_user(void *to, const void __user *from, unsigned long n);
81    
82     static inline unsigned long __must_check
83     -__copy_from_user(void *to, const void __user *from, unsigned long n)
84     +__arch_copy_from_user(void *to, const void __user *from, unsigned long n)
85     {
86     unsigned int __ua_flags;
87    
88     - check_object_size(to, n, false);
89     __ua_flags = uaccess_save_and_enable();
90     n = arm_copy_from_user(to, from, n);
91     uaccess_restore(__ua_flags);
92     @@ -495,18 +494,15 @@ extern unsigned long __must_check
93     __copy_to_user_std(void __user *to, const void *from, unsigned long n);
94    
95     static inline unsigned long __must_check
96     -__copy_to_user(void __user *to, const void *from, unsigned long n)
97     +__arch_copy_to_user(void __user *to, const void *from, unsigned long n)
98     {
99     #ifndef CONFIG_UACCESS_WITH_MEMCPY
100     unsigned int __ua_flags;
101     -
102     - check_object_size(from, n, true);
103     __ua_flags = uaccess_save_and_enable();
104     n = arm_copy_to_user(to, from, n);
105     uaccess_restore(__ua_flags);
106     return n;
107     #else
108     - check_object_size(from, n, true);
109     return arm_copy_to_user(to, from, n);
110     #endif
111     }
112     @@ -526,25 +522,49 @@ __clear_user(void __user *addr, unsigned long n)
113     }
114    
115     #else
116     -#define __copy_from_user(to, from, n) (memcpy(to, (void __force *)from, n), 0)
117     -#define __copy_to_user(to, from, n) (memcpy((void __force *)to, from, n), 0)
118     +#define __arch_copy_from_user(to, from, n) \
119     + (memcpy(to, (void __force *)from, n), 0)
120     +#define __arch_copy_to_user(to, from, n) \
121     + (memcpy((void __force *)to, from, n), 0)
122     #define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0)
123     #endif
124    
125     -static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
126     +static inline unsigned long __must_check
127     +__copy_from_user(void *to, const void __user *from, unsigned long n)
128     +{
129     + check_object_size(to, n, false);
130     + return __arch_copy_from_user(to, from, n);
131     +}
132     +
133     +static inline unsigned long __must_check
134     +copy_from_user(void *to, const void __user *from, unsigned long n)
135     {
136     unsigned long res = n;
137     +
138     + check_object_size(to, n, false);
139     +
140     if (likely(access_ok(VERIFY_READ, from, n)))
141     - res = __copy_from_user(to, from, n);
142     + res = __arch_copy_from_user(to, from, n);
143     if (unlikely(res))
144     memset(to + (n - res), 0, res);
145     return res;
146     }
147    
148     -static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
149     +static inline unsigned long __must_check
150     +__copy_to_user(void __user *to, const void *from, unsigned long n)
151     {
152     + check_object_size(from, n, true);
153     +
154     + return __arch_copy_to_user(to, from, n);
155     +}
156     +
157     +static inline unsigned long __must_check
158     +copy_to_user(void __user *to, const void *from, unsigned long n)
159     +{
160     + check_object_size(from, n, true);
161     +
162     if (access_ok(VERIFY_WRITE, to, n))
163     - n = __copy_to_user(to, from, n);
164     + n = __arch_copy_to_user(to, from, n);
165     return n;
166     }
167    
168     diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
169     index 6391728c8f03..e056c9a9aa9d 100644
170     --- a/arch/arm/kernel/entry-header.S
171     +++ b/arch/arm/kernel/entry-header.S
172     @@ -299,6 +299,8 @@
173     mov r2, sp
174     ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
175     ldr lr, [r2, #\offset + S_PC]! @ get pc
176     + tst r1, #PSR_I_BIT | 0x0f
177     + bne 1f
178     msr spsr_cxsf, r1 @ save in spsr_svc
179     #if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
180     @ We must avoid clrex due to Cortex-A15 erratum #830321
181     @@ -313,6 +315,7 @@
182     @ after ldm {}^
183     add sp, sp, #\offset + PT_REGS_SIZE
184     movs pc, lr @ return & move spsr_svc into cpsr
185     +1: bug "Returning to usermode but unexpected PSR bits set?", \@
186     #elif defined(CONFIG_CPU_V7M)
187     @ V7M restore.
188     @ Note that we don't need to do clrex here as clearing the local
189     @@ -328,6 +331,8 @@
190     ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
191     ldr lr, [sp, #\offset + S_PC] @ get pc
192     add sp, sp, #\offset + S_SP
193     + tst r1, #PSR_I_BIT | 0x0f
194     + bne 1f
195     msr spsr_cxsf, r1 @ save in spsr_svc
196    
197     @ We must avoid clrex due to Cortex-A15 erratum #830321
198     @@ -340,6 +345,7 @@
199     .endif
200     add sp, sp, #PT_REGS_SIZE - S_SP
201     movs pc, lr @ return & move spsr_svc into cpsr
202     +1: bug "Returning to usermode but unexpected PSR bits set?", \@
203     #endif /* !CONFIG_THUMB2_KERNEL */
204     .endm
205    
206     diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
207     index 066b6d4508ce..42f5daf715d0 100644
208     --- a/arch/arm/kvm/handle_exit.c
209     +++ b/arch/arm/kvm/handle_exit.c
210     @@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
211     return 1;
212     }
213    
214     +static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
215     +{
216     + u32 hsr = kvm_vcpu_get_hsr(vcpu);
217     +
218     + kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
219     + hsr);
220     +
221     + kvm_inject_undefined(vcpu);
222     + return 1;
223     +}
224     +
225     static exit_handle_fn arm_exit_handlers[] = {
226     + [0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
227     [HSR_EC_WFI] = kvm_handle_wfx,
228     [HSR_EC_CP15_32] = kvm_handle_cp15_32,
229     [HSR_EC_CP15_64] = kvm_handle_cp15_64,
230     @@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
231     {
232     u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
233    
234     - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
235     - !arm_exit_handlers[hsr_ec]) {
236     - kvm_err("Unknown exception class: hsr: %#08x\n",
237     - (unsigned int)kvm_vcpu_get_hsr(vcpu));
238     - BUG();
239     - }
240     -
241     return arm_exit_handlers[hsr_ec];
242     }
243    
244     diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
245     index 8633c703546a..2944af820558 100644
246     --- a/arch/arm/mach-omap2/gpmc-onenand.c
247     +++ b/arch/arm/mach-omap2/gpmc-onenand.c
248     @@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr)
249     return ret;
250     }
251    
252     -void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
253     +int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
254     {
255     int err;
256     struct device *dev = &gpmc_onenand_device.dev;
257     @@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data)
258     if (err < 0) {
259     dev_err(dev, "Cannot request GPMC CS %d, error %d\n",
260     gpmc_onenand_data->cs, err);
261     - return;
262     + return err;
263     }
264    
265     gpmc_onenand_resource.end = gpmc_onenand_resource.start +
266     ONENAND_IO_SIZE - 1;
267    
268     - if (platform_device_register(&gpmc_onenand_device) < 0) {
269     + err = platform_device_register(&gpmc_onenand_device);
270     + if (err) {
271     dev_err(dev, "Unable to register OneNAND device\n");
272     gpmc_cs_free(gpmc_onenand_data->cs);
273     - return;
274     }
275     +
276     + return err;
277     }
278     diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
279     index 1cc4a6f3954e..bca54154e14f 100644
280     --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
281     +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
282     @@ -3828,16 +3828,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = {
283     * Return: 0 if device named @dev_name is not likely to be accessible,
284     * or 1 if it is likely to be accessible.
285     */
286     -static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
287     - const char *dev_name)
288     +static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus,
289     + const char *dev_name)
290     {
291     + struct device_node *node;
292     + bool available;
293     +
294     if (!bus)
295     - return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0;
296     + return omap_type() == OMAP2_DEVICE_TYPE_GP;
297    
298     - if (of_device_is_available(of_find_node_by_name(bus, dev_name)))
299     - return 1;
300     + node = of_get_child_by_name(bus, dev_name);
301     + available = of_device_is_available(node);
302     + of_node_put(node);
303    
304     - return 0;
305     + return available;
306     }
307    
308     int __init omap3xxx_hwmod_init(void)
309     @@ -3906,15 +3910,20 @@ int __init omap3xxx_hwmod_init(void)
310    
311     if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) {
312     r = omap_hwmod_register_links(h_sham);
313     - if (r < 0)
314     + if (r < 0) {
315     + of_node_put(bus);
316     return r;
317     + }
318     }
319    
320     if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) {
321     r = omap_hwmod_register_links(h_aes);
322     - if (r < 0)
323     + if (r < 0) {
324     + of_node_put(bus);
325     return r;
326     + }
327     }
328     + of_node_put(bus);
329    
330     /*
331     * Register hwmod links specific to certain ES levels of a
332     diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
333     index 2a2752b5b6aa..0dbc1c6ab7dc 100644
334     --- a/arch/arm64/include/asm/kvm_arm.h
335     +++ b/arch/arm64/include/asm/kvm_arm.h
336     @@ -170,8 +170,7 @@
337     #define VTCR_EL2_FLAGS (VTCR_EL2_COMMON_BITS | VTCR_EL2_TGRAN_FLAGS)
338     #define VTTBR_X (VTTBR_X_TGRAN_MAGIC - VTCR_EL2_T0SZ_IPA)
339    
340     -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
341     -#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
342     +#define VTTBR_BADDR_MASK (((UL(1) << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_X)
343     #define VTTBR_VMID_SHIFT (UL(48))
344     #define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
345    
346     diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
347     index 01753cd7d3f0..0e7394915c70 100644
348     --- a/arch/arm64/kernel/process.c
349     +++ b/arch/arm64/kernel/process.c
350     @@ -255,6 +255,15 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
351    
352     memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));
353    
354     + /*
355     + * In case p was allocated the same task_struct pointer as some
356     + * other recently-exited task, make sure p is disassociated from
357     + * any cpu that may have run that now-exited task recently.
358     + * Otherwise we could erroneously skip reloading the FPSIMD
359     + * registers for p.
360     + */
361     + fpsimd_flush_task_state(p);
362     +
363     if (likely(!(p->flags & PF_KTHREAD))) {
364     *childregs = *current_pt_regs();
365     childregs->regs[0] = 0;
366     diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
367     index a204adf29f0a..85baadab02d3 100644
368     --- a/arch/arm64/kvm/handle_exit.c
369     +++ b/arch/arm64/kvm/handle_exit.c
370     @@ -125,7 +125,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
371     return ret;
372     }
373    
374     +static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
375     +{
376     + u32 hsr = kvm_vcpu_get_hsr(vcpu);
377     +
378     + kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
379     + hsr, esr_get_class_string(hsr));
380     +
381     + kvm_inject_undefined(vcpu);
382     + return 1;
383     +}
384     +
385     static exit_handle_fn arm_exit_handlers[] = {
386     + [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
387     [ESR_ELx_EC_WFx] = kvm_handle_wfx,
388     [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32,
389     [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64,
390     @@ -151,13 +163,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
391     u32 hsr = kvm_vcpu_get_hsr(vcpu);
392     u8 hsr_ec = ESR_ELx_EC(hsr);
393    
394     - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
395     - !arm_exit_handlers[hsr_ec]) {
396     - kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
397     - hsr, esr_get_class_string(hsr));
398     - BUG();
399     - }
400     -
401     return arm_exit_handlers[hsr_ec];
402     }
403    
404     diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
405     index 617dece67924..a60c9c6e5cc1 100644
406     --- a/arch/powerpc/Makefile
407     +++ b/arch/powerpc/Makefile
408     @@ -72,8 +72,15 @@ GNUTARGET := powerpc
409     MULTIPLEWORD := -mmultiple
410     endif
411    
412     -cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
413     +ifdef CONFIG_PPC64
414     +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
415     +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc)
416     +aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1)
417     +aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2
418     +endif
419     +
420     cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian
421     +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian)
422     ifneq ($(cc-name),clang)
423     cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align
424     endif
425     @@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y)
426     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc))
427     AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2)
428     else
429     +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
430     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc)
431     +AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1)
432     endif
433     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc))
434     CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions)
435     diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
436     index 1e8fceb308a5..a67bb09585f4 100644
437     --- a/arch/powerpc/include/asm/checksum.h
438     +++ b/arch/powerpc/include/asm/checksum.h
439     @@ -100,7 +100,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
440    
441     #ifdef __powerpc64__
442     res += (__force u64)addend;
443     - return (__force __wsum)((u32)res + (res >> 32));
444     + return (__force __wsum) from64to32(res);
445     #else
446     asm("addc %0,%0,%1;"
447     "addze %0,%0;"
448     diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
449     index 7803756998e2..9e05c8828ee2 100644
450     --- a/arch/powerpc/kernel/cpu_setup_power.S
451     +++ b/arch/powerpc/kernel/cpu_setup_power.S
452     @@ -97,6 +97,7 @@ _GLOBAL(__setup_cpu_power9)
453     beqlr
454     li r0,0
455     mtspr SPRN_LPID,r0
456     + mtspr SPRN_PID,r0
457     mfspr r3,SPRN_LPCR
458     LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
459     or r3, r3, r4
460     @@ -119,6 +120,7 @@ _GLOBAL(__restore_cpu_power9)
461     beqlr
462     li r0,0
463     mtspr SPRN_LPID,r0
464     + mtspr SPRN_PID,r0
465     mfspr r3,SPRN_LPCR
466     LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
467     or r3, r3, r4
468     diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c
469     index 9a25dce87875..44c33ee397a0 100644
470     --- a/arch/powerpc/mm/pgtable-radix.c
471     +++ b/arch/powerpc/mm/pgtable-radix.c
472     @@ -173,6 +173,10 @@ static void __init radix_init_pgtable(void)
473     */
474     register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12);
475     pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd);
476     + asm volatile("ptesync" : : : "memory");
477     + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
478     + "r" (TLBIEL_INVAL_SET_LPID), "r" (0));
479     + asm volatile("eieio; tlbsync; ptesync" : : : "memory");
480     }
481    
482     static void __init radix_init_partition_table(void)
483     diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
484     index dcdfee0cd4f2..f602307a4386 100644
485     --- a/arch/powerpc/platforms/powernv/pci-ioda.c
486     +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
487     @@ -2623,6 +2623,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
488     level_shift = entries_shift + 3;
489     level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
490    
491     + if ((level_shift - 3) * levels + page_shift >= 60)
492     + return -EINVAL;
493     +
494     /* Allocate TCE table */
495     addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
496     levels, tce_table_size, &offset, &total_allocated);
497     diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
498     index ada29eaed6e2..f523ac883150 100644
499     --- a/arch/powerpc/sysdev/axonram.c
500     +++ b/arch/powerpc/sysdev/axonram.c
501     @@ -274,7 +274,9 @@ static int axon_ram_probe(struct platform_device *device)
502     if (bank->disk->major > 0)
503     unregister_blkdev(bank->disk->major,
504     bank->disk->disk_name);
505     - del_gendisk(bank->disk);
506     + if (bank->disk->flags & GENHD_FL_UP)
507     + del_gendisk(bank->disk);
508     + put_disk(bank->disk);
509     }
510     device->dev.platform_data = NULL;
511     if (bank->io_addr != 0)
512     @@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device)
513     device_remove_file(&device->dev, &dev_attr_ecc);
514     free_irq(bank->irq_id, device);
515     del_gendisk(bank->disk);
516     + put_disk(bank->disk);
517     iounmap((void __iomem *) bank->io_addr);
518     kfree(bank);
519    
520     diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
521     index 9b59e6212d8f..709da452413d 100644
522     --- a/arch/s390/kernel/syscalls.S
523     +++ b/arch/s390/kernel/syscalls.S
524     @@ -369,10 +369,10 @@ SYSCALL(sys_recvmmsg,compat_sys_recvmmsg)
525     SYSCALL(sys_sendmmsg,compat_sys_sendmmsg)
526     SYSCALL(sys_socket,sys_socket)
527     SYSCALL(sys_socketpair,compat_sys_socketpair) /* 360 */
528     -SYSCALL(sys_bind,sys_bind)
529     -SYSCALL(sys_connect,sys_connect)
530     +SYSCALL(sys_bind,compat_sys_bind)
531     +SYSCALL(sys_connect,compat_sys_connect)
532     SYSCALL(sys_listen,sys_listen)
533     -SYSCALL(sys_accept4,sys_accept4)
534     +SYSCALL(sys_accept4,compat_sys_accept4)
535     SYSCALL(sys_getsockopt,compat_sys_getsockopt) /* 365 */
536     SYSCALL(sys_setsockopt,compat_sys_setsockopt)
537     SYSCALL(sys_getsockname,compat_sys_getsockname)
538     diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
539     index e18435355c16..c2905a10cb37 100644
540     --- a/arch/s390/kvm/priv.c
541     +++ b/arch/s390/kvm/priv.c
542     @@ -197,8 +197,6 @@ static int try_handle_skey(struct kvm_vcpu *vcpu)
543     VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
544     return -EAGAIN;
545     }
546     - if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
547     - return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
548     return 0;
549     }
550    
551     @@ -209,6 +207,9 @@ static int handle_iske(struct kvm_vcpu *vcpu)
552     int reg1, reg2;
553     int rc;
554    
555     + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
556     + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
557     +
558     rc = try_handle_skey(vcpu);
559     if (rc)
560     return rc != -EAGAIN ? rc : 0;
561     @@ -238,6 +239,9 @@ static int handle_rrbe(struct kvm_vcpu *vcpu)
562     int reg1, reg2;
563     int rc;
564    
565     + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
566     + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
567     +
568     rc = try_handle_skey(vcpu);
569     if (rc)
570     return rc != -EAGAIN ? rc : 0;
571     @@ -273,6 +277,9 @@ static int handle_sske(struct kvm_vcpu *vcpu)
572     int reg1, reg2;
573     int rc;
574    
575     + if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
576     + return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
577     +
578     rc = try_handle_skey(vcpu);
579     if (rc)
580     return rc != -EAGAIN ? rc : 0;
581     diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
582     index 57154c638e71..0f183ffe3416 100644
583     --- a/arch/sparc/mm/init_64.c
584     +++ b/arch/sparc/mm/init_64.c
585     @@ -2391,9 +2391,16 @@ void __init mem_init(void)
586     {
587     high_memory = __va(last_valid_pfn << PAGE_SHIFT);
588    
589     - register_page_bootmem_info();
590     free_all_bootmem();
591    
592     + /*
593     + * Must be done after boot memory is put on freelist, because here we
594     + * might set fields in deferred struct pages that have not yet been
595     + * initialized, and free_all_bootmem() initializes all the reserved
596     + * deferred pages for us.
597     + */
598     + register_page_bootmem_info();
599     +
600     /*
601     * Set up the zero page, mark it reserved, so that page count
602     * is not manipulated when freeing the page from user ptes.
603     diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
604     index bdde80731f49..cbd1d44da2d3 100644
605     --- a/arch/x86/include/asm/kvm_host.h
606     +++ b/arch/x86/include/asm/kvm_host.h
607     @@ -1397,4 +1397,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
608     #endif
609     }
610    
611     +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
612     + unsigned long start, unsigned long end);
613     +
614     #endif /* _ASM_X86_KVM_HOST_H */
615     diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
616     index 932348fbb6ea..9512529e8eab 100644
617     --- a/arch/x86/kernel/hpet.c
618     +++ b/arch/x86/kernel/hpet.c
619     @@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer)
620    
621     irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq));
622     irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
623     - disable_irq(hdev->irq);
624     + disable_hardirq(hdev->irq);
625     irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
626     enable_irq(hdev->irq);
627     }
628     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
629     index f0d3de153e29..9aa62ab13ae8 100644
630     --- a/arch/x86/kvm/vmx.c
631     +++ b/arch/x86/kvm/vmx.c
632     @@ -6413,12 +6413,7 @@ static __init int hardware_setup(void)
633     memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
634     memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
635    
636     - /*
637     - * Allow direct access to the PC debug port (it is often used for I/O
638     - * delays, but the vmexits simply slow things down).
639     - */
640     memset(vmx_io_bitmap_a, 0xff, PAGE_SIZE);
641     - clear_bit(0x80, vmx_io_bitmap_a);
642    
643     memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
644    
645     @@ -7208,9 +7203,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
646     static int handle_vmclear(struct kvm_vcpu *vcpu)
647     {
648     struct vcpu_vmx *vmx = to_vmx(vcpu);
649     + u32 zero = 0;
650     gpa_t vmptr;
651     - struct vmcs12 *vmcs12;
652     - struct page *page;
653    
654     if (!nested_vmx_check_permission(vcpu))
655     return 1;
656     @@ -7221,22 +7215,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu)
657     if (vmptr == vmx->nested.current_vmptr)
658     nested_release_vmcs12(vmx);
659    
660     - page = nested_get_page(vcpu, vmptr);
661     - if (page == NULL) {
662     - /*
663     - * For accurate processor emulation, VMCLEAR beyond available
664     - * physical memory should do nothing at all. However, it is
665     - * possible that a nested vmx bug, not a guest hypervisor bug,
666     - * resulted in this case, so let's shut down before doing any
667     - * more damage:
668     - */
669     - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
670     - return 1;
671     - }
672     - vmcs12 = kmap(page);
673     - vmcs12->launch_state = 0;
674     - kunmap(page);
675     - nested_release_page(page);
676     + kvm_vcpu_write_guest(vcpu,
677     + vmptr + offsetof(struct vmcs12, launch_state),
678     + &zero, sizeof(zero));
679    
680     nested_free_vmcs02(vmx, vmptr);
681    
682     @@ -10903,8 +10884,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
683     */
684     static void vmx_leave_nested(struct kvm_vcpu *vcpu)
685     {
686     - if (is_guest_mode(vcpu))
687     + if (is_guest_mode(vcpu)) {
688     + to_vmx(vcpu)->nested.nested_run_pending = 0;
689     nested_vmx_vmexit(vcpu, -1, 0, 0);
690     + }
691     free_nested(to_vmx(vcpu));
692     }
693    
694     diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
695     index 02d45296a97c..26b580ad268f 100644
696     --- a/arch/x86/kvm/x86.c
697     +++ b/arch/x86/kvm/x86.c
698     @@ -6526,6 +6526,20 @@ static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
699     kvm_x86_ops->tlb_flush(vcpu);
700     }
701    
702     +void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
703     + unsigned long start, unsigned long end)
704     +{
705     + unsigned long apic_address;
706     +
707     + /*
708     + * The physical address of apic access page is stored in the VMCS.
709     + * Update it when it becomes invalid.
710     + */
711     + apic_address = gfn_to_hva(kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
712     + if (start <= apic_address && apic_address < end)
713     + kvm_make_all_cpus_request(kvm, KVM_REQ_APIC_PAGE_RELOAD);
714     +}
715     +
716     void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
717     {
718     struct page *page = NULL;
719     diff --git a/arch/x86/pci/broadcom_bus.c b/arch/x86/pci/broadcom_bus.c
720     index bb461cfd01ab..526536c81ddc 100644
721     --- a/arch/x86/pci/broadcom_bus.c
722     +++ b/arch/x86/pci/broadcom_bus.c
723     @@ -97,7 +97,7 @@ static int __init broadcom_postcore_init(void)
724     * We should get host bridge information from ACPI unless the BIOS
725     * doesn't support it.
726     */
727     - if (acpi_os_get_root_pointer())
728     + if (!acpi_disabled && acpi_os_get_root_pointer())
729     return 0;
730     #endif
731    
732     diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
733     index 9e42842e924a..0f0175186f1b 100644
734     --- a/arch/x86/platform/uv/tlb_uv.c
735     +++ b/arch/x86/platform/uv/tlb_uv.c
736     @@ -1848,7 +1848,6 @@ static void pq_init(int node, int pnode)
737    
738     ops.write_payload_first(pnode, first);
739     ops.write_payload_last(pnode, last);
740     - ops.write_g_sw_ack(pnode, 0xffffUL);
741    
742     /* in effect, all msg_type's are set to MSG_NOOP */
743     memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
744     diff --git a/block/blk-core.c b/block/blk-core.c
745     index b1c76aa73492..23daf40be371 100644
746     --- a/block/blk-core.c
747     +++ b/block/blk-core.c
748     @@ -527,8 +527,8 @@ void blk_set_queue_dying(struct request_queue *q)
749    
750     blk_queue_for_each_rl(rl, q) {
751     if (rl->rq_pool) {
752     - wake_up(&rl->wait[BLK_RW_SYNC]);
753     - wake_up(&rl->wait[BLK_RW_ASYNC]);
754     + wake_up_all(&rl->wait[BLK_RW_SYNC]);
755     + wake_up_all(&rl->wait[BLK_RW_ASYNC]);
756     }
757     }
758     }
759     diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
760     index 01fb455d3377..8c0894e0713b 100644
761     --- a/block/blk-mq-sysfs.c
762     +++ b/block/blk-mq-sysfs.c
763     @@ -429,7 +429,7 @@ void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
764     kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
765     }
766    
767     -static void blk_mq_sysfs_init(struct request_queue *q)
768     +void blk_mq_sysfs_init(struct request_queue *q)
769     {
770     struct blk_mq_ctx *ctx;
771     int cpu;
772     @@ -449,8 +449,6 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)
773    
774     blk_mq_disable_hotplug();
775    
776     - blk_mq_sysfs_init(q);
777     -
778     ret = kobject_add(&q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
779     if (ret < 0)
780     goto out;
781     diff --git a/block/blk-mq.c b/block/blk-mq.c
782     index 7b597ec4e9c5..10f8f94b7f20 100644
783     --- a/block/blk-mq.c
784     +++ b/block/blk-mq.c
785     @@ -1707,7 +1707,6 @@ static void blk_mq_init_cpu_queues(struct request_queue *q,
786     struct blk_mq_ctx *__ctx = per_cpu_ptr(q->queue_ctx, i);
787     struct blk_mq_hw_ctx *hctx;
788    
789     - memset(__ctx, 0, sizeof(*__ctx));
790     __ctx->cpu = i;
791     spin_lock_init(&__ctx->lock);
792     INIT_LIST_HEAD(&__ctx->rq_list);
793     @@ -1970,6 +1969,9 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
794     if (!q->queue_ctx)
795     goto err_exit;
796    
797     + /* init q->mq_kobj and sw queues' kobjects */
798     + blk_mq_sysfs_init(q);
799     +
800     q->queue_hw_ctx = kzalloc_node(nr_cpu_ids * sizeof(*(q->queue_hw_ctx)),
801     GFP_KERNEL, set->numa_node);
802     if (!q->queue_hw_ctx)
803     diff --git a/block/blk-mq.h b/block/blk-mq.h
804     index e5d25249028c..c55bcf67b956 100644
805     --- a/block/blk-mq.h
806     +++ b/block/blk-mq.h
807     @@ -50,6 +50,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
808     /*
809     * sysfs helpers
810     */
811     +extern void blk_mq_sysfs_init(struct request_queue *q);
812     extern int blk_mq_sysfs_register(struct request_queue *q);
813     extern void blk_mq_sysfs_unregister(struct request_queue *q);
814     extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
815     diff --git a/crypto/asymmetric_keys/pkcs7_verify.c b/crypto/asymmetric_keys/pkcs7_verify.c
816     index 2ffd69769466..5a37962d2199 100644
817     --- a/crypto/asymmetric_keys/pkcs7_verify.c
818     +++ b/crypto/asymmetric_keys/pkcs7_verify.c
819     @@ -150,7 +150,7 @@ static int pkcs7_find_key(struct pkcs7_message *pkcs7,
820     pr_devel("Sig %u: Found cert serial match X.509[%u]\n",
821     sinfo->index, certix);
822    
823     - if (x509->pub->pkey_algo != sinfo->sig->pkey_algo) {
824     + if (strcmp(x509->pub->pkey_algo, sinfo->sig->pkey_algo) != 0) {
825     pr_warn("Sig %u: X.509 algo and PKCS#7 sig algo don't match\n",
826     sinfo->index);
827     continue;
828     diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
829     index c80765b211cf..029f7051f2be 100644
830     --- a/crypto/asymmetric_keys/x509_cert_parser.c
831     +++ b/crypto/asymmetric_keys/x509_cert_parser.c
832     @@ -408,6 +408,8 @@ int x509_extract_key_data(void *context, size_t hdrlen,
833     ctx->cert->pub->pkey_algo = "rsa";
834    
835     /* Discard the BIT STRING metadata */
836     + if (vlen < 1 || *(const u8 *)value != 0)
837     + return -EBADMSG;
838     ctx->key = value + 1;
839     ctx->key_size = vlen - 1;
840     return 0;
841     diff --git a/crypto/asymmetric_keys/x509_public_key.c b/crypto/asymmetric_keys/x509_public_key.c
842     index fb732296cd36..e16009a8da9c 100644
843     --- a/crypto/asymmetric_keys/x509_public_key.c
844     +++ b/crypto/asymmetric_keys/x509_public_key.c
845     @@ -125,7 +125,7 @@ int x509_check_for_self_signed(struct x509_certificate *cert)
846     }
847    
848     ret = -EKEYREJECTED;
849     - if (cert->pub->pkey_algo != cert->sig->pkey_algo)
850     + if (strcmp(cert->pub->pkey_algo, cert->sig->pkey_algo) != 0)
851     goto out;
852    
853     ret = public_key_verify_signature(cert->pub, cert->sig);
854     diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
855     index 051b6158d1b7..8d22acdf90f0 100644
856     --- a/drivers/ata/libata-sff.c
857     +++ b/drivers/ata/libata-sff.c
858     @@ -1481,7 +1481,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
859     break;
860    
861     default:
862     - WARN_ON_ONCE(1);
863     return AC_ERR_SYSTEM;
864     }
865    
866     diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
867     index 5fc81e240c24..e55f418d6ab9 100644
868     --- a/drivers/atm/horizon.c
869     +++ b/drivers/atm/horizon.c
870     @@ -2802,7 +2802,7 @@ static int hrz_probe(struct pci_dev *pci_dev,
871     return err;
872    
873     out_free_irq:
874     - free_irq(dev->irq, dev);
875     + free_irq(irq, dev);
876     out_free:
877     kfree(dev);
878     out_release:
879     diff --git a/drivers/base/isa.c b/drivers/base/isa.c
880     index cd6ccdcf9df0..372d10af2600 100644
881     --- a/drivers/base/isa.c
882     +++ b/drivers/base/isa.c
883     @@ -39,7 +39,7 @@ static int isa_bus_probe(struct device *dev)
884     {
885     struct isa_driver *isa_driver = dev->platform_data;
886    
887     - if (isa_driver->probe)
888     + if (isa_driver && isa_driver->probe)
889     return isa_driver->probe(dev, to_isa_dev(dev)->id);
890    
891     return 0;
892     @@ -49,7 +49,7 @@ static int isa_bus_remove(struct device *dev)
893     {
894     struct isa_driver *isa_driver = dev->platform_data;
895    
896     - if (isa_driver->remove)
897     + if (isa_driver && isa_driver->remove)
898     return isa_driver->remove(dev, to_isa_dev(dev)->id);
899    
900     return 0;
901     @@ -59,7 +59,7 @@ static void isa_bus_shutdown(struct device *dev)
902     {
903     struct isa_driver *isa_driver = dev->platform_data;
904    
905     - if (isa_driver->shutdown)
906     + if (isa_driver && isa_driver->shutdown)
907     isa_driver->shutdown(dev, to_isa_dev(dev)->id);
908     }
909    
910     @@ -67,7 +67,7 @@ static int isa_bus_suspend(struct device *dev, pm_message_t state)
911     {
912     struct isa_driver *isa_driver = dev->platform_data;
913    
914     - if (isa_driver->suspend)
915     + if (isa_driver && isa_driver->suspend)
916     return isa_driver->suspend(dev, to_isa_dev(dev)->id, state);
917    
918     return 0;
919     @@ -77,7 +77,7 @@ static int isa_bus_resume(struct device *dev)
920     {
921     struct isa_driver *isa_driver = dev->platform_data;
922    
923     - if (isa_driver->resume)
924     + if (isa_driver && isa_driver->resume)
925     return isa_driver->resume(dev, to_isa_dev(dev)->id);
926    
927     return 0;
928     diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
929     index c9914d653968..b7c0b69a02f5 100644
930     --- a/drivers/block/zram/zram_drv.c
931     +++ b/drivers/block/zram/zram_drv.c
932     @@ -1286,6 +1286,8 @@ static int zram_add(void)
933     blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
934     blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
935     zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
936     + zram->disk->queue->limits.max_sectors = SECTORS_PER_PAGE;
937     + zram->disk->queue->limits.chunk_sectors = 0;
938     blk_queue_max_discard_sectors(zram->disk->queue, UINT_MAX);
939     /*
940     * zram_bio_discard() will clear all logical blocks if logical block
941     diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
942     index 890082315054..10f56133b281 100644
943     --- a/drivers/bus/arm-cci.c
944     +++ b/drivers/bus/arm-cci.c
945     @@ -1755,14 +1755,17 @@ static int cci_pmu_probe(struct platform_device *pdev)
946     raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
947     mutex_init(&cci_pmu->reserve_mutex);
948     atomic_set(&cci_pmu->active_events, 0);
949     - cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus);
950     + cpumask_set_cpu(get_cpu(), &cci_pmu->cpus);
951    
952     ret = cci_pmu_init(cci_pmu, pdev);
953     - if (ret)
954     + if (ret) {
955     + put_cpu();
956     return ret;
957     + }
958    
959     cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
960     &cci_pmu->node);
961     + put_cpu();
962     pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
963     return 0;
964     }
965     diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
966     index aee83462b796..f0249899fc96 100644
967     --- a/drivers/bus/arm-ccn.c
968     +++ b/drivers/bus/arm-ccn.c
969     @@ -1271,6 +1271,10 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
970     int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);
971    
972     name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
973     + if (!name) {
974     + err = -ENOMEM;
975     + goto error_choose_name;
976     + }
977     snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
978     }
979    
980     @@ -1297,7 +1301,7 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
981     }
982    
983     /* Pick one CPU which we will use to collect data from CCN... */
984     - cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu);
985     + cpumask_set_cpu(get_cpu(), &ccn->dt.cpu);
986    
987     /* Also make sure that the overflow interrupt is handled by this CPU */
988     if (ccn->irq) {
989     @@ -1314,10 +1318,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
990    
991     cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
992     &ccn->dt.node);
993     + put_cpu();
994     return 0;
995    
996     error_pmu_register:
997     error_set_affinity:
998     + put_cpu();
999     +error_choose_name:
1000     ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
1001     for (i = 0; i < ccn->num_xps; i++)
1002     writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
1003     @@ -1578,8 +1585,8 @@ static int __init arm_ccn_init(void)
1004    
1005     static void __exit arm_ccn_exit(void)
1006     {
1007     - cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1008     platform_driver_unregister(&arm_ccn_driver);
1009     + cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CCN_ONLINE);
1010     }
1011    
1012     module_init(arm_ccn_init);
1013     diff --git a/drivers/clk/uniphier/clk-uniphier-sys.c b/drivers/clk/uniphier/clk-uniphier-sys.c
1014     index 5d029991047d..481225adef87 100644
1015     --- a/drivers/clk/uniphier/clk-uniphier-sys.c
1016     +++ b/drivers/clk/uniphier/clk-uniphier-sys.c
1017     @@ -98,7 +98,7 @@ const struct uniphier_clk_data uniphier_sld8_sys_clk_data[] = {
1018     const struct uniphier_clk_data uniphier_pro5_sys_clk_data[] = {
1019     UNIPHIER_CLK_FACTOR("spll", -1, "ref", 120, 1), /* 2400 MHz */
1020     UNIPHIER_CLK_FACTOR("dapll1", -1, "ref", 128, 1), /* 2560 MHz */
1021     - UNIPHIER_CLK_FACTOR("dapll2", -1, "ref", 144, 125), /* 2949.12 MHz */
1022     + UNIPHIER_CLK_FACTOR("dapll2", -1, "dapll1", 144, 125), /* 2949.12 MHz */
1023     UNIPHIER_CLK_FACTOR("uart", 0, "dapll2", 1, 40),
1024     UNIPHIER_CLK_FACTOR("i2c", 1, "spll", 1, 48),
1025     UNIPHIER_PRO5_SYS_CLK_SD,
1026     diff --git a/drivers/crypto/s5p-sss.c b/drivers/crypto/s5p-sss.c
1027     index dce1af0ce85c..a668286d62cb 100644
1028     --- a/drivers/crypto/s5p-sss.c
1029     +++ b/drivers/crypto/s5p-sss.c
1030     @@ -805,8 +805,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
1031     dev_warn(dev, "feed control interrupt is not available.\n");
1032     goto err_irq;
1033     }
1034     - err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
1035     - IRQF_SHARED, pdev->name, pdev);
1036     + err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
1037     + s5p_aes_interrupt, IRQF_ONESHOT,
1038     + pdev->name, pdev);
1039     if (err < 0) {
1040     dev_warn(dev, "feed control interrupt is not available.\n");
1041     goto err_irq;
1042     diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
1043     index e2d323fa2437..1c8d79d93098 100644
1044     --- a/drivers/crypto/talitos.c
1045     +++ b/drivers/crypto/talitos.c
1046     @@ -1232,12 +1232,11 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1047     sg_link_tbl_len += authsize;
1048     }
1049    
1050     - sg_count = talitos_sg_map(dev, areq->src, cryptlen, edesc,
1051     - &desc->ptr[4], sg_count, areq->assoclen,
1052     - tbl_off);
1053     + ret = talitos_sg_map(dev, areq->src, sg_link_tbl_len, edesc,
1054     + &desc->ptr[4], sg_count, areq->assoclen, tbl_off);
1055    
1056     - if (sg_count > 1) {
1057     - tbl_off += sg_count;
1058     + if (ret > 1) {
1059     + tbl_off += ret;
1060     sync_needed = true;
1061     }
1062    
1063     @@ -1248,14 +1247,15 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1064     dma_map_sg(dev, areq->dst, sg_count, DMA_FROM_DEVICE);
1065     }
1066    
1067     - sg_count = talitos_sg_map(dev, areq->dst, cryptlen, edesc,
1068     - &desc->ptr[5], sg_count, areq->assoclen,
1069     - tbl_off);
1070     + ret = talitos_sg_map(dev, areq->dst, cryptlen, edesc, &desc->ptr[5],
1071     + sg_count, areq->assoclen, tbl_off);
1072    
1073     if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1074     to_talitos_ptr_ext_or(&desc->ptr[5], authsize, is_sec1);
1075    
1076     - if (sg_count > 1) {
1077     + /* ICV data */
1078     + if (ret > 1) {
1079     + tbl_off += ret;
1080     edesc->icv_ool = true;
1081     sync_needed = true;
1082    
1083     @@ -1265,9 +1265,7 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1084     sizeof(struct talitos_ptr) + authsize;
1085    
1086     /* Add an entry to the link table for ICV data */
1087     - tbl_ptr += sg_count - 1;
1088     - to_talitos_ptr_ext_set(tbl_ptr, 0, is_sec1);
1089     - tbl_ptr++;
1090     + to_talitos_ptr_ext_set(tbl_ptr - 1, 0, is_sec1);
1091     to_talitos_ptr_ext_set(tbl_ptr, DESC_PTR_LNKTBL_RETURN,
1092     is_sec1);
1093     to_talitos_ptr_len(tbl_ptr, authsize, is_sec1);
1094     @@ -1275,18 +1273,33 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
1095     /* icv data follows link tables */
1096     to_talitos_ptr(tbl_ptr, edesc->dma_link_tbl + offset,
1097     is_sec1);
1098     + } else {
1099     + dma_addr_t addr = edesc->dma_link_tbl;
1100     +
1101     + if (is_sec1)
1102     + addr += areq->assoclen + cryptlen;
1103     + else
1104     + addr += sizeof(struct talitos_ptr) * tbl_off;
1105     +
1106     + to_talitos_ptr(&desc->ptr[6], addr, is_sec1);
1107     + to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1108     + }
1109     + } else if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1110     + ret = talitos_sg_map(dev, areq->dst, authsize, edesc,
1111     + &desc->ptr[6], sg_count, areq->assoclen +
1112     + cryptlen,
1113     + tbl_off);
1114     + if (ret > 1) {
1115     + tbl_off += ret;
1116     + edesc->icv_ool = true;
1117     + sync_needed = true;
1118     + } else {
1119     + edesc->icv_ool = false;
1120     }
1121     } else {
1122     edesc->icv_ool = false;
1123     }
1124    
1125     - /* ICV data */
1126     - if (!(desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)) {
1127     - to_talitos_ptr_len(&desc->ptr[6], authsize, is_sec1);
1128     - to_talitos_ptr(&desc->ptr[6], edesc->dma_link_tbl +
1129     - areq->assoclen + cryptlen, is_sec1);
1130     - }
1131     -
1132     /* iv out */
1133     if (desc->hdr & DESC_HDR_TYPE_IPSEC_ESP)
1134     map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
1135     @@ -1494,12 +1507,20 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *cipher,
1136     const u8 *key, unsigned int keylen)
1137     {
1138     struct talitos_ctx *ctx = crypto_ablkcipher_ctx(cipher);
1139     + u32 tmp[DES_EXPKEY_WORDS];
1140    
1141     if (keylen > TALITOS_MAX_KEY_SIZE) {
1142     crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
1143     return -EINVAL;
1144     }
1145    
1146     + if (unlikely(crypto_ablkcipher_get_flags(cipher) &
1147     + CRYPTO_TFM_REQ_WEAK_KEY) &&
1148     + !des_ekey(tmp, key)) {
1149     + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_WEAK_KEY);
1150     + return -EINVAL;
1151     + }
1152     +
1153     memcpy(&ctx->key, key, keylen);
1154     ctx->keylen = keylen;
1155    
1156     @@ -2614,7 +2635,7 @@ static struct talitos_alg_template driver_algs[] = {
1157     .ivsize = AES_BLOCK_SIZE,
1158     }
1159     },
1160     - .desc_hdr_template = DESC_HDR_TYPE_COMMON_NONSNOOP_NO_AFEU |
1161     + .desc_hdr_template = DESC_HDR_TYPE_AESU_CTR_NONSNOOP |
1162     DESC_HDR_SEL0_AESU |
1163     DESC_HDR_MODE0_AESU_CTR,
1164     },
1165     @@ -3047,6 +3068,11 @@ static struct talitos_crypto_alg *talitos_alg_alloc(struct device *dev,
1166     t_alg->algt.alg.aead.setkey = aead_setkey;
1167     t_alg->algt.alg.aead.encrypt = aead_encrypt;
1168     t_alg->algt.alg.aead.decrypt = aead_decrypt;
1169     + if (!(priv->features & TALITOS_FTR_SHA224_HWINIT) &&
1170     + !strncmp(alg->cra_name, "authenc(hmac(sha224)", 20)) {
1171     + kfree(t_alg);
1172     + return ERR_PTR(-ENOTSUPP);
1173     + }
1174     break;
1175     case CRYPTO_ALG_TYPE_AHASH:
1176     alg = &t_alg->algt.alg.hash.halg.base;
1177     diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
1178     index 72e07e3cf718..16e0eb523439 100644
1179     --- a/drivers/edac/i5000_edac.c
1180     +++ b/drivers/edac/i5000_edac.c
1181     @@ -227,7 +227,7 @@
1182     #define NREC_RDWR(x) (((x)>>11) & 1)
1183     #define NREC_RANK(x) (((x)>>8) & 0x7)
1184     #define NRECMEMB 0xC0
1185     -#define NREC_CAS(x) (((x)>>16) & 0xFFFFFF)
1186     +#define NREC_CAS(x) (((x)>>16) & 0xFFF)
1187     #define NREC_RAS(x) ((x) & 0x7FFF)
1188     #define NRECFGLOG 0xC4
1189     #define NREEECFBDA 0xC8
1190     @@ -371,7 +371,7 @@ struct i5000_error_info {
1191     /* These registers are input ONLY if there was a
1192     * Non-Recoverable Error */
1193     u16 nrecmema; /* Non-Recoverable Mem log A */
1194     - u16 nrecmemb; /* Non-Recoverable Mem log B */
1195     + u32 nrecmemb; /* Non-Recoverable Mem log B */
1196    
1197     };
1198    
1199     @@ -407,7 +407,7 @@ static void i5000_get_error_info(struct mem_ctl_info *mci,
1200     NERR_FAT_FBD, &info->nerr_fat_fbd);
1201     pci_read_config_word(pvt->branchmap_werrors,
1202     NRECMEMA, &info->nrecmema);
1203     - pci_read_config_word(pvt->branchmap_werrors,
1204     + pci_read_config_dword(pvt->branchmap_werrors,
1205     NRECMEMB, &info->nrecmemb);
1206    
1207     /* Clear the error bits, by writing them back */
1208     @@ -1293,7 +1293,7 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1209     dimm->mtype = MEM_FB_DDR2;
1210    
1211     /* ask what device type on this row */
1212     - if (MTR_DRAM_WIDTH(mtr))
1213     + if (MTR_DRAM_WIDTH(mtr) == 8)
1214     dimm->dtype = DEV_X8;
1215     else
1216     dimm->dtype = DEV_X4;
1217     diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
1218     index 6ef6ad1ba16e..2ea2f32e608b 100644
1219     --- a/drivers/edac/i5400_edac.c
1220     +++ b/drivers/edac/i5400_edac.c
1221     @@ -368,7 +368,7 @@ struct i5400_error_info {
1222    
1223     /* These registers are input ONLY if there was a Non-Rec Error */
1224     u16 nrecmema; /* Non-Recoverable Mem log A */
1225     - u16 nrecmemb; /* Non-Recoverable Mem log B */
1226     + u32 nrecmemb; /* Non-Recoverable Mem log B */
1227    
1228     };
1229    
1230     @@ -458,7 +458,7 @@ static void i5400_get_error_info(struct mem_ctl_info *mci,
1231     NERR_FAT_FBD, &info->nerr_fat_fbd);
1232     pci_read_config_word(pvt->branchmap_werrors,
1233     NRECMEMA, &info->nrecmema);
1234     - pci_read_config_word(pvt->branchmap_werrors,
1235     + pci_read_config_dword(pvt->branchmap_werrors,
1236     NRECMEMB, &info->nrecmemb);
1237    
1238     /* Clear the error bits, by writing them back */
1239     @@ -1207,13 +1207,14 @@ static int i5400_init_dimms(struct mem_ctl_info *mci)
1240    
1241     dimm->nr_pages = size_mb << 8;
1242     dimm->grain = 8;
1243     - dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
1244     + dimm->dtype = MTR_DRAM_WIDTH(mtr) == 8 ?
1245     + DEV_X8 : DEV_X4;
1246     dimm->mtype = MEM_FB_DDR2;
1247     /*
1248     * The eccc mechanism is SDDC (aka SECC), with
1249     * is similar to Chipkill.
1250     */
1251     - dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
1252     + dimm->edac_mode = MTR_DRAM_WIDTH(mtr) == 8 ?
1253     EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1254     ndimms++;
1255     }
1256     diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
1257     index a4944e22f294..2f48f848865f 100644
1258     --- a/drivers/firmware/efi/efi.c
1259     +++ b/drivers/firmware/efi/efi.c
1260     @@ -120,8 +120,7 @@ static ssize_t systab_show(struct kobject *kobj,
1261     return str - buf;
1262     }
1263    
1264     -static struct kobj_attribute efi_attr_systab =
1265     - __ATTR(systab, 0400, systab_show, NULL);
1266     +static struct kobj_attribute efi_attr_systab = __ATTR_RO_MODE(systab, 0400);
1267    
1268     #define EFI_FIELD(var) efi.var
1269    
1270     diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
1271     index 14914074f716..307ec1c11276 100644
1272     --- a/drivers/firmware/efi/esrt.c
1273     +++ b/drivers/firmware/efi/esrt.c
1274     @@ -106,7 +106,7 @@ static const struct sysfs_ops esre_attr_ops = {
1275     };
1276    
1277     /* Generic ESRT Entry ("ESRE") support. */
1278     -static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
1279     +static ssize_t fw_class_show(struct esre_entry *entry, char *buf)
1280     {
1281     char *str = buf;
1282    
1283     @@ -117,18 +117,16 @@ static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
1284     return str - buf;
1285     }
1286    
1287     -static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
1288     - esre_fw_class_show, NULL);
1289     +static struct esre_attribute esre_fw_class = __ATTR_RO_MODE(fw_class, 0400);
1290    
1291     #define esre_attr_decl(name, size, fmt) \
1292     -static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
1293     +static ssize_t name##_show(struct esre_entry *entry, char *buf) \
1294     { \
1295     return sprintf(buf, fmt "\n", \
1296     le##size##_to_cpu(entry->esre.esre1->name)); \
1297     } \
1298     \
1299     -static struct esre_attribute esre_##name = __ATTR(name, 0400, \
1300     - esre_##name##_show, NULL)
1301     +static struct esre_attribute esre_##name = __ATTR_RO_MODE(name, 0400)
1302    
1303     esre_attr_decl(fw_type, 32, "%u");
1304     esre_attr_decl(fw_version, 32, "%u");
1305     @@ -193,14 +191,13 @@ static int esre_create_sysfs_entry(void *esre, int entry_num)
1306    
1307     /* support for displaying ESRT fields at the top level */
1308     #define esrt_attr_decl(name, size, fmt) \
1309     -static ssize_t esrt_##name##_show(struct kobject *kobj, \
1310     +static ssize_t name##_show(struct kobject *kobj, \
1311     struct kobj_attribute *attr, char *buf)\
1312     { \
1313     return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
1314     } \
1315     \
1316     -static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
1317     - esrt_##name##_show, NULL)
1318     +static struct kobj_attribute esrt_##name = __ATTR_RO_MODE(name, 0400)
1319    
1320     esrt_attr_decl(fw_resource_count, 32, "%u");
1321     esrt_attr_decl(fw_resource_count_max, 32, "%u");
1322     @@ -431,7 +428,7 @@ static int __init esrt_sysfs_init(void)
1323     err_remove_esrt:
1324     kobject_put(esrt_kobj);
1325     err:
1326     - kfree(esrt);
1327     + memunmap(esrt);
1328     esrt = NULL;
1329     return error;
1330     }
1331     diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
1332     index 8e64b77aeac9..f377609ff141 100644
1333     --- a/drivers/firmware/efi/runtime-map.c
1334     +++ b/drivers/firmware/efi/runtime-map.c
1335     @@ -63,11 +63,11 @@ static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
1336     return map_attr->show(entry, buf);
1337     }
1338    
1339     -static struct map_attribute map_type_attr = __ATTR_RO(type);
1340     -static struct map_attribute map_phys_addr_attr = __ATTR_RO(phys_addr);
1341     -static struct map_attribute map_virt_addr_attr = __ATTR_RO(virt_addr);
1342     -static struct map_attribute map_num_pages_attr = __ATTR_RO(num_pages);
1343     -static struct map_attribute map_attribute_attr = __ATTR_RO(attribute);
1344     +static struct map_attribute map_type_attr = __ATTR_RO_MODE(type, 0400);
1345     +static struct map_attribute map_phys_addr_attr = __ATTR_RO_MODE(phys_addr, 0400);
1346     +static struct map_attribute map_virt_addr_attr = __ATTR_RO_MODE(virt_addr, 0400);
1347     +static struct map_attribute map_num_pages_attr = __ATTR_RO_MODE(num_pages, 0400);
1348     +static struct map_attribute map_attribute_attr = __ATTR_RO_MODE(attribute, 0400);
1349    
1350     /*
1351     * These are default attributes that are added for every memmap entry.
1352     diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
1353     index 5bddbd507ca9..3fe6a21e05a5 100644
1354     --- a/drivers/gpio/gpio-altera.c
1355     +++ b/drivers/gpio/gpio-altera.c
1356     @@ -90,21 +90,18 @@ static int altera_gpio_irq_set_type(struct irq_data *d,
1357    
1358     altera_gc = gpiochip_get_data(irq_data_get_irq_chip_data(d));
1359    
1360     - if (type == IRQ_TYPE_NONE)
1361     + if (type == IRQ_TYPE_NONE) {
1362     + irq_set_handler_locked(d, handle_bad_irq);
1363     return 0;
1364     - if (type == IRQ_TYPE_LEVEL_HIGH &&
1365     - altera_gc->interrupt_trigger == IRQ_TYPE_LEVEL_HIGH)
1366     - return 0;
1367     - if (type == IRQ_TYPE_EDGE_RISING &&
1368     - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_RISING)
1369     - return 0;
1370     - if (type == IRQ_TYPE_EDGE_FALLING &&
1371     - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_FALLING)
1372     - return 0;
1373     - if (type == IRQ_TYPE_EDGE_BOTH &&
1374     - altera_gc->interrupt_trigger == IRQ_TYPE_EDGE_BOTH)
1375     + }
1376     + if (type == altera_gc->interrupt_trigger) {
1377     + if (type == IRQ_TYPE_LEVEL_HIGH)
1378     + irq_set_handler_locked(d, handle_level_irq);
1379     + else
1380     + irq_set_handler_locked(d, handle_simple_irq);
1381     return 0;
1382     -
1383     + }
1384     + irq_set_handler_locked(d, handle_bad_irq);
1385     return -EINVAL;
1386     }
1387    
1388     @@ -230,7 +227,6 @@ static void altera_gpio_irq_edge_handler(struct irq_desc *desc)
1389     chained_irq_exit(chip, desc);
1390     }
1391    
1392     -
1393     static void altera_gpio_irq_leveL_high_handler(struct irq_desc *desc)
1394     {
1395     struct altera_gpio_chip *altera_gc;
1396     @@ -310,7 +306,7 @@ static int altera_gpio_probe(struct platform_device *pdev)
1397     altera_gc->interrupt_trigger = reg;
1398    
1399     ret = gpiochip_irqchip_add(&altera_gc->mmchip.gc, &altera_irq_chip, 0,
1400     - handle_simple_irq, IRQ_TYPE_NONE);
1401     + handle_bad_irq, IRQ_TYPE_NONE);
1402    
1403     if (ret) {
1404     dev_err(&pdev->dev, "could not add irqchip\n");
1405     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1406     index e41d4baebf86..ce9797b6f9c7 100644
1407     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1408     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
1409     @@ -2020,8 +2020,11 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
1410     }
1411    
1412     r = amdgpu_late_init(adev);
1413     - if (r)
1414     + if (r) {
1415     + if (fbcon)
1416     + console_unlock();
1417     return r;
1418     + }
1419    
1420     /* pin cursors */
1421     list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1422     diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
1423     index 26412d2f8c98..ffd673615772 100644
1424     --- a/drivers/gpu/drm/armada/Makefile
1425     +++ b/drivers/gpu/drm/armada/Makefile
1426     @@ -4,5 +4,3 @@ armada-y += armada_510.o
1427     armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
1428    
1429     obj-$(CONFIG_DRM_ARMADA) := armada.o
1430     -
1431     -CFLAGS_armada_trace.o := -I$(src)
1432     diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
1433     index f2ae72ba7d5a..2abc47b554ab 100644
1434     --- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
1435     +++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
1436     @@ -246,6 +246,15 @@ struct exynos_drm_gem *exynos_drm_gem_create(struct drm_device *dev,
1437     if (IS_ERR(exynos_gem))
1438     return exynos_gem;
1439    
1440     + if (!is_drm_iommu_supported(dev) && (flags & EXYNOS_BO_NONCONTIG)) {
1441     + /*
1442     + * when no IOMMU is available, all allocated buffers are
1443     + * contiguous anyway, so drop EXYNOS_BO_NONCONTIG flag
1444     + */
1445     + flags &= ~EXYNOS_BO_NONCONTIG;
1446     + DRM_WARN("Non-contiguous allocation is not supported without IOMMU, falling back to contiguous buffer\n");
1447     + }
1448     +
1449     /* set memory type and cache attribute from user side. */
1450     exynos_gem->flags = flags;
1451    
1452     diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
1453     index cd4599c0523b..db607d51ee2b 100644
1454     --- a/drivers/hid/Kconfig
1455     +++ b/drivers/hid/Kconfig
1456     @@ -175,11 +175,11 @@ config HID_CHERRY
1457     Support for Cherry Cymotion keyboard.
1458    
1459     config HID_CHICONY
1460     - tristate "Chicony Tactical pad"
1461     + tristate "Chicony devices"
1462     depends on HID
1463     default !EXPERT
1464     ---help---
1465     - Support for Chicony Tactical pad.
1466     + Support for Chicony Tactical pad and special keys on Chicony keyboards.
1467    
1468     config HID_CORSAIR
1469     tristate "Corsair devices"
1470     diff --git a/drivers/hid/hid-chicony.c b/drivers/hid/hid-chicony.c
1471     index bc3cec199fee..f04ed9aabc3f 100644
1472     --- a/drivers/hid/hid-chicony.c
1473     +++ b/drivers/hid/hid-chicony.c
1474     @@ -86,6 +86,7 @@ static const struct hid_device_id ch_devices[] = {
1475     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
1476     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
1477     { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
1478     + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
1479     { }
1480     };
1481     MODULE_DEVICE_TABLE(hid, ch_devices);
1482     diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
1483     index 4f3f5749b0c1..bdde8859e191 100644
1484     --- a/drivers/hid/hid-core.c
1485     +++ b/drivers/hid/hid-core.c
1486     @@ -1906,6 +1906,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1487     { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
1488     { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
1489     { HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
1490     + { HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
1491     { HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
1492     { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
1493     { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
1494     diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
1495     index 08fd3f831d62..433d5f675c03 100644
1496     --- a/drivers/hid/hid-ids.h
1497     +++ b/drivers/hid/hid-ids.h
1498     @@ -558,6 +558,7 @@
1499    
1500     #define USB_VENDOR_ID_JESS 0x0c45
1501     #define USB_DEVICE_ID_JESS_YUREX 0x1010
1502     +#define USB_DEVICE_ID_JESS_ZEN_AIO_KBD 0x5112
1503    
1504     #define USB_VENDOR_ID_JESS2 0x0f30
1505     #define USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD 0x0111
1506     diff --git a/drivers/i2c/busses/i2c-riic.c b/drivers/i2c/busses/i2c-riic.c
1507     index 8f11d347b3ec..c811af4c8d81 100644
1508     --- a/drivers/i2c/busses/i2c-riic.c
1509     +++ b/drivers/i2c/busses/i2c-riic.c
1510     @@ -218,8 +218,12 @@ static irqreturn_t riic_tend_isr(int irq, void *data)
1511     }
1512    
1513     if (riic->is_last || riic->err) {
1514     - riic_clear_set_bit(riic, 0, ICIER_SPIE, RIIC_ICIER);
1515     + riic_clear_set_bit(riic, ICIER_TEIE, ICIER_SPIE, RIIC_ICIER);
1516     writeb(ICCR2_SP, riic->base + RIIC_ICCR2);
1517     + } else {
1518     + /* Transfer is complete, but do not send STOP */
1519     + riic_clear_set_bit(riic, ICIER_TEIE, 0, RIIC_ICIER);
1520     + complete(&riic->msg_done);
1521     }
1522    
1523     return IRQ_HANDLED;
1524     diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
1525     index c22454383976..709d6491d243 100644
1526     --- a/drivers/infiniband/hw/mlx4/qp.c
1527     +++ b/drivers/infiniband/hw/mlx4/qp.c
1528     @@ -1669,7 +1669,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp,
1529     context->mtu_msgmax = (IB_MTU_4096 << 5) |
1530     ilog2(dev->dev->caps.max_gso_sz);
1531     else
1532     - context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1533     + context->mtu_msgmax = (IB_MTU_4096 << 5) | 13;
1534     } else if (attr_mask & IB_QP_PATH_MTU) {
1535     if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_4096) {
1536     pr_err("path MTU (%u) is invalid\n",
1537     diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
1538     index 786f640fc462..a2120ff0ef4c 100644
1539     --- a/drivers/infiniband/hw/mlx5/main.c
1540     +++ b/drivers/infiniband/hw/mlx5/main.c
1541     @@ -2514,6 +2514,8 @@ static int create_umr_res(struct mlx5_ib_dev *dev)
1542     qp->real_qp = qp;
1543     qp->uobject = NULL;
1544     qp->qp_type = MLX5_IB_QPT_REG_UMR;
1545     + qp->send_cq = init_attr->send_cq;
1546     + qp->recv_cq = init_attr->recv_cq;
1547    
1548     attr->qp_state = IB_QPS_INIT;
1549     attr->port_num = 1;
1550     diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
1551     index 002f8a421efa..88bbc8ccc5e3 100644
1552     --- a/drivers/iommu/intel-iommu.c
1553     +++ b/drivers/iommu/intel-iommu.c
1554     @@ -2245,10 +2245,12 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1555     uint64_t tmp;
1556    
1557     if (!sg_res) {
1558     + unsigned int pgoff = sg->offset & ~PAGE_MASK;
1559     +
1560     sg_res = aligned_nrpages(sg->offset, sg->length);
1561     - sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
1562     + sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + pgoff;
1563     sg->dma_length = sg->length;
1564     - pteval = page_to_phys(sg_page(sg)) | prot;
1565     + pteval = (sg_phys(sg) - pgoff) | prot;
1566     phys_pfn = pteval >> VTD_PAGE_SHIFT;
1567     }
1568    
1569     @@ -3894,7 +3896,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
1570    
1571     for_each_sg(sglist, sg, nelems, i) {
1572     BUG_ON(!sg_page(sg));
1573     - sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
1574     + sg->dma_address = sg_phys(sg);
1575     sg->dma_length = sg->length;
1576     }
1577     return nelems;
1578     diff --git a/drivers/irqchip/irq-crossbar.c b/drivers/irqchip/irq-crossbar.c
1579     index 05bbf171df37..1070b7b959f2 100644
1580     --- a/drivers/irqchip/irq-crossbar.c
1581     +++ b/drivers/irqchip/irq-crossbar.c
1582     @@ -199,7 +199,7 @@ static const struct irq_domain_ops crossbar_domain_ops = {
1583     static int __init crossbar_of_init(struct device_node *node)
1584     {
1585     int i, size, reserved = 0;
1586     - u32 max = 0, entry;
1587     + u32 max = 0, entry, reg_size;
1588     const __be32 *irqsr;
1589     int ret = -ENOMEM;
1590    
1591     @@ -276,9 +276,9 @@ static int __init crossbar_of_init(struct device_node *node)
1592     if (!cb->register_offsets)
1593     goto err_irq_map;
1594    
1595     - of_property_read_u32(node, "ti,reg-size", &size);
1596     + of_property_read_u32(node, "ti,reg-size", &reg_size);
1597    
1598     - switch (size) {
1599     + switch (reg_size) {
1600     case 1:
1601     cb->write = crossbar_writeb;
1602     break;
1603     @@ -304,7 +304,7 @@ static int __init crossbar_of_init(struct device_node *node)
1604     continue;
1605    
1606     cb->register_offsets[i] = reserved;
1607     - reserved += size;
1608     + reserved += reg_size;
1609     }
1610    
1611     of_property_read_u32(node, "ti,irqs-safe-map", &cb->safe_map);
1612     diff --git a/drivers/media/rc/lirc_dev.c b/drivers/media/rc/lirc_dev.c
1613     index 6ebe89551961..f4509ef9922b 100644
1614     --- a/drivers/media/rc/lirc_dev.c
1615     +++ b/drivers/media/rc/lirc_dev.c
1616     @@ -446,6 +446,8 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
1617     return -ERESTARTSYS;
1618    
1619     ir = irctls[iminor(inode)];
1620     + mutex_unlock(&lirc_dev_lock);
1621     +
1622     if (!ir) {
1623     retval = -ENODEV;
1624     goto error;
1625     @@ -486,8 +488,6 @@ int lirc_dev_fop_open(struct inode *inode, struct file *file)
1626     }
1627    
1628     error:
1629     - mutex_unlock(&lirc_dev_lock);
1630     -
1631     nonseekable_open(inode, file);
1632    
1633     return retval;
1634     diff --git a/drivers/media/usb/dvb-usb/dibusb-common.c b/drivers/media/usb/dvb-usb/dibusb-common.c
1635     index 8207e6900656..bcacb0f22028 100644
1636     --- a/drivers/media/usb/dvb-usb/dibusb-common.c
1637     +++ b/drivers/media/usb/dvb-usb/dibusb-common.c
1638     @@ -223,8 +223,20 @@ EXPORT_SYMBOL(dibusb_i2c_algo);
1639    
1640     int dibusb_read_eeprom_byte(struct dvb_usb_device *d, u8 offs, u8 *val)
1641     {
1642     - u8 wbuf[1] = { offs };
1643     - return dibusb_i2c_msg(d, 0x50, wbuf, 1, val, 1);
1644     + u8 *buf;
1645     + int rc;
1646     +
1647     + buf = kmalloc(2, GFP_KERNEL);
1648     + if (!buf)
1649     + return -ENOMEM;
1650     +
1651     + buf[0] = offs;
1652     +
1653     + rc = dibusb_i2c_msg(d, 0x50, &buf[0], 1, &buf[1], 1);
1654     + *val = buf[1];
1655     + kfree(buf);
1656     +
1657     + return rc;
1658     }
1659     EXPORT_SYMBOL(dibusb_read_eeprom_byte);
1660    
1661     diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
1662     index 5457c361ad58..bf0fe0137dfe 100644
1663     --- a/drivers/memory/omap-gpmc.c
1664     +++ b/drivers/memory/omap-gpmc.c
1665     @@ -1947,9 +1947,7 @@ static int gpmc_probe_onenand_child(struct platform_device *pdev,
1666     if (!of_property_read_u32(child, "dma-channel", &val))
1667     gpmc_onenand_data->dma_channel = val;
1668    
1669     - gpmc_onenand_init(gpmc_onenand_data);
1670     -
1671     - return 0;
1672     + return gpmc_onenand_init(gpmc_onenand_data);
1673     }
1674     #else
1675     static int gpmc_probe_onenand_child(struct platform_device *pdev,
1676     diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
1677     index 6749b1829469..4d01d7bc24ef 100644
1678     --- a/drivers/net/can/ti_hecc.c
1679     +++ b/drivers/net/can/ti_hecc.c
1680     @@ -652,6 +652,9 @@ static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
1681     mbx_mask = hecc_read(priv, HECC_CANMIM);
1682     mbx_mask |= HECC_TX_MBOX_MASK;
1683     hecc_write(priv, HECC_CANMIM, mbx_mask);
1684     + } else {
1685     + /* repoll is done only if whole budget is used */
1686     + num_pkts = quota;
1687     }
1688    
1689     return num_pkts;
1690     diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
1691     index b3d02759c226..b00358297424 100644
1692     --- a/drivers/net/can/usb/ems_usb.c
1693     +++ b/drivers/net/can/usb/ems_usb.c
1694     @@ -288,6 +288,8 @@ static void ems_usb_read_interrupt_callback(struct urb *urb)
1695    
1696     case -ECONNRESET: /* unlink */
1697     case -ENOENT:
1698     + case -EPIPE:
1699     + case -EPROTO:
1700     case -ESHUTDOWN:
1701     return;
1702    
1703     diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
1704     index 9fdb0f0bfa06..c6dcf93675c0 100644
1705     --- a/drivers/net/can/usb/esd_usb2.c
1706     +++ b/drivers/net/can/usb/esd_usb2.c
1707     @@ -393,6 +393,8 @@ static void esd_usb2_read_bulk_callback(struct urb *urb)
1708     break;
1709    
1710     case -ENOENT:
1711     + case -EPIPE:
1712     + case -EPROTO:
1713     case -ESHUTDOWN:
1714     return;
1715    
1716     diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
1717     index 4224e066cb16..c9d61a6dfb7a 100644
1718     --- a/drivers/net/can/usb/kvaser_usb.c
1719     +++ b/drivers/net/can/usb/kvaser_usb.c
1720     @@ -609,8 +609,8 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id,
1721     }
1722    
1723     if (pos + tmp->len > actual_len) {
1724     - dev_err(dev->udev->dev.parent,
1725     - "Format error\n");
1726     + dev_err_ratelimited(dev->udev->dev.parent,
1727     + "Format error\n");
1728     break;
1729     }
1730    
1731     @@ -813,6 +813,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
1732     if (err) {
1733     netdev_err(netdev, "Error transmitting URB\n");
1734     usb_unanchor_urb(urb);
1735     + kfree(buf);
1736     usb_free_urb(urb);
1737     return err;
1738     }
1739     @@ -1325,6 +1326,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1740     case 0:
1741     break;
1742     case -ENOENT:
1743     + case -EPIPE:
1744     + case -EPROTO:
1745     case -ESHUTDOWN:
1746     return;
1747     default:
1748     @@ -1333,7 +1336,7 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1749     goto resubmit_urb;
1750     }
1751    
1752     - while (pos <= urb->actual_length - MSG_HEADER_LEN) {
1753     + while (pos <= (int)(urb->actual_length - MSG_HEADER_LEN)) {
1754     msg = urb->transfer_buffer + pos;
1755    
1756     /* The Kvaser firmware can only read and write messages that
1757     @@ -1352,7 +1355,8 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb)
1758     }
1759    
1760     if (pos + msg->len > urb->actual_length) {
1761     - dev_err(dev->udev->dev.parent, "Format error\n");
1762     + dev_err_ratelimited(dev->udev->dev.parent,
1763     + "Format error\n");
1764     break;
1765     }
1766    
1767     @@ -1768,6 +1772,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
1768     spin_unlock_irqrestore(&priv->tx_contexts_lock, flags);
1769    
1770     usb_unanchor_urb(urb);
1771     + kfree(buf);
1772    
1773     stats->tx_dropped++;
1774    
1775     diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
1776     index d000cb62d6ae..27861c417c94 100644
1777     --- a/drivers/net/can/usb/usb_8dev.c
1778     +++ b/drivers/net/can/usb/usb_8dev.c
1779     @@ -524,6 +524,8 @@ static void usb_8dev_read_bulk_callback(struct urb *urb)
1780     break;
1781    
1782     case -ENOENT:
1783     + case -EPIPE:
1784     + case -EPROTO:
1785     case -ESHUTDOWN:
1786     return;
1787    
1788     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1789     index 4febe60eadc2..5d958b5bb8b1 100644
1790     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1791     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
1792     @@ -13293,17 +13293,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
1793     dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1794     NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
1795    
1796     - /* VF with OLD Hypervisor or old PF do not support filtering */
1797     if (IS_PF(bp)) {
1798     if (chip_is_e1x)
1799     bp->accept_any_vlan = true;
1800     else
1801     dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1802     -#ifdef CONFIG_BNX2X_SRIOV
1803     - } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
1804     - dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1805     -#endif
1806     }
1807     + /* For VF we'll know whether to enable VLAN filtering after
1808     + * getting a response to CHANNEL_TLV_ACQUIRE from PF.
1809     + */
1810    
1811     dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
1812     dev->features |= NETIF_F_HIGHDMA;
1813     @@ -13735,7 +13733,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1814     if (!netif_running(bp->dev)) {
1815     DP(BNX2X_MSG_PTP,
1816     "PTP adjfreq called while the interface is down\n");
1817     - return -EFAULT;
1818     + return -ENETDOWN;
1819     }
1820    
1821     if (ppb < 0) {
1822     @@ -13794,6 +13792,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1823     {
1824     struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
1825    
1826     + if (!netif_running(bp->dev)) {
1827     + DP(BNX2X_MSG_PTP,
1828     + "PTP adjtime called while the interface is down\n");
1829     + return -ENETDOWN;
1830     + }
1831     +
1832     DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);
1833    
1834     timecounter_adjtime(&bp->timecounter, delta);
1835     @@ -13806,6 +13810,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
1836     struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
1837     u64 ns;
1838    
1839     + if (!netif_running(bp->dev)) {
1840     + DP(BNX2X_MSG_PTP,
1841     + "PTP gettime called while the interface is down\n");
1842     + return -ENETDOWN;
1843     + }
1844     +
1845     ns = timecounter_read(&bp->timecounter);
1846    
1847     DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);
1848     @@ -13821,6 +13831,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
1849     struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
1850     u64 ns;
1851    
1852     + if (!netif_running(bp->dev)) {
1853     + DP(BNX2X_MSG_PTP,
1854     + "PTP settime called while the interface is down\n");
1855     + return -ENETDOWN;
1856     + }
1857     +
1858     ns = timespec64_to_ns(ts);
1859    
1860     DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);
1861     @@ -13988,6 +14004,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
1862     rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
1863     if (rc)
1864     goto init_one_freemem;
1865     +
1866     +#ifdef CONFIG_BNX2X_SRIOV
1867     + /* VF with OLD Hypervisor or old PF do not support filtering */
1868     + if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
1869     + dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1870     + dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
1871     + }
1872     +#endif
1873     }
1874    
1875     /* Enable SRIOV if capability found in configuration space */
1876     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1877     index 3f77d0863543..c6e059119b22 100644
1878     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1879     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
1880     @@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
1881    
1882     /* Add/Remove the filter */
1883     rc = bnx2x_config_vlan_mac(bp, &ramrod);
1884     - if (rc && rc != -EEXIST) {
1885     + if (rc == -EEXIST)
1886     + return 0;
1887     + if (rc) {
1888     BNX2X_ERR("Failed to %s %s\n",
1889     filter->add ? "add" : "delete",
1890     (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?
1891     @@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
1892     return rc;
1893     }
1894    
1895     + filter->applied = true;
1896     +
1897     return 0;
1898     }
1899    
1900     @@ -471,6 +475,8 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
1901     BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
1902     i, filters->count + 1);
1903     while (--i >= 0) {
1904     + if (!filters->filters[i].applied)
1905     + continue;
1906     filters->filters[i].add = !filters->filters[i].add;
1907     bnx2x_vf_mac_vlan_config(bp, vf, qid,
1908     &filters->filters[i],
1909     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1910     index 7a6d406f4c11..888d0b6632e8 100644
1911     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1912     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
1913     @@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
1914     (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/
1915    
1916     bool add;
1917     + bool applied;
1918     u8 *mac;
1919     u16 vid;
1920     };
1921     diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1922     index bfae300cf25f..c2d327d9dff0 100644
1923     --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1924     +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
1925     @@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
1926     struct bnx2x *bp = netdev_priv(dev);
1927     struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
1928     struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
1929     - int rc, i = 0;
1930     + int rc = 0, i = 0;
1931     struct netdev_hw_addr *ha;
1932    
1933     if (bp->state != BNX2X_STATE_OPEN) {
1934     @@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
1935     /* Get Rx mode requested */
1936     DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
1937    
1938     + /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
1939     + if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
1940     + DP(NETIF_MSG_IFUP,
1941     + "VF supports not more than %d multicast MAC addresses\n",
1942     + PFVF_MAX_MULTICAST_PER_VF);
1943     + rc = -EINVAL;
1944     + goto out;
1945     + }
1946     +
1947     netdev_for_each_mc_addr(ha, dev) {
1948     DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
1949     bnx2x_mc_addr(ha));
1950     @@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
1951     i++;
1952     }
1953    
1954     - /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
1955     - * addresses tops
1956     - */
1957     - if (i >= PFVF_MAX_MULTICAST_PER_VF) {
1958     - DP(NETIF_MSG_IFUP,
1959     - "VF supports not more than %d multicast MAC addresses\n",
1960     - PFVF_MAX_MULTICAST_PER_VF);
1961     - return -EINVAL;
1962     - }
1963     -
1964     req->n_multicast = i;
1965     req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
1966     req->vf_qid = 0;
1967     @@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
1968     out:
1969     bnx2x_vfpf_finalize(bp, &req->first_tlv);
1970    
1971     - return 0;
1972     + return rc;
1973     }
1974    
1975     /* request pf to add a vlan for the vf */
1976     diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
1977     index b8778e7b1f79..7c6c1468628b 100644
1978     --- a/drivers/net/ethernet/ibm/ibmvnic.c
1979     +++ b/drivers/net/ethernet/ibm/ibmvnic.c
1980     @@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
1981     send_map_query(adapter);
1982     for (i = 0; i < rxadd_subcrqs; i++) {
1983     init_rx_pool(adapter, &adapter->rx_pool[i],
1984     - IBMVNIC_BUFFS_PER_POOL, i,
1985     + adapter->req_rx_add_entries_per_subcrq, i,
1986     be64_to_cpu(size_array[i]), 1);
1987     if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
1988     dev_err(dev, "Couldn't alloc rx pool\n");
1989     @@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
1990     for (i = 0; i < tx_subcrqs; i++) {
1991     tx_pool = &adapter->tx_pool[i];
1992     tx_pool->tx_buff =
1993     - kcalloc(adapter->max_tx_entries_per_subcrq,
1994     + kcalloc(adapter->req_tx_entries_per_subcrq,
1995     sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
1996     if (!tx_pool->tx_buff)
1997     goto tx_pool_alloc_failed;
1998    
1999     if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
2000     - adapter->max_tx_entries_per_subcrq *
2001     + adapter->req_tx_entries_per_subcrq *
2002     adapter->req_mtu))
2003     goto tx_ltb_alloc_failed;
2004    
2005     tx_pool->free_map =
2006     - kcalloc(adapter->max_tx_entries_per_subcrq,
2007     + kcalloc(adapter->req_tx_entries_per_subcrq,
2008     sizeof(int), GFP_KERNEL);
2009     if (!tx_pool->free_map)
2010     goto tx_fm_alloc_failed;
2011    
2012     - for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
2013     + for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
2014     tx_pool->free_map[j] = j;
2015    
2016     tx_pool->consumer_index = 0;
2017     @@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2018     u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
2019     struct device *dev = &adapter->vdev->dev;
2020     struct ibmvnic_tx_buff *tx_buff = NULL;
2021     + struct ibmvnic_sub_crq_queue *tx_scrq;
2022     struct ibmvnic_tx_pool *tx_pool;
2023     unsigned int tx_send_failed = 0;
2024     unsigned int tx_map_failed = 0;
2025     @@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2026     int ret = 0;
2027    
2028     tx_pool = &adapter->tx_pool[queue_num];
2029     + tx_scrq = adapter->tx_scrq[queue_num];
2030     txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
2031     handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
2032     be32_to_cpu(adapter->login_rsp_buf->
2033     @@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2034    
2035     tx_pool->consumer_index =
2036     (tx_pool->consumer_index + 1) %
2037     - adapter->max_tx_entries_per_subcrq;
2038     + adapter->req_tx_entries_per_subcrq;
2039    
2040     tx_buff = &tx_pool->tx_buff[index];
2041     tx_buff->skb = skb;
2042     @@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2043    
2044     if (tx_pool->consumer_index == 0)
2045     tx_pool->consumer_index =
2046     - adapter->max_tx_entries_per_subcrq - 1;
2047     + adapter->req_tx_entries_per_subcrq - 1;
2048     else
2049     tx_pool->consumer_index--;
2050    
2051     @@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
2052     ret = NETDEV_TX_BUSY;
2053     goto out;
2054     }
2055     +
2056     + atomic_inc(&tx_scrq->used);
2057     +
2058     + if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
2059     + netdev_info(netdev, "Stopping queue %d\n", queue_num);
2060     + netif_stop_subqueue(netdev, queue_num);
2061     + }
2062     +
2063     tx_packets++;
2064     tx_bytes += skb->len;
2065     txq->trans_start = jiffies;
2066     @@ -1220,6 +1230,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
2067     scrq->adapter = adapter;
2068     scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
2069     scrq->cur = 0;
2070     + atomic_set(&scrq->used, 0);
2071     scrq->rx_skb_top = NULL;
2072     spin_lock_init(&scrq->lock);
2073    
2074     @@ -1368,14 +1379,28 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
2075     DMA_TO_DEVICE);
2076     }
2077    
2078     - if (txbuff->last_frag)
2079     + if (txbuff->last_frag) {
2080     + atomic_dec(&scrq->used);
2081     +
2082     + if (atomic_read(&scrq->used) <=
2083     + (adapter->req_tx_entries_per_subcrq / 2) &&
2084     + netif_subqueue_stopped(adapter->netdev,
2085     + txbuff->skb)) {
2086     + netif_wake_subqueue(adapter->netdev,
2087     + scrq->pool_index);
2088     + netdev_dbg(adapter->netdev,
2089     + "Started queue %d\n",
2090     + scrq->pool_index);
2091     + }
2092     +
2093     dev_kfree_skb_any(txbuff->skb);
2094     + }
2095    
2096     adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
2097     producer_index] = index;
2098     adapter->tx_pool[pool].producer_index =
2099     (adapter->tx_pool[pool].producer_index + 1) %
2100     - adapter->max_tx_entries_per_subcrq;
2101     + adapter->req_tx_entries_per_subcrq;
2102     }
2103     /* remove tx_comp scrq*/
2104     next->tx_comp.first = 0;
2105     diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h
2106     index dd775d951b73..892eda346e54 100644
2107     --- a/drivers/net/ethernet/ibm/ibmvnic.h
2108     +++ b/drivers/net/ethernet/ibm/ibmvnic.h
2109     @@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
2110     spinlock_t lock;
2111     struct sk_buff *rx_skb_top;
2112     struct ibmvnic_adapter *adapter;
2113     + atomic_t used;
2114     };
2115    
2116     struct ibmvnic_long_term_buff {
2117     diff --git a/drivers/net/phy/spi_ks8995.c b/drivers/net/phy/spi_ks8995.c
2118     index 93ffedfa2994..1e2d4f1179da 100644
2119     --- a/drivers/net/phy/spi_ks8995.c
2120     +++ b/drivers/net/phy/spi_ks8995.c
2121     @@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
2122     if (err)
2123     return err;
2124    
2125     - ks->regs_attr.size = ks->chip->regs_size;
2126     memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
2127     + ks->regs_attr.size = ks->chip->regs_size;
2128    
2129     err = ks8995_reset(ks);
2130     if (err)
2131     return err;
2132    
2133     + sysfs_attr_init(&ks->regs_attr.attr);
2134     err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
2135     if (err) {
2136     dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
2137     diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2138     index 8e3c6f4bdaa0..edffe5aeeeb1 100644
2139     --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2140     +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
2141     @@ -4080,8 +4080,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err,
2142     sdio_release_host(sdiodev->func[1]);
2143     fail:
2144     brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), err);
2145     - device_release_driver(dev);
2146     device_release_driver(&sdiodev->func[2]->dev);
2147     + device_release_driver(dev);
2148     }
2149    
2150     struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev)
2151     diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
2152     index d2a28a9d3209..4b462dc21c41 100644
2153     --- a/drivers/net/wireless/mac80211_hwsim.c
2154     +++ b/drivers/net/wireless/mac80211_hwsim.c
2155     @@ -3047,6 +3047,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2156     {
2157     struct hwsim_new_radio_params param = { 0 };
2158     const char *hwname = NULL;
2159     + int ret;
2160    
2161     param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
2162     param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
2163     @@ -3086,7 +3087,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info)
2164     param.regd = hwsim_world_regdom_custom[idx];
2165     }
2166    
2167     - return mac80211_hwsim_new_radio(info, &param);
2168     + ret = mac80211_hwsim_new_radio(info, &param);
2169     + kfree(hwname);
2170     + return ret;
2171     }
2172    
2173     static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info)
2174     diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
2175     index 9013a585507e..f32fc704cb7e 100644
2176     --- a/drivers/rapidio/devices/rio_mport_cdev.c
2177     +++ b/drivers/rapidio/devices/rio_mport_cdev.c
2178     @@ -964,7 +964,8 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode,
2179     req->sgt.sgl, req->sgt.nents, dir);
2180     if (nents == -EFAULT) {
2181     rmcd_error("Failed to map SG list");
2182     - return -EFAULT;
2183     + ret = -EFAULT;
2184     + goto err_pg;
2185     }
2186    
2187     ret = do_dma_request(req, xfer, sync, nents);
2188     diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
2189     index 4df3cdcf88ce..9c9563312a3d 100644
2190     --- a/drivers/scsi/lpfc/lpfc_els.c
2191     +++ b/drivers/scsi/lpfc/lpfc_els.c
2192     @@ -8185,11 +8185,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2193     spin_lock_irq(shost->host_lock);
2194     vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
2195     spin_unlock_irq(shost->host_lock);
2196     - if (vport->port_type == LPFC_PHYSICAL_PORT
2197     - && !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
2198     - lpfc_issue_init_vfi(vport);
2199     - else
2200     + if (mb->mbxStatus == MBX_NOT_FINISHED)
2201     + break;
2202     + if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
2203     + !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
2204     + if (phba->sli_rev == LPFC_SLI_REV4)
2205     + lpfc_issue_init_vfi(vport);
2206     + else
2207     + lpfc_initial_flogi(vport);
2208     + } else {
2209     lpfc_initial_fdisc(vport);
2210     + }
2211     break;
2212     }
2213     } else {
2214     diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
2215     index 658e4d15cb71..ce4ac769a9a2 100644
2216     --- a/drivers/scsi/qla2xxx/qla_dbg.c
2217     +++ b/drivers/scsi/qla2xxx/qla_dbg.c
2218     @@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2219     "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2220     ql_dbg(level, vha, id,
2221     "----- -----------------------------------------------\n");
2222     - for (cnt = 0; cnt < size; cnt++, buf++) {
2223     - if (cnt % 16 == 0)
2224     - ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
2225     - printk(" %02x", *buf);
2226     - if (cnt % 16 == 15)
2227     - printk("\n");
2228     + for (cnt = 0; cnt < size; cnt += 16) {
2229     + ql_dbg(level, vha, id, "%04x: ", cnt);
2230     + print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2231     + buf + cnt, min(16U, size - cnt), false);
2232     }
2233     - if (cnt % 16 != 0)
2234     - printk("\n");
2235     }
2236     diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
2237     index d8099c7cab00..c7b770075caa 100644
2238     --- a/drivers/scsi/scsi_lib.c
2239     +++ b/drivers/scsi/scsi_lib.c
2240     @@ -2041,11 +2041,13 @@ static void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q)
2241     q->limits.cluster = 0;
2242    
2243     /*
2244     - * set a reasonable default alignment on word boundaries: the
2245     - * host and device may alter it using
2246     - * blk_queue_update_dma_alignment() later.
2247     + * Set a reasonable default alignment: The larger of 32-byte (dword),
2248     + * which is a common minimum for HBAs, and the minimum DMA alignment,
2249     + * which is set by the platform.
2250     + *
2251     + * Devices that require a bigger alignment can increase it later.
2252     */
2253     - blk_queue_dma_alignment(q, 0x03);
2254     + blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1);
2255     }
2256    
2257     struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
2258     diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
2259     index 8e281e47afec..b7995474148c 100644
2260     --- a/drivers/spi/Kconfig
2261     +++ b/drivers/spi/Kconfig
2262     @@ -365,7 +365,6 @@ config SPI_FSL_SPI
2263     config SPI_FSL_DSPI
2264     tristate "Freescale DSPI controller"
2265     select REGMAP_MMIO
2266     - depends on HAS_DMA
2267     depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
2268     help
2269     This enables support for the Freescale DSPI controller in master
2270     diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
2271     index c61ddbf94bc7..16c67120d72b 100644
2272     --- a/drivers/usb/dwc3/gadget.c
2273     +++ b/drivers/usb/dwc3/gadget.c
2274     @@ -3092,15 +3092,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc)
2275    
2276     int dwc3_gadget_suspend(struct dwc3 *dwc)
2277     {
2278     - int ret;
2279     -
2280     if (!dwc->gadget_driver)
2281     return 0;
2282    
2283     - ret = dwc3_gadget_run_stop(dwc, false, false);
2284     - if (ret < 0)
2285     - return ret;
2286     -
2287     + dwc3_gadget_run_stop(dwc, false, false);
2288     dwc3_disconnect_gadget(dwc);
2289     __dwc3_gadget_stop(dwc);
2290    
2291     diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
2292     index 502a096fc380..a5ca409dc97e 100644
2293     --- a/drivers/usb/gadget/configfs.c
2294     +++ b/drivers/usb/gadget/configfs.c
2295     @@ -269,6 +269,7 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
2296     ret = unregister_gadget(gi);
2297     if (ret)
2298     goto err;
2299     + kfree(name);
2300     } else {
2301     if (gi->composite.gadget_driver.udc_name) {
2302     ret = -EBUSY;
2303     diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
2304     index 4fce83266926..346a630cebd5 100644
2305     --- a/drivers/usb/gadget/function/f_fs.c
2306     +++ b/drivers/usb/gadget/function/f_fs.c
2307     @@ -2262,9 +2262,18 @@ static int __ffs_data_do_os_desc(enum ffs_os_desc_type type,
2308     int i;
2309    
2310     if (len < sizeof(*d) ||
2311     - d->bFirstInterfaceNumber >= ffs->interfaces_count ||
2312     - d->Reserved1)
2313     + d->bFirstInterfaceNumber >= ffs->interfaces_count)
2314     return -EINVAL;
2315     + if (d->Reserved1 != 1) {
2316     + /*
2317     + * According to the spec, Reserved1 must be set to 1
2318     + * but older kernels incorrectly rejected non-zero
2319     + * values. We fix it here to avoid returning EINVAL
2320     + * in response to values we used to accept.
2321     + */
2322     + pr_debug("usb_ext_compat_desc::Reserved1 forced to 1\n");
2323     + d->Reserved1 = 1;
2324     + }
2325     for (i = 0; i < ARRAY_SIZE(d->Reserved2); ++i)
2326     if (d->Reserved2[i])
2327     return -EINVAL;
2328     diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
2329     index f69dbd4bcd18..b8534d3f8bb0 100644
2330     --- a/drivers/usb/gadget/legacy/inode.c
2331     +++ b/drivers/usb/gadget/legacy/inode.c
2332     @@ -1819,8 +1819,10 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
2333    
2334     spin_lock_irq (&dev->lock);
2335     value = -EINVAL;
2336     - if (dev->buf)
2337     + if (dev->buf) {
2338     + kfree(kbuf);
2339     goto fail;
2340     + }
2341     dev->buf = kbuf;
2342    
2343     /* full or low speed config */
2344     diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c
2345     index 33f3987218f7..d133252ef2c3 100644
2346     --- a/drivers/usb/gadget/udc/net2280.c
2347     +++ b/drivers/usb/gadget/udc/net2280.c
2348     @@ -1146,15 +1146,15 @@ static int scan_dma_completions(struct net2280_ep *ep)
2349     */
2350     while (!list_empty(&ep->queue)) {
2351     struct net2280_request *req;
2352     - u32 tmp;
2353     + u32 req_dma_count;
2354    
2355     req = list_entry(ep->queue.next,
2356     struct net2280_request, queue);
2357     if (!req->valid)
2358     break;
2359     rmb();
2360     - tmp = le32_to_cpup(&req->td->dmacount);
2361     - if ((tmp & BIT(VALID_BIT)) != 0)
2362     + req_dma_count = le32_to_cpup(&req->td->dmacount);
2363     + if ((req_dma_count & BIT(VALID_BIT)) != 0)
2364     break;
2365    
2366     /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
2367     @@ -1163,40 +1163,41 @@ static int scan_dma_completions(struct net2280_ep *ep)
2368     */
2369     if (unlikely(req->td->dmadesc == 0)) {
2370     /* paranoia */
2371     - tmp = readl(&ep->dma->dmacount);
2372     - if (tmp & DMA_BYTE_COUNT_MASK)
2373     + u32 const ep_dmacount = readl(&ep->dma->dmacount);
2374     +
2375     + if (ep_dmacount & DMA_BYTE_COUNT_MASK)
2376     break;
2377     /* single transfer mode */
2378     - dma_done(ep, req, tmp, 0);
2379     + dma_done(ep, req, req_dma_count, 0);
2380     num_completed++;
2381     break;
2382     } else if (!ep->is_in &&
2383     (req->req.length % ep->ep.maxpacket) &&
2384     !(ep->dev->quirks & PLX_PCIE)) {
2385    
2386     - tmp = readl(&ep->regs->ep_stat);
2387     + u32 const ep_stat = readl(&ep->regs->ep_stat);
2388     /* AVOID TROUBLE HERE by not issuing short reads from
2389     * your gadget driver. That helps avoids errata 0121,
2390     * 0122, and 0124; not all cases trigger the warning.
2391     */
2392     - if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
2393     + if ((ep_stat & BIT(NAK_OUT_PACKETS)) == 0) {
2394     ep_warn(ep->dev, "%s lost packet sync!\n",
2395     ep->ep.name);
2396     req->req.status = -EOVERFLOW;
2397     } else {
2398     - tmp = readl(&ep->regs->ep_avail);
2399     - if (tmp) {
2400     + u32 const ep_avail = readl(&ep->regs->ep_avail);
2401     + if (ep_avail) {
2402     /* fifo gets flushed later */
2403     ep->out_overflow = 1;
2404     ep_dbg(ep->dev,
2405     "%s dma, discard %d len %d\n",
2406     - ep->ep.name, tmp,
2407     + ep->ep.name, ep_avail,
2408     req->req.length);
2409     req->req.status = -EOVERFLOW;
2410     }
2411     }
2412     }
2413     - dma_done(ep, req, tmp, 0);
2414     + dma_done(ep, req, req_dma_count, 0);
2415     num_completed++;
2416     }
2417    
2418     diff --git a/drivers/usb/gadget/udc/pxa27x_udc.c b/drivers/usb/gadget/udc/pxa27x_udc.c
2419     index 7fa60f5b7ae4..afd6b86458c5 100644
2420     --- a/drivers/usb/gadget/udc/pxa27x_udc.c
2421     +++ b/drivers/usb/gadget/udc/pxa27x_udc.c
2422     @@ -2534,9 +2534,10 @@ static int pxa_udc_remove(struct platform_device *_dev)
2423     usb_del_gadget_udc(&udc->gadget);
2424     pxa_cleanup_debugfs(udc);
2425    
2426     - if (!IS_ERR_OR_NULL(udc->transceiver))
2427     + if (!IS_ERR_OR_NULL(udc->transceiver)) {
2428     usb_unregister_notifier(udc->transceiver, &pxa27x_udc_phy);
2429     - usb_put_phy(udc->transceiver);
2430     + usb_put_phy(udc->transceiver);
2431     + }
2432    
2433     udc->transceiver = NULL;
2434     the_controller = NULL;
2435     diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c
2436     index bb89e24c48b4..2197a50ed2ab 100644
2437     --- a/drivers/usb/gadget/udc/renesas_usb3.c
2438     +++ b/drivers/usb/gadget/udc/renesas_usb3.c
2439     @@ -222,7 +222,7 @@
2440     #define USB3_EP0_SS_MAX_PACKET_SIZE 512
2441     #define USB3_EP0_HSFS_MAX_PACKET_SIZE 64
2442     #define USB3_EP0_BUF_SIZE 8
2443     -#define USB3_MAX_NUM_PIPES 30
2444     +#define USB3_MAX_NUM_PIPES 6 /* This includes PIPE 0 */
2445     #define USB3_WAIT_US 3
2446    
2447     struct renesas_usb3;
2448     diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c
2449     index 7062bb0975a5..462e183609b6 100644
2450     --- a/drivers/virtio/virtio.c
2451     +++ b/drivers/virtio/virtio.c
2452     @@ -323,6 +323,8 @@ int register_virtio_device(struct virtio_device *dev)
2453     /* device_register() causes the bus infrastructure to look for a
2454     * matching driver. */
2455     err = device_register(&dev->dev);
2456     + if (err)
2457     + ida_simple_remove(&virtio_index_ida, dev->index);
2458     out:
2459     if (err)
2460     add_status(dev, VIRTIO_CONFIG_S_FAILED);
2461     diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c
2462     index d764236072b1..8d2c5180e015 100644
2463     --- a/fs/afs/cmservice.c
2464     +++ b/fs/afs/cmservice.c
2465     @@ -106,6 +106,9 @@ bool afs_cm_incoming_call(struct afs_call *call)
2466     case CBProbe:
2467     call->type = &afs_SRXCBProbe;
2468     return true;
2469     + case CBProbeUuid:
2470     + call->type = &afs_SRXCBProbeUuid;
2471     + return true;
2472     case CBTellMeAboutYourself:
2473     call->type = &afs_SRXCBTellMeAboutYourself;
2474     return true;
2475     diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
2476     index c4cff5cc9c93..a29730c44850 100644
2477     --- a/fs/btrfs/extent-tree.c
2478     +++ b/fs/btrfs/extent-tree.c
2479     @@ -9362,6 +9362,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
2480     ret = btrfs_del_root(trans, tree_root, &root->root_key);
2481     if (ret) {
2482     btrfs_abort_transaction(trans, ret);
2483     + err = ret;
2484     goto out_end_trans;
2485     }
2486    
2487     diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
2488     index 65566d5fcf39..1e5321d1ed22 100644
2489     --- a/fs/nfs/dir.c
2490     +++ b/fs/nfs/dir.c
2491     @@ -2098,7 +2098,7 @@ int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
2492     if (new_inode != NULL)
2493     nfs_drop_nlink(new_inode);
2494     d_move(old_dentry, new_dentry);
2495     - nfs_set_verifier(new_dentry,
2496     + nfs_set_verifier(old_dentry,
2497     nfs_save_change_attribute(new_dir));
2498     } else if (error == -ENOENT)
2499     nfs_dentry_handle_enoent(old_dentry);
2500     diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
2501     index fe9a9a183b2d..98ca9f1b6a07 100644
2502     --- a/fs/xfs/xfs_inode.c
2503     +++ b/fs/xfs/xfs_inode.c
2504     @@ -2386,6 +2386,7 @@ xfs_ifree_cluster(
2505     */
2506     if (ip->i_ino != inum + i) {
2507     xfs_iunlock(ip, XFS_ILOCK_EXCL);
2508     + rcu_read_unlock();
2509     continue;
2510     }
2511     }
2512     diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
2513     index 08528afdf58b..704caae69c42 100644
2514     --- a/include/linux/dma-mapping.h
2515     +++ b/include/linux/dma-mapping.h
2516     @@ -659,7 +659,6 @@ static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
2517     return ret;
2518     }
2519    
2520     -#ifdef CONFIG_HAS_DMA
2521     static inline int dma_get_cache_alignment(void)
2522     {
2523     #ifdef ARCH_DMA_MINALIGN
2524     @@ -667,7 +666,6 @@ static inline int dma_get_cache_alignment(void)
2525     #endif
2526     return 1;
2527     }
2528     -#endif
2529    
2530     /* flags for the coherent memory api */
2531     #define DMA_MEMORY_MAP 0x01
2532     diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
2533     index 29d4385903d4..206fe3bccccc 100644
2534     --- a/include/linux/genalloc.h
2535     +++ b/include/linux/genalloc.h
2536     @@ -32,6 +32,7 @@
2537    
2538     #include <linux/types.h>
2539     #include <linux/spinlock_types.h>
2540     +#include <linux/atomic.h>
2541    
2542     struct device;
2543     struct device_node;
2544     @@ -70,7 +71,7 @@ struct gen_pool {
2545     */
2546     struct gen_pool_chunk {
2547     struct list_head next_chunk; /* next chunk in pool */
2548     - atomic_t avail;
2549     + atomic_long_t avail;
2550     phys_addr_t phys_addr; /* physical starting address of memory chunk */
2551     unsigned long start_addr; /* start address of memory chunk */
2552     unsigned long end_addr; /* end address of memory chunk (inclusive) */
2553     diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
2554     index 25c0dc31f084..854dfa6fa6e3 100644
2555     --- a/include/linux/mmu_notifier.h
2556     +++ b/include/linux/mmu_notifier.h
2557     @@ -381,18 +381,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
2558     ___pmd; \
2559     })
2560    
2561     -#define pmdp_huge_get_and_clear_notify(__mm, __haddr, __pmd) \
2562     -({ \
2563     - unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
2564     - pmd_t ___pmd; \
2565     - \
2566     - ___pmd = pmdp_huge_get_and_clear(__mm, __haddr, __pmd); \
2567     - mmu_notifier_invalidate_range(__mm, ___haddr, \
2568     - ___haddr + HPAGE_PMD_SIZE); \
2569     - \
2570     - ___pmd; \
2571     -})
2572     -
2573     /*
2574     * set_pte_at_notify() sets the pte _after_ running the notifier.
2575     * This is safe to start by updating the secondary MMUs, because the primary MMU
2576     @@ -480,7 +468,6 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
2577     #define pmdp_clear_young_notify pmdp_test_and_clear_young
2578     #define ptep_clear_flush_notify ptep_clear_flush
2579     #define pmdp_huge_clear_flush_notify pmdp_huge_clear_flush
2580     -#define pmdp_huge_get_and_clear_notify pmdp_huge_get_and_clear
2581     #define set_pte_at_notify set_pte_at
2582    
2583     #endif /* CONFIG_MMU_NOTIFIER */
2584     diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h
2585     index 35d0fd7a4948..e821a3132a3e 100644
2586     --- a/include/linux/omap-gpmc.h
2587     +++ b/include/linux/omap-gpmc.h
2588     @@ -88,10 +88,11 @@ static inline int gpmc_nand_init(struct omap_nand_platform_data *d,
2589     #endif
2590    
2591     #if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2)
2592     -extern void gpmc_onenand_init(struct omap_onenand_platform_data *d);
2593     +extern int gpmc_onenand_init(struct omap_onenand_platform_data *d);
2594     #else
2595     #define board_onenand_data NULL
2596     -static inline void gpmc_onenand_init(struct omap_onenand_platform_data *d)
2597     +static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d)
2598     {
2599     + return 0;
2600     }
2601     #endif
2602     diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
2603     index c6f0f0d0e17e..00a1f330f93a 100644
2604     --- a/include/linux/sysfs.h
2605     +++ b/include/linux/sysfs.h
2606     @@ -116,6 +116,12 @@ struct attribute_group {
2607     .show = _name##_show, \
2608     }
2609    
2610     +#define __ATTR_RO_MODE(_name, _mode) { \
2611     + .attr = { .name = __stringify(_name), \
2612     + .mode = VERIFY_OCTAL_PERMISSIONS(_mode) }, \
2613     + .show = _name##_show, \
2614     +}
2615     +
2616     #define __ATTR_WO(_name) { \
2617     .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \
2618     .store = _name##_store, \
2619     diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
2620     index dae99d7d2bc0..706a7017885c 100644
2621     --- a/include/scsi/libsas.h
2622     +++ b/include/scsi/libsas.h
2623     @@ -165,11 +165,11 @@ struct expander_device {
2624    
2625     struct sata_device {
2626     unsigned int class;
2627     - struct smp_resp rps_resp; /* report_phy_sata_resp */
2628     u8 port_no; /* port number, if this is a PM (Port) */
2629    
2630     struct ata_port *ap;
2631     struct ata_host ata_host;
2632     + struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
2633     u8 fis[ATA_RESP_FIS_SIZE];
2634     };
2635    
2636     diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
2637     index 5c51d1985b51..673fa6fe2d73 100644
2638     --- a/kernel/bpf/percpu_freelist.c
2639     +++ b/kernel/bpf/percpu_freelist.c
2640     @@ -78,8 +78,10 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
2641     {
2642     struct pcpu_freelist_head *head;
2643     struct pcpu_freelist_node *node;
2644     + unsigned long flags;
2645     int orig_cpu, cpu;
2646    
2647     + local_irq_save(flags);
2648     orig_cpu = cpu = raw_smp_processor_id();
2649     while (1) {
2650     head = per_cpu_ptr(s->freelist, cpu);
2651     @@ -87,14 +89,16 @@ struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
2652     node = head->first;
2653     if (node) {
2654     head->first = node->next;
2655     - raw_spin_unlock(&head->lock);
2656     + raw_spin_unlock_irqrestore(&head->lock, flags);
2657     return node;
2658     }
2659     raw_spin_unlock(&head->lock);
2660     cpu = cpumask_next(cpu, cpu_possible_mask);
2661     if (cpu >= nr_cpu_ids)
2662     cpu = 0;
2663     - if (cpu == orig_cpu)
2664     + if (cpu == orig_cpu) {
2665     + local_irq_restore(flags);
2666     return NULL;
2667     + }
2668     }
2669     }
2670     diff --git a/kernel/cpu.c b/kernel/cpu.c
2671     index 26a4f74bff83..e1436ca4aed0 100644
2672     --- a/kernel/cpu.c
2673     +++ b/kernel/cpu.c
2674     @@ -1321,11 +1321,6 @@ static struct cpuhp_step cpuhp_bp_states[] = {
2675     .teardown.single = NULL,
2676     .cant_stop = true,
2677     },
2678     - [CPUHP_AP_SMPCFD_DYING] = {
2679     - .name = "smpcfd:dying",
2680     - .startup.single = NULL,
2681     - .teardown.single = smpcfd_dying_cpu,
2682     - },
2683     /*
2684     * Handled on controll processor until the plugged processor manages
2685     * this itself.
2686     @@ -1367,6 +1362,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
2687     .startup.single = NULL,
2688     .teardown.single = rcutree_dying_cpu,
2689     },
2690     + [CPUHP_AP_SMPCFD_DYING] = {
2691     + .name = "smpcfd:dying",
2692     + .startup.single = NULL,
2693     + .teardown.single = smpcfd_dying_cpu,
2694     + },
2695     /* Entry state on starting. Interrupts enabled from here on. Transient
2696     * state for synchronsization */
2697     [CPUHP_AP_ONLINE] = {
2698     diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
2699     index fc1ef736253c..77777d918676 100644
2700     --- a/kernel/debug/kdb/kdb_io.c
2701     +++ b/kernel/debug/kdb/kdb_io.c
2702     @@ -349,7 +349,7 @@ static char *kdb_read(char *buffer, size_t bufsize)
2703     }
2704     kdb_printf("\n");
2705     for (i = 0; i < count; i++) {
2706     - if (kallsyms_symbol_next(p_tmp, i) < 0)
2707     + if (WARN_ON(!kallsyms_symbol_next(p_tmp, i)))
2708     break;
2709     kdb_printf("%s ", p_tmp);
2710     *(p_tmp + len) = '\0';
2711     diff --git a/kernel/jump_label.c b/kernel/jump_label.c
2712     index a9b8cf500591..def4548ea40c 100644
2713     --- a/kernel/jump_label.c
2714     +++ b/kernel/jump_label.c
2715     @@ -612,7 +612,7 @@ static __init int jump_label_test(void)
2716    
2717     return 0;
2718     }
2719     -late_initcall(jump_label_test);
2720     +early_initcall(jump_label_test);
2721     #endif /* STATIC_KEYS_SELFTEST */
2722    
2723     #endif /* HAVE_JUMP_LABEL */
2724     diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
2725     index 7a68c631d5b5..3d862f5b0331 100644
2726     --- a/kernel/sched/fair.c
2727     +++ b/kernel/sched/fair.c
2728     @@ -5451,7 +5451,7 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t
2729     * Due to large variance we need a large fuzz factor; hackbench in
2730     * particularly is sensitive here.
2731     */
2732     - if ((avg_idle / 512) < avg_cost)
2733     + if (sched_feat(SIS_AVG_CPU) && (avg_idle / 512) < avg_cost)
2734     return -1;
2735    
2736     time = local_clock();
2737     diff --git a/kernel/sched/features.h b/kernel/sched/features.h
2738     index 69631fa46c2f..1b3c8189b286 100644
2739     --- a/kernel/sched/features.h
2740     +++ b/kernel/sched/features.h
2741     @@ -51,6 +51,11 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
2742     */
2743     SCHED_FEAT(TTWU_QUEUE, true)
2744    
2745     +/*
2746     + * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
2747     + */
2748     +SCHED_FEAT(SIS_AVG_CPU, false)
2749     +
2750     #ifdef HAVE_RT_PUSH_IPI
2751     /*
2752     * In order to avoid a thundering herd attack of CPUs that are
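
The two scheduler hunks belong together: the avg_idle/avg_cost early bail-out in select_idle_cpu() is now gated behind the new SIS_AVG_CPU feature bit, which defaults to false. A minimal sketch of that kind of feature gating (the 512 divisor and the flag name come from the hunks, everything else is invented):

/* sis_avg_cpu_sketch.c - illustrative only, not kernel code.
 * Models a boolean feature flag gating a cost heuristic, mirroring
 * how SIS_AVG_CPU gates the early bail-out above.
 */
#include <stdbool.h>
#include <stdio.h>

static bool sis_avg_cpu = false;	/* analogue of SCHED_FEAT(SIS_AVG_CPU, false) */

/* Return -1 to skip the idle scan, 0 to go ahead and scan. */
static int should_scan(unsigned long avg_idle, unsigned long avg_cost)
{
	/* With the feature off, the heuristic no longer short-circuits. */
	if (sis_avg_cpu && (avg_idle / 512) < avg_cost)
		return -1;
	return 0;
}

int main(void)
{
	printf("feature off: %d\n", should_scan(1000, 100));	/* 0: always scan */
	sis_avg_cpu = true;
	printf("feature on:  %d\n", should_scan(1000, 100));	/* -1: bail out */
	return 0;
}
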
2753     diff --git a/kernel/workqueue.c b/kernel/workqueue.c
2754     index 296dcca77f33..181c2ad0cb54 100644
2755     --- a/kernel/workqueue.c
2756     +++ b/kernel/workqueue.c
2757     @@ -1506,6 +1506,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq,
2758     struct timer_list *timer = &dwork->timer;
2759     struct work_struct *work = &dwork->work;
2760    
2761     + WARN_ON_ONCE(!wq);
2762     WARN_ON_ONCE(timer->function != delayed_work_timer_fn ||
2763     timer->data != (unsigned long)dwork);
2764     WARN_ON_ONCE(timer_pending(timer));
2765     diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
2766     index 1ef0cec38d78..dc14beae2c9a 100644
2767     --- a/lib/asn1_decoder.c
2768     +++ b/lib/asn1_decoder.c
2769     @@ -313,42 +313,47 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
2770    
2771     /* Decide how to handle the operation */
2772     switch (op) {
2773     - case ASN1_OP_MATCH_ANY_ACT:
2774     - case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
2775     - case ASN1_OP_COND_MATCH_ANY_ACT:
2776     - case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
2777     - ret = actions[machine[pc + 1]](context, hdr, tag, data + dp, len);
2778     - if (ret < 0)
2779     - return ret;
2780     - goto skip_data;
2781     -
2782     - case ASN1_OP_MATCH_ACT:
2783     - case ASN1_OP_MATCH_ACT_OR_SKIP:
2784     - case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
2785     - ret = actions[machine[pc + 2]](context, hdr, tag, data + dp, len);
2786     - if (ret < 0)
2787     - return ret;
2788     - goto skip_data;
2789     -
2790     case ASN1_OP_MATCH:
2791     case ASN1_OP_MATCH_OR_SKIP:
2792     + case ASN1_OP_MATCH_ACT:
2793     + case ASN1_OP_MATCH_ACT_OR_SKIP:
2794     case ASN1_OP_MATCH_ANY:
2795     case ASN1_OP_MATCH_ANY_OR_SKIP:
2796     + case ASN1_OP_MATCH_ANY_ACT:
2797     + case ASN1_OP_MATCH_ANY_ACT_OR_SKIP:
2798     case ASN1_OP_COND_MATCH_OR_SKIP:
2799     + case ASN1_OP_COND_MATCH_ACT_OR_SKIP:
2800     case ASN1_OP_COND_MATCH_ANY:
2801     case ASN1_OP_COND_MATCH_ANY_OR_SKIP:
2802     - skip_data:
2803     + case ASN1_OP_COND_MATCH_ANY_ACT:
2804     + case ASN1_OP_COND_MATCH_ANY_ACT_OR_SKIP:
2805     +
2806     if (!(flags & FLAG_CONS)) {
2807     if (flags & FLAG_INDEFINITE_LENGTH) {
2808     + size_t tmp = dp;
2809     +
2810     ret = asn1_find_indefinite_length(
2811     - data, datalen, &dp, &len, &errmsg);
2812     + data, datalen, &tmp, &len, &errmsg);
2813     if (ret < 0)
2814     goto error;
2815     - } else {
2816     - dp += len;
2817     }
2818     pr_debug("- LEAF: %zu\n", len);
2819     }
2820     +
2821     + if (op & ASN1_OP_MATCH__ACT) {
2822     + unsigned char act;
2823     +
2824     + if (op & ASN1_OP_MATCH__ANY)
2825     + act = machine[pc + 1];
2826     + else
2827     + act = machine[pc + 2];
2828     + ret = actions[act](context, hdr, tag, data + dp, len);
2829     + if (ret < 0)
2830     + return ret;
2831     + }
2832     +
2833     + if (!(flags & FLAG_CONS))
2834     + dp += len;
2835     pc += asn1_op_lengths[op];
2836     goto next_op;
2837    
2838     @@ -434,6 +439,8 @@ int asn1_ber_decoder(const struct asn1_decoder *decoder,
2839     else
2840     act = machine[pc + 1];
2841     ret = actions[act](context, hdr, 0, data + tdp, len);
2842     + if (ret < 0)
2843     + return ret;
2844     }
2845     pc += asn1_op_lengths[op];
2846     goto next_op;
2847     diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
2848     index da796e2dc4f5..c7c96bc7654a 100644
2849     --- a/lib/dynamic_debug.c
2850     +++ b/lib/dynamic_debug.c
2851     @@ -360,6 +360,10 @@ static int ddebug_parse_query(char *words[], int nwords,
2852     if (parse_lineno(last, &query->last_lineno) < 0)
2853     return -EINVAL;
2854    
2855     + /* special case for last lineno not specified */
2856     + if (query->last_lineno == 0)
2857     + query->last_lineno = UINT_MAX;
2858     +
2859     if (query->last_lineno < query->first_lineno) {
2860     pr_err("last-line:%d < 1st-line:%d\n",
2861     query->last_lineno,
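
The dynamic_debug hunk treats a query such as "line 100-" (no upper bound given) as "from line 100 to the end of the file" by substituting UINT_MAX before the range check. A small stand-alone sketch of that parsing rule; only the UINT_MAX convention comes from the patch, the helper here is made up:

/* lineno_range_sketch.c - illustrative only.
 * Parses "first[-[last]]" the way the hunk above treats an omitted
 * upper bound: last == 0 means "no limit", i.e. UINT_MAX.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lineno_query {
	unsigned int first_lineno;
	unsigned int last_lineno;
};

static int parse_lineno_range(const char *arg, struct lineno_query *q)
{
	char *dash, *end;

	q->first_lineno = (unsigned int)strtoul(arg, &end, 10);
	dash = strchr(arg, '-');
	if (!dash) {
		q->last_lineno = q->first_lineno;	/* single line */
		return 0;
	}
	q->last_lineno = (unsigned int)strtoul(dash + 1, &end, 10);

	/* special case: "100-" means from line 100 to the end of the file */
	if (q->last_lineno == 0)
		q->last_lineno = UINT_MAX;

	if (q->last_lineno < q->first_lineno)
		return -1;	/* analogue of the -EINVAL path above */
	return 0;
}

int main(void)
{
	struct lineno_query q;

	parse_lineno_range("100-", &q);
	printf("\"100-\"    -> %u..%u\n", q.first_lineno, q.last_lineno);
	parse_lineno_range("100-200", &q);
	printf("\"100-200\" -> %u..%u\n", q.first_lineno, q.last_lineno);
	return 0;
}
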
2862     diff --git a/lib/genalloc.c b/lib/genalloc.c
2863     index 144fe6b1a03e..ca06adc4f445 100644
2864     --- a/lib/genalloc.c
2865     +++ b/lib/genalloc.c
2866     @@ -194,7 +194,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy
2867     chunk->phys_addr = phys;
2868     chunk->start_addr = virt;
2869     chunk->end_addr = virt + size - 1;
2870     - atomic_set(&chunk->avail, size);
2871     + atomic_long_set(&chunk->avail, size);
2872    
2873     spin_lock(&pool->lock);
2874     list_add_rcu(&chunk->next_chunk, &pool->chunks);
2875     @@ -304,7 +304,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
2876     nbits = (size + (1UL << order) - 1) >> order;
2877     rcu_read_lock();
2878     list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
2879     - if (size > atomic_read(&chunk->avail))
2880     + if (size > atomic_long_read(&chunk->avail))
2881     continue;
2882    
2883     start_bit = 0;
2884     @@ -324,7 +324,7 @@ unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
2885    
2886     addr = chunk->start_addr + ((unsigned long)start_bit << order);
2887     size = nbits << order;
2888     - atomic_sub(size, &chunk->avail);
2889     + atomic_long_sub(size, &chunk->avail);
2890     break;
2891     }
2892     rcu_read_unlock();
2893     @@ -390,7 +390,7 @@ void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
2894     remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
2895     BUG_ON(remain);
2896     size = nbits << order;
2897     - atomic_add(size, &chunk->avail);
2898     + atomic_long_add(size, &chunk->avail);
2899     rcu_read_unlock();
2900     return;
2901     }
2902     @@ -464,7 +464,7 @@ size_t gen_pool_avail(struct gen_pool *pool)
2903    
2904     rcu_read_lock();
2905     list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
2906     - avail += atomic_read(&chunk->avail);
2907     + avail += atomic_long_read(&chunk->avail);
2908     rcu_read_unlock();
2909     return avail;
2910     }
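
All of the genalloc hunks switch chunk->avail from atomic_t to atomic_long_t, so a pool chunk larger than 4 GiB no longer truncates its available-byte count. A tiny user-space illustration of the width difference using C11 atomics, assuming an LP64 build (these are not the kernel types):

/* atomic_width_sketch.c - illustrative only; assumes LP64.
 * Shows why a 32-bit counter cannot hold the available bytes of a
 * chunk larger than 4 GiB, which is what the genalloc hunks above fix
 * by moving to atomic_long_t.
 */
#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	unsigned long size = 6UL * 1024 * 1024 * 1024;	/* 6 GiB chunk */

	atomic_uint  avail32;
	atomic_ulong avail64;

	atomic_init(&avail32, (unsigned int)size);	/* wraps modulo 2^32 */
	atomic_init(&avail64, size);

	printf("32-bit counter sees %u bytes\n", atomic_load(&avail32));
	printf("long counter sees   %lu bytes\n", atomic_load(&avail64));
	return 0;
}
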
2911     diff --git a/mm/huge_memory.c b/mm/huge_memory.c
2912     index 3cae1dcf069c..c234c078693c 100644
2913     --- a/mm/huge_memory.c
2914     +++ b/mm/huge_memory.c
2915     @@ -1509,37 +1509,69 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
2916     {
2917     struct mm_struct *mm = vma->vm_mm;
2918     spinlock_t *ptl;
2919     - int ret = 0;
2920     + pmd_t entry;
2921     + bool preserve_write;
2922     + int ret;
2923    
2924     ptl = __pmd_trans_huge_lock(pmd, vma);
2925     - if (ptl) {
2926     - pmd_t entry;
2927     - bool preserve_write = prot_numa && pmd_write(*pmd);
2928     - ret = 1;
2929     + if (!ptl)
2930     + return 0;
2931    
2932     - /*
2933     - * Avoid trapping faults against the zero page. The read-only
2934     - * data is likely to be read-cached on the local CPU and
2935     - * local/remote hits to the zero page are not interesting.
2936     - */
2937     - if (prot_numa && is_huge_zero_pmd(*pmd)) {
2938     - spin_unlock(ptl);
2939     - return ret;
2940     - }
2941     + preserve_write = prot_numa && pmd_write(*pmd);
2942     + ret = 1;
2943    
2944     - if (!prot_numa || !pmd_protnone(*pmd)) {
2945     - entry = pmdp_huge_get_and_clear_notify(mm, addr, pmd);
2946     - entry = pmd_modify(entry, newprot);
2947     - if (preserve_write)
2948     - entry = pmd_mkwrite(entry);
2949     - ret = HPAGE_PMD_NR;
2950     - set_pmd_at(mm, addr, pmd, entry);
2951     - BUG_ON(vma_is_anonymous(vma) && !preserve_write &&
2952     - pmd_write(entry));
2953     - }
2954     - spin_unlock(ptl);
2955     - }
2956     + /*
2957     + * Avoid trapping faults against the zero page. The read-only
2958     + * data is likely to be read-cached on the local CPU and
2959     + * local/remote hits to the zero page are not interesting.
2960     + */
2961     + if (prot_numa && is_huge_zero_pmd(*pmd))
2962     + goto unlock;
2963     +
2964     + if (prot_numa && pmd_protnone(*pmd))
2965     + goto unlock;
2966     +
2967     + /*
2968     + * In case prot_numa, we are under down_read(mmap_sem). It's critical
2969     + * to not clear pmd intermittently to avoid race with MADV_DONTNEED
2970     + * which is also under down_read(mmap_sem):
2971     + *
2972     + * CPU0: CPU1:
2973     + * change_huge_pmd(prot_numa=1)
2974     + * pmdp_huge_get_and_clear_notify()
2975     + * madvise_dontneed()
2976     + * zap_pmd_range()
2977     + * pmd_trans_huge(*pmd) == 0 (without ptl)
2978     + * // skip the pmd
2979     + * set_pmd_at();
2980     + * // pmd is re-established
2981     + *
2982     + * The race makes MADV_DONTNEED miss the huge pmd and don't clear it
2983     + * which may break userspace.
2984     + *
2985     + * pmdp_invalidate() is required to make sure we don't miss
2986     + * dirty/young flags set by hardware.
2987     + */
2988     + entry = *pmd;
2989     + pmdp_invalidate(vma, addr, pmd);
2990    
2991     + /*
2992     + * Recover dirty/young flags. It relies on pmdp_invalidate to not
2993     + * corrupt them.
2994     + */
2995     + if (pmd_dirty(*pmd))
2996     + entry = pmd_mkdirty(entry);
2997     + if (pmd_young(*pmd))
2998     + entry = pmd_mkyoung(entry);
2999     +
3000     + entry = pmd_modify(entry, newprot);
3001     + if (preserve_write)
3002     + entry = pmd_mkwrite(entry);
3003     + ret = HPAGE_PMD_NR;
3004     + set_pmd_at(mm, addr, pmd, entry);
3005     + BUG_ON(vma_is_anonymous(vma) && !preserve_write && pmd_write(entry));
3006     +unlock:
3007     + spin_unlock(ptl);
3008     return ret;
3009     }
3010    
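
Structurally, the change_huge_pmd() rewrite replaces one deeply nested "if (ptl) { ... }" block with an early return plus a single "unlock:" exit, and it avoids leaving the PMD transiently cleared by using pmdp_invalidate() and then re-applying the dirty/young bits before set_pmd_at(). The early-exit shape is a common kernel idiom; a user-space sketch of just that shape, with a pthread mutex standing in for the page-table lock (names and values invented):

/* early_exit_sketch.c - illustrative only; build with -pthread.
 * Mirrors the control-flow refactor above: take the lock, bail out
 * early on the uninteresting cases via one "unlock:" label, and keep
 * the main path unindented.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int update_entry(int *entry, int newval, int skip_if_zero)
{
	int ret = 0;

	if (pthread_mutex_trylock(&lock) != 0)
		return 0;		/* analogue of "!ptl -> return 0" */

	ret = 1;

	if (skip_if_zero && *entry == 0)
		goto unlock;		/* analogue of the zero-page bail-out */

	*entry = newval;		/* analogue of set_pmd_at() */
	ret = 2;

unlock:
	pthread_mutex_unlock(&lock);
	return ret;
}

int main(void)
{
	int entry = 0;
	int ret;

	ret = update_entry(&entry, 5, 1);
	printf("skip case: ret=%d entry=%d\n", ret, entry);

	ret = update_entry(&entry, 5, 0);
	printf("update:    ret=%d entry=%d\n", ret, entry);
	return 0;
}
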
3011     diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
3012     index 1689bb58e0d1..d3548c48369f 100644
3013     --- a/mm/zsmalloc.c
3014     +++ b/mm/zsmalloc.c
3015     @@ -1407,7 +1407,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
3016     * pools/users, we can't allow mapping in interrupt context
3017     * because it can corrupt another users mappings.
3018     */
3019     - WARN_ON_ONCE(in_interrupt());
3020     + BUG_ON(in_interrupt());
3021    
3022     /* From now on, migration cannot move the object */
3023     pin_tag(handle);
3024     diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
3025     index 713c09a74b90..0c9ded247ebb 100644
3026     --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
3027     +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
3028     @@ -158,6 +158,10 @@ static unsigned int ipv4_conntrack_local(void *priv,
3029     if (skb->len < sizeof(struct iphdr) ||
3030     ip_hdrlen(skb) < sizeof(struct iphdr))
3031     return NF_ACCEPT;
3032     +
3033     + if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
3034     + return NF_ACCEPT;
3035     +
3036     return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
3037     }
3038    
3039     diff --git a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
3040     index f8aad03d674b..6f5e8d01b876 100644
3041     --- a/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
3042     +++ b/net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
3043     @@ -255,11 +255,6 @@ nf_nat_ipv4_fn(void *priv, struct sk_buff *skb,
3044     /* maniptype == SRC for postrouting. */
3045     enum nf_nat_manip_type maniptype = HOOK2MANIP(state->hook);
3046    
3047     - /* We never see fragments: conntrack defrags on pre-routing
3048     - * and local-out, and nf_nat_out protects post-routing.
3049     - */
3050     - NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));
3051     -
3052     ct = nf_ct_get(skb, &ctinfo);
3053     /* Can't track? It's not due to stress, or conntrack would
3054     * have dropped it. Hence it's the user's responsibilty to
3055     diff --git a/net/ipv4/route.c b/net/ipv4/route.c
3056     index 6a5b7783932e..7ac319222558 100644
3057     --- a/net/ipv4/route.c
3058     +++ b/net/ipv4/route.c
3059     @@ -630,9 +630,12 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
3060     struct fnhe_hash_bucket *hash;
3061     struct fib_nh_exception *fnhe;
3062     struct rtable *rt;
3063     + u32 genid, hval;
3064     unsigned int i;
3065     int depth;
3066     - u32 hval = fnhe_hashfun(daddr);
3067     +
3068     + genid = fnhe_genid(dev_net(nh->nh_dev));
3069     + hval = fnhe_hashfun(daddr);
3070    
3071     spin_lock_bh(&fnhe_lock);
3072    
3073     @@ -655,12 +658,13 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
3074     }
3075    
3076     if (fnhe) {
3077     + if (fnhe->fnhe_genid != genid)
3078     + fnhe->fnhe_genid = genid;
3079     if (gw)
3080     fnhe->fnhe_gw = gw;
3081     - if (pmtu) {
3082     + if (pmtu)
3083     fnhe->fnhe_pmtu = pmtu;
3084     - fnhe->fnhe_expires = max(1UL, expires);
3085     - }
3086     + fnhe->fnhe_expires = max(1UL, expires);
3087     /* Update all cached dsts too */
3088     rt = rcu_dereference(fnhe->fnhe_rth_input);
3089     if (rt)
3090     @@ -679,7 +683,7 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
3091     fnhe->fnhe_next = hash->chain;
3092     rcu_assign_pointer(hash->chain, fnhe);
3093     }
3094     - fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
3095     + fnhe->fnhe_genid = genid;
3096     fnhe->fnhe_daddr = daddr;
3097     fnhe->fnhe_gw = gw;
3098     fnhe->fnhe_pmtu = pmtu;
3099     diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
3100     index 46ad699937fd..8285a1c108c9 100644
3101     --- a/net/ipv6/af_inet6.c
3102     +++ b/net/ipv6/af_inet6.c
3103     @@ -909,12 +909,12 @@ static int __init inet6_init(void)
3104     err = register_pernet_subsys(&inet6_net_ops);
3105     if (err)
3106     goto register_pernet_fail;
3107     - err = icmpv6_init();
3108     - if (err)
3109     - goto icmp_fail;
3110     err = ip6_mr_init();
3111     if (err)
3112     goto ipmr_fail;
3113     + err = icmpv6_init();
3114     + if (err)
3115     + goto icmp_fail;
3116     err = ndisc_init();
3117     if (err)
3118     goto ndisc_fail;
3119     @@ -1044,10 +1044,10 @@ static int __init inet6_init(void)
3120     ndisc_cleanup();
3121     ndisc_fail:
3122     ip6_mr_cleanup();
3123     -ipmr_fail:
3124     - icmpv6_cleanup();
3125     icmp_fail:
3126     unregister_pernet_subsys(&inet6_net_ops);
3127     +ipmr_fail:
3128     + icmpv6_cleanup();
3129     register_pernet_fail:
3130     sock_unregister(PF_INET6);
3131     rtnl_unregister_all(PF_INET6);
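
The inet6_init() hunks reorder ip6_mr_init() ahead of icmpv6_init() and, crucially, also swap the corresponding error labels so the unwind path still tears subsystems down in exactly the reverse order of initialisation. A small stand-alone sketch of that goto-unwind discipline; the function names are invented, only the ordering rule is the point:

/* init_unwind_sketch.c - illustrative only.
 * The rule the af_inet6 hunks preserve: error labels must undo
 * initialisations in the reverse of the order they were performed, so
 * reordering two init calls means reordering their cleanup labels too.
 */
#include <stdio.h>

static int init_a(void)    { puts("init A");    return 0; }
static int init_b(void)    { puts("init B");    return -1; }	/* simulate failure */
static void cleanup_a(void) { puts("cleanup A"); }

static int subsys_init(void)
{
	int err;

	err = init_a();
	if (err)
		goto a_fail;
	err = init_b();
	if (err)
		goto b_fail;
	return 0;

b_fail:
	cleanup_a();	/* undo A, because A succeeded before B failed */
a_fail:
	return err;
}

int main(void)
{
	printf("subsys_init() = %d\n", subsys_init());
	return 0;
}
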
3132     diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
3133     index e9b14e3493f2..c46066c5dc27 100644
3134     --- a/net/ipv6/ip6_gre.c
3135     +++ b/net/ipv6/ip6_gre.c
3136     @@ -461,7 +461,7 @@ static int ip6gre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
3137     &ipv6h->saddr, &ipv6h->daddr, tpi->key,
3138     tpi->proto);
3139     if (tunnel) {
3140     - ip6_tnl_rcv(tunnel, skb, tpi, NULL, false);
3141     + ip6_tnl_rcv(tunnel, skb, tpi, NULL, log_ecn_error);
3142    
3143     return PACKET_RCVD;
3144     }
3145     diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
3146     index 67e882d49195..912333586de6 100644
3147     --- a/net/ipv6/ip6_vti.c
3148     +++ b/net/ipv6/ip6_vti.c
3149     @@ -485,11 +485,15 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
3150     if (!skb->ignore_df && skb->len > mtu) {
3151     skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
3152    
3153     - if (skb->protocol == htons(ETH_P_IPV6))
3154     + if (skb->protocol == htons(ETH_P_IPV6)) {
3155     + if (mtu < IPV6_MIN_MTU)
3156     + mtu = IPV6_MIN_MTU;
3157     +
3158     icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
3159     - else
3160     + } else {
3161     icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
3162     htonl(mtu));
3163     + }
3164    
3165     return -EMSGSIZE;
3166     }
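
The vti6_xmit() hunk clamps the MTU reported in the ICMPv6 Packet-Too-Big message to IPV6_MIN_MTU (1280 bytes), since IPv6 never allows a smaller link MTU. A one-function sketch of the clamping rule; the constant comes from the IPv6 specification, the function itself is hypothetical:

/* ipv6_pmtu_clamp_sketch.c - illustrative only.
 * IPv6 links must support an MTU of at least 1280 bytes, so any MTU
 * advertised in a Packet-Too-Big error is clamped to that floor,
 * matching the vti6_xmit() hunk above.
 */
#include <stdio.h>

#define IPV6_MIN_MTU 1280

static unsigned int ptb_mtu(unsigned int path_mtu)
{
	if (path_mtu < IPV6_MIN_MTU)
		path_mtu = IPV6_MIN_MTU;
	return path_mtu;
}

int main(void)
{
	printf("path mtu 1000 -> advertise %u\n", ptb_mtu(1000));
	printf("path mtu 1400 -> advertise %u\n", ptb_mtu(1400));
	return 0;
}
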
3167     diff --git a/net/rds/tcp.c b/net/rds/tcp.c
3168     index 20e2923dc827..78f976d32018 100644
3169     --- a/net/rds/tcp.c
3170     +++ b/net/rds/tcp.c
3171     @@ -478,9 +478,10 @@ static void __net_exit rds_tcp_exit_net(struct net *net)
3172     * we do need to clean up the listen socket here.
3173     */
3174     if (rtn->rds_tcp_listen_sock) {
3175     - rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
3176     + struct socket *lsock = rtn->rds_tcp_listen_sock;
3177     +
3178     rtn->rds_tcp_listen_sock = NULL;
3179     - flush_work(&rtn->rds_tcp_accept_w);
3180     + rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
3181     }
3182     }
3183    
3184     @@ -517,10 +518,10 @@ static void rds_tcp_kill_sock(struct net *net)
3185     struct rds_tcp_connection *tc, *_tc;
3186     LIST_HEAD(tmp_list);
3187     struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
3188     + struct socket *lsock = rtn->rds_tcp_listen_sock;
3189    
3190     - rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
3191     rtn->rds_tcp_listen_sock = NULL;
3192     - flush_work(&rtn->rds_tcp_accept_w);
3193     + rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
3194     spin_lock_irq(&rds_tcp_conn_lock);
3195     list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
3196     struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);
3197     @@ -540,8 +541,12 @@ static void rds_tcp_kill_sock(struct net *net)
3198     void *rds_tcp_listen_sock_def_readable(struct net *net)
3199     {
3200     struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
3201     + struct socket *lsock = rtn->rds_tcp_listen_sock;
3202     +
3203     + if (!lsock)
3204     + return NULL;
3205    
3206     - return rtn->rds_tcp_listen_sock->sk->sk_user_data;
3207     + return lsock->sk->sk_user_data;
3208     }
3209    
3210     static int rds_tcp_dev_event(struct notifier_block *this,
3211     diff --git a/net/rds/tcp.h b/net/rds/tcp.h
3212     index 9a1cc8906576..56ea6620fcf9 100644
3213     --- a/net/rds/tcp.h
3214     +++ b/net/rds/tcp.h
3215     @@ -66,7 +66,7 @@ void rds_tcp_state_change(struct sock *sk);
3216    
3217     /* tcp_listen.c */
3218     struct socket *rds_tcp_listen_init(struct net *);
3219     -void rds_tcp_listen_stop(struct socket *);
3220     +void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor);
3221     void rds_tcp_listen_data_ready(struct sock *sk);
3222     int rds_tcp_accept_one(struct socket *sock);
3223     int rds_tcp_keepalive(struct socket *sock);
3224     diff --git a/net/rds/tcp_listen.c b/net/rds/tcp_listen.c
3225     index 525b624fec8b..185a56b1e29c 100644
3226     --- a/net/rds/tcp_listen.c
3227     +++ b/net/rds/tcp_listen.c
3228     @@ -227,6 +227,9 @@ void rds_tcp_listen_data_ready(struct sock *sk)
3229     * before it has been accepted and the accepter has set up their
3230     * data_ready.. we only want to queue listen work for our listening
3231     * socket
3232     + *
3233     + * (*ready)() may be null if we are racing with netns delete, and
3234     + * the listen socket is being torn down.
3235     */
3236     if (sk->sk_state == TCP_LISTEN)
3237     rds_tcp_accept_work(sk);
3238     @@ -235,7 +238,8 @@ void rds_tcp_listen_data_ready(struct sock *sk)
3239    
3240     out:
3241     read_unlock_bh(&sk->sk_callback_lock);
3242     - ready(sk);
3243     + if (ready)
3244     + ready(sk);
3245     }
3246    
3247     struct socket *rds_tcp_listen_init(struct net *net)
3248     @@ -275,7 +279,7 @@ struct socket *rds_tcp_listen_init(struct net *net)
3249     return NULL;
3250     }
3251    
3252     -void rds_tcp_listen_stop(struct socket *sock)
3253     +void rds_tcp_listen_stop(struct socket *sock, struct work_struct *acceptor)
3254     {
3255     struct sock *sk;
3256    
3257     @@ -296,5 +300,6 @@ void rds_tcp_listen_stop(struct socket *sock)
3258    
3259     /* wait for accepts to stop and close the socket */
3260     flush_workqueue(rds_wq);
3261     + flush_work(acceptor);
3262     sock_release(sock);
3263     }
3264     diff --git a/net/sctp/socket.c b/net/sctp/socket.c
3265     index c062ceae19e6..c2ab864da50d 100644
3266     --- a/net/sctp/socket.c
3267     +++ b/net/sctp/socket.c
3268     @@ -82,8 +82,8 @@
3269     /* Forward declarations for internal helper functions. */
3270     static int sctp_writeable(struct sock *sk);
3271     static void sctp_wfree(struct sk_buff *skb);
3272     -static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p,
3273     - size_t msg_len);
3274     +static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3275     + size_t msg_len, struct sock **orig_sk);
3276     static int sctp_wait_for_packet(struct sock *sk, int *err, long *timeo_p);
3277     static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p);
3278     static int sctp_wait_for_accept(struct sock *sk, long timeo);
3279     @@ -1957,9 +1957,16 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
3280    
3281     timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
3282     if (!sctp_wspace(asoc)) {
3283     - err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len);
3284     - if (err)
3285     + /* sk can be changed by peel off when waiting for buf. */
3286     + err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len, &sk);
3287     + if (err) {
3288     + if (err == -ESRCH) {
3289     + /* asoc is already dead. */
3290     + new_asoc = NULL;
3291     + err = -EPIPE;
3292     + }
3293     goto out_free;
3294     + }
3295     }
3296    
3297     /* If an address is passed with the sendto/sendmsg call, it is used
3298     @@ -4771,12 +4778,6 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
3299     if (!asoc)
3300     return -EINVAL;
3301    
3302     - /* If there is a thread waiting on more sndbuf space for
3303     - * sending on this asoc, it cannot be peeled.
3304     - */
3305     - if (waitqueue_active(&asoc->wait))
3306     - return -EBUSY;
3307     -
3308     /* An association cannot be branched off from an already peeled-off
3309     * socket, nor is this supported for tcp style sockets.
3310     */
3311     @@ -7440,7 +7441,7 @@ void sctp_sock_rfree(struct sk_buff *skb)
3312    
3313     /* Helper function to wait for space in the sndbuf. */
3314     static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3315     - size_t msg_len)
3316     + size_t msg_len, struct sock **orig_sk)
3317     {
3318     struct sock *sk = asoc->base.sk;
3319     int err = 0;
3320     @@ -7457,10 +7458,11 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3321     for (;;) {
3322     prepare_to_wait_exclusive(&asoc->wait, &wait,
3323     TASK_INTERRUPTIBLE);
3324     + if (asoc->base.dead)
3325     + goto do_dead;
3326     if (!*timeo_p)
3327     goto do_nonblock;
3328     - if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING ||
3329     - asoc->base.dead)
3330     + if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING)
3331     goto do_error;
3332     if (signal_pending(current))
3333     goto do_interrupted;
3334     @@ -7473,11 +7475,17 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3335     release_sock(sk);
3336     current_timeo = schedule_timeout(current_timeo);
3337     lock_sock(sk);
3338     + if (sk != asoc->base.sk) {
3339     + release_sock(sk);
3340     + sk = asoc->base.sk;
3341     + lock_sock(sk);
3342     + }
3343    
3344     *timeo_p = current_timeo;
3345     }
3346    
3347     out:
3348     + *orig_sk = sk;
3349     finish_wait(&asoc->wait, &wait);
3350    
3351     /* Release the association's refcnt. */
3352     @@ -7485,6 +7493,10 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p,
3353    
3354     return err;
3355    
3356     +do_dead:
3357     + err = -ESRCH;
3358     + goto out;
3359     +
3360     do_error:
3361     err = -EPIPE;
3362     goto out;
3363     diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
3364     index 5db68b371db2..600eacce653a 100644
3365     --- a/net/sunrpc/sched.c
3366     +++ b/net/sunrpc/sched.c
3367     @@ -274,10 +274,9 @@ static inline void rpc_task_set_debuginfo(struct rpc_task *task)
3368    
3369     static void rpc_set_active(struct rpc_task *task)
3370     {
3371     - trace_rpc_task_begin(task->tk_client, task, NULL);
3372     -
3373     rpc_task_set_debuginfo(task);
3374     set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
3375     + trace_rpc_task_begin(task->tk_client, task, NULL);
3376     }
3377    
3378     /*
3379     diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
3380     index 8ce5711ea21b..f19e6a57e118 100644
3381     --- a/net/xfrm/xfrm_policy.c
3382     +++ b/net/xfrm/xfrm_policy.c
3383     @@ -1393,6 +1393,7 @@ static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir)
3384     newp->xfrm_nr = old->xfrm_nr;
3385     newp->index = old->index;
3386     newp->type = old->type;
3387     + newp->family = old->family;
3388     memcpy(newp->xfrm_vec, old->xfrm_vec,
3389     newp->xfrm_nr*sizeof(struct xfrm_tmpl));
3390     spin_lock_bh(&net->xfrm.xfrm_policy_lock);
3391     diff --git a/scripts/coccicheck b/scripts/coccicheck
3392     index ec487b8e7051..c36b04b41686 100755
3393     --- a/scripts/coccicheck
3394     +++ b/scripts/coccicheck
3395     @@ -29,12 +29,6 @@ else
3396     VERBOSE=0
3397     fi
3398    
3399     -if [ -z "$J" ]; then
3400     - NPROC=$(getconf _NPROCESSORS_ONLN)
3401     -else
3402     - NPROC="$J"
3403     -fi
3404     -
3405     FLAGS="--very-quiet"
3406    
3407     # You can use SPFLAGS to append extra arguments to coccicheck or override any
3408     @@ -69,6 +63,9 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
3409     # Take only the last argument, which is the C file to test
3410     shift $(( $# - 1 ))
3411     OPTIONS="$COCCIINCLUDE $1"
3412     +
3413     + # No need to parallelize Coccinelle since this mode takes one input file.
3414     + NPROC=1
3415     else
3416     ONLINE=0
3417     if [ "$KBUILD_EXTMOD" = "" ] ; then
3418     @@ -76,6 +73,12 @@ else
3419     else
3420     OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
3421     fi
3422     +
3423     + if [ -z "$J" ]; then
3424     + NPROC=$(getconf _NPROCESSORS_ONLN)
3425     + else
3426     + NPROC="$J"
3427     + fi
3428     fi
3429    
3430     if [ "$KBUILD_EXTMOD" != "" ] ; then
3431     diff --git a/scripts/module-common.lds b/scripts/module-common.lds
3432     index 73a2c7da0e55..53234e85192a 100644
3433     --- a/scripts/module-common.lds
3434     +++ b/scripts/module-common.lds
3435     @@ -19,4 +19,6 @@ SECTIONS {
3436    
3437     . = ALIGN(8);
3438     .init_array 0 : { *(SORT(.init_array.*)) *(.init_array) }
3439     +
3440     + __jump_table 0 : ALIGN(8) { KEEP(*(__jump_table)) }
3441     }
3442     diff --git a/scripts/package/Makefile b/scripts/package/Makefile
3443     index 71b4a8af9d4d..7badec3498b8 100644
3444     --- a/scripts/package/Makefile
3445     +++ b/scripts/package/Makefile
3446     @@ -39,10 +39,9 @@ if test "$(objtree)" != "$(srctree)"; then \
3447     false; \
3448     fi ; \
3449     $(srctree)/scripts/setlocalversion --save-scmversion; \
3450     -ln -sf $(srctree) $(2); \
3451     tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
3452     - $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
3453     -rm -f $(2) $(objtree)/.scmversion
3454     + --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
3455     +rm -f $(objtree)/.scmversion
3456    
3457     # rpm-pkg
3458     # ---------------------------------------------------------------------------
3459     diff --git a/security/keys/request_key.c b/security/keys/request_key.c
3460     index 5030fcf23681..cb7f8f730c6d 100644
3461     --- a/security/keys/request_key.c
3462     +++ b/security/keys/request_key.c
3463     @@ -250,11 +250,12 @@ static int construct_key(struct key *key, const void *callout_info,
3464     * The keyring selected is returned with an extra reference upon it which the
3465     * caller must release.
3466     */
3467     -static void construct_get_dest_keyring(struct key **_dest_keyring)
3468     +static int construct_get_dest_keyring(struct key **_dest_keyring)
3469     {
3470     struct request_key_auth *rka;
3471     const struct cred *cred = current_cred();
3472     struct key *dest_keyring = *_dest_keyring, *authkey;
3473     + int ret;
3474    
3475     kenter("%p", dest_keyring);
3476    
3477     @@ -263,6 +264,8 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
3478     /* the caller supplied one */
3479     key_get(dest_keyring);
3480     } else {
3481     + bool do_perm_check = true;
3482     +
3483     /* use a default keyring; falling through the cases until we
3484     * find one that we actually have */
3485     switch (cred->jit_keyring) {
3486     @@ -277,8 +280,10 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
3487     dest_keyring =
3488     key_get(rka->dest_keyring);
3489     up_read(&authkey->sem);
3490     - if (dest_keyring)
3491     + if (dest_keyring) {
3492     + do_perm_check = false;
3493     break;
3494     + }
3495     }
3496    
3497     case KEY_REQKEY_DEFL_THREAD_KEYRING:
3498     @@ -313,11 +318,29 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
3499     default:
3500     BUG();
3501     }
3502     +
3503     + /*
3504     + * Require Write permission on the keyring. This is essential
3505     + * because the default keyring may be the session keyring, and
3506     + * joining a keyring only requires Search permission.
3507     + *
3508     + * However, this check is skipped for the "requestor keyring" so
3509     + * that /sbin/request-key can itself use request_key() to add
3510     + * keys to the original requestor's destination keyring.
3511     + */
3512     + if (dest_keyring && do_perm_check) {
3513     + ret = key_permission(make_key_ref(dest_keyring, 1),
3514     + KEY_NEED_WRITE);
3515     + if (ret) {
3516     + key_put(dest_keyring);
3517     + return ret;
3518     + }
3519     + }
3520     }
3521    
3522     *_dest_keyring = dest_keyring;
3523     kleave(" [dk %d]", key_serial(dest_keyring));
3524     - return;
3525     + return 0;
3526     }
3527    
3528     /*
3529     @@ -443,11 +466,15 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
3530     if (ctx->index_key.type == &key_type_keyring)
3531     return ERR_PTR(-EPERM);
3532    
3533     - user = key_user_lookup(current_fsuid());
3534     - if (!user)
3535     - return ERR_PTR(-ENOMEM);
3536     + ret = construct_get_dest_keyring(&dest_keyring);
3537     + if (ret)
3538     + goto error;
3539    
3540     - construct_get_dest_keyring(&dest_keyring);
3541     + user = key_user_lookup(current_fsuid());
3542     + if (!user) {
3543     + ret = -ENOMEM;
3544     + goto error_put_dest_keyring;
3545     + }
3546    
3547     ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
3548     key_user_put(user);
3549     @@ -462,7 +489,7 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
3550     } else if (ret == -EINPROGRESS) {
3551     ret = 0;
3552     } else {
3553     - goto couldnt_alloc_key;
3554     + goto error_put_dest_keyring;
3555     }
3556    
3557     key_put(dest_keyring);
3558     @@ -472,8 +499,9 @@ static struct key *construct_key_and_link(struct keyring_search_context *ctx,
3559     construction_failed:
3560     key_negate_and_link(key, key_negative_timeout, NULL, NULL);
3561     key_put(key);
3562     -couldnt_alloc_key:
3563     +error_put_dest_keyring:
3564     key_put(dest_keyring);
3565     +error:
3566     kleave(" = %d", ret);
3567     return ERR_PTR(ret);
3568     }
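
Beyond the new KEY_NEED_WRITE permission check on the default destination keyring, the request_key hunks turn construct_get_dest_keyring() from a void helper into one that returns an error, and construct_key_and_link() gains error / error_put_dest_keyring labels so the keyring reference is dropped on every failure path. The same void-to-int conversion pattern in miniature (all names invented):

/* error_propagation_sketch.c - illustrative only.
 * A helper that used to return void now returns an error code, and its
 * caller releases the resource it acquired before propagating that
 * error, mirroring the request_key refactor above.
 */
#include <stdio.h>
#include <stdlib.h>

struct resource { int serial; };

/* Acquire a resource; may now fail, unlike the old void version. */
static int get_resource(struct resource **out, int fail)
{
	if (fail)
		return -1;
	*out = malloc(sizeof(**out));
	if (!*out)
		return -1;
	(*out)->serial = 42;
	return 0;
}

static void put_resource(struct resource *r) { free(r); }

static int do_work(int fail_acquire, int fail_later)
{
	struct resource *r = NULL;
	int ret;

	ret = get_resource(&r, fail_acquire);
	if (ret)
		goto error;		/* nothing to release yet */

	if (fail_later) {
		ret = -1;
		goto error_put;		/* release before propagating */
	}

	put_resource(r);		/* normal completion */
	return 0;

error_put:
	put_resource(r);
error:
	return ret;
}

int main(void)
{
	printf("ok path:       %d\n", do_work(0, 0));
	printf("acquire fails: %d\n", do_work(1, 0));
	printf("later failure: %d\n", do_work(0, 1));
	return 0;
}
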
3569     diff --git a/sound/core/pcm.c b/sound/core/pcm.c
3570     index 8e980aa678d0..074363b63cc4 100644
3571     --- a/sound/core/pcm.c
3572     +++ b/sound/core/pcm.c
3573     @@ -149,7 +149,9 @@ static int snd_pcm_control_ioctl(struct snd_card *card,
3574     err = -ENXIO;
3575     goto _error;
3576     }
3577     + mutex_lock(&pcm->open_mutex);
3578     err = snd_pcm_info_user(substream, info);
3579     + mutex_unlock(&pcm->open_mutex);
3580     _error:
3581     mutex_unlock(&register_mutex);
3582     return err;
3583     diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
3584     index 37d9cfbc29f9..b80985fbc334 100644
3585     --- a/sound/core/seq/seq_timer.c
3586     +++ b/sound/core/seq/seq_timer.c
3587     @@ -355,7 +355,7 @@ static int initialize_timer(struct snd_seq_timer *tmr)
3588     unsigned long freq;
3589    
3590     t = tmr->timeri->timer;
3591     - if (snd_BUG_ON(!t))
3592     + if (!t)
3593     return -EINVAL;
3594    
3595     freq = tmr->preferred_resolution;
3596     diff --git a/sound/soc/sh/rcar/ssiu.c b/sound/soc/sh/rcar/ssiu.c
3597     index 6f9b388ec5a8..3f95d6b88f8c 100644
3598     --- a/sound/soc/sh/rcar/ssiu.c
3599     +++ b/sound/soc/sh/rcar/ssiu.c
3600     @@ -44,7 +44,11 @@ static int rsnd_ssiu_init(struct rsnd_mod *mod,
3601     mask1 = (1 << 4) | (1 << 20); /* mask sync bit */
3602     mask2 = (1 << 4); /* mask sync bit */
3603     val1 = val2 = 0;
3604     - if (rsnd_ssi_is_pin_sharing(io)) {
3605     + if (id == 8) {
3606     + /*
3607     + * SSI8 pin is sharing with SSI7, nothing to do.
3608     + */
3609     + } else if (rsnd_ssi_is_pin_sharing(io)) {
3610     int shift = -1;
3611    
3612     switch (id) {
3613     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
3614     index 9133d3e53d9d..24c897f0b571 100644
3615     --- a/sound/usb/mixer.c
3616     +++ b/sound/usb/mixer.c
3617     @@ -204,6 +204,10 @@ static int snd_usb_copy_string_desc(struct mixer_build *state,
3618     int index, char *buf, int maxlen)
3619     {
3620     int len = usb_string(state->chip->dev, index, buf, maxlen - 1);
3621     +
3622     + if (len < 0)
3623     + return 0;
3624     +
3625     buf[len] = 0;
3626     return len;
3627     }
3628     @@ -2168,13 +2172,14 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
3629     if (len)
3630     ;
3631     else if (nameid)
3632     - snd_usb_copy_string_desc(state, nameid, kctl->id.name,
3633     + len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
3634     sizeof(kctl->id.name));
3635     - else {
3636     + else
3637     len = get_term_name(state, &state->oterm,
3638     kctl->id.name, sizeof(kctl->id.name), 0);
3639     - if (!len)
3640     - strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
3641     +
3642     + if (!len) {
3643     + strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));
3644    
3645     if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
3646     append_ctl_name(kctl, " Clock Source");
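
The first mixer.c hunk is the important one: usb_string() can return a negative error, and the old code then executed buf[len] = 0 with a negative index; the second hunk makes the selector-unit naming code actually use the returned length so the "USB" fallback applies whenever no name was found. A minimal sketch of guarding a "length or negative errno" return before using it as an index; the lower-level reader here is only a stand-in for usb_string():

/* copy_string_guard_sketch.c - illustrative only.
 * A lower-level call returning either a string length or a negative
 * error must be checked before the result is used as an array index,
 * which is what the snd_usb_copy_string_desc() hunk adds.
 */
#include <stdio.h>

/* Stand-in for usb_string(): negative on error, else bytes written. */
static int fetch_string(int index, char *buf, int maxlen)
{
	if (index < 0)
		return -22;	/* pretend -EINVAL */
	return snprintf(buf, maxlen, "descriptor-%d", index);
}

static int copy_string_desc(int index, char *buf, int maxlen)
{
	int len = fetch_string(index, buf, maxlen - 1);

	if (len < 0)
		return 0;	/* treat errors as "no string", as above */

	buf[len] = 0;		/* now guaranteed to be a valid index */
	return len;
}

int main(void)
{
	char buf[32];
	int len;

	len = copy_string_desc(3, buf, sizeof(buf));
	printf("good index -> len %d, \"%s\"\n", len, buf);

	len = copy_string_desc(-1, buf, sizeof(buf));
	printf("bad index  -> len %d\n", len);
	return 0;
}
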
3647     diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
3648     index bc7adb84e679..60a94b3e532e 100644
3649     --- a/tools/hv/hv_kvp_daemon.c
3650     +++ b/tools/hv/hv_kvp_daemon.c
3651     @@ -193,11 +193,14 @@ static void kvp_update_mem_state(int pool)
3652     for (;;) {
3653     readp = &record[records_read];
3654     records_read += fread(readp, sizeof(struct kvp_record),
3655     - ENTRIES_PER_BLOCK * num_blocks,
3656     - filep);
3657     + ENTRIES_PER_BLOCK * num_blocks - records_read,
3658     + filep);
3659    
3660     if (ferror(filep)) {
3661     - syslog(LOG_ERR, "Failed to read file, pool: %d", pool);
3662     + syslog(LOG_ERR,
3663     + "Failed to read file, pool: %d; error: %d %s",
3664     + pool, errno, strerror(errno));
3665     + kvp_release_lock(pool);
3666     exit(EXIT_FAILURE);
3667     }
3668    
3669     @@ -210,6 +213,7 @@ static void kvp_update_mem_state(int pool)
3670    
3671     if (record == NULL) {
3672     syslog(LOG_ERR, "malloc failed");
3673     + kvp_release_lock(pool);
3674     exit(EXIT_FAILURE);
3675     }
3676     continue;
3677     @@ -224,15 +228,11 @@ static void kvp_update_mem_state(int pool)
3678     fclose(filep);
3679     kvp_release_lock(pool);
3680     }
3681     +
3682     static int kvp_file_init(void)
3683     {
3684     int fd;
3685     - FILE *filep;
3686     - size_t records_read;
3687     char *fname;
3688     - struct kvp_record *record;
3689     - struct kvp_record *readp;
3690     - int num_blocks;
3691     int i;
3692     int alloc_unit = sizeof(struct kvp_record) * ENTRIES_PER_BLOCK;
3693    
3694     @@ -246,61 +246,19 @@ static int kvp_file_init(void)
3695    
3696     for (i = 0; i < KVP_POOL_COUNT; i++) {
3697     fname = kvp_file_info[i].fname;
3698     - records_read = 0;
3699     - num_blocks = 1;
3700     sprintf(fname, "%s/.kvp_pool_%d", KVP_CONFIG_LOC, i);
3701     fd = open(fname, O_RDWR | O_CREAT | O_CLOEXEC, 0644 /* rw-r--r-- */);
3702    
3703     if (fd == -1)
3704     return 1;
3705    
3706     -
3707     - filep = fopen(fname, "re");
3708     - if (!filep) {
3709     - close(fd);
3710     - return 1;
3711     - }
3712     -
3713     - record = malloc(alloc_unit * num_blocks);
3714     - if (record == NULL) {
3715     - fclose(filep);
3716     - close(fd);
3717     - return 1;
3718     - }
3719     - for (;;) {
3720     - readp = &record[records_read];
3721     - records_read += fread(readp, sizeof(struct kvp_record),
3722     - ENTRIES_PER_BLOCK,
3723     - filep);
3724     -
3725     - if (ferror(filep)) {
3726     - syslog(LOG_ERR, "Failed to read file, pool: %d",
3727     - i);
3728     - exit(EXIT_FAILURE);
3729     - }
3730     -
3731     - if (!feof(filep)) {
3732     - /*
3733     - * We have more data to read.
3734     - */
3735     - num_blocks++;
3736     - record = realloc(record, alloc_unit *
3737     - num_blocks);
3738     - if (record == NULL) {
3739     - fclose(filep);
3740     - close(fd);
3741     - return 1;
3742     - }
3743     - continue;
3744     - }
3745     - break;
3746     - }
3747     kvp_file_info[i].fd = fd;
3748     - kvp_file_info[i].num_blocks = num_blocks;
3749     - kvp_file_info[i].records = record;
3750     - kvp_file_info[i].num_records = records_read;
3751     - fclose(filep);
3752     -
3753     + kvp_file_info[i].num_blocks = 1;
3754     + kvp_file_info[i].records = malloc(alloc_unit);
3755     + if (kvp_file_info[i].records == NULL)
3756     + return 1;
3757     + kvp_file_info[i].num_records = 0;
3758     + kvp_update_mem_state(i);
3759     }
3760    
3761     return 0;
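
Two things change in the KVP daemon: kvp_file_init() now delegates reading each pool file to kvp_update_mem_state(), and the fread() in that loop asks only for the entries that still fit in the current allocation (ENTRIES_PER_BLOCK * num_blocks - records_read) rather than a full block every pass, growing the buffer with realloc() while the lock is dropped on the error paths. A self-contained sketch of that read-and-grow loop; the record layout and names are invented:

/* read_grow_sketch.c - illustrative only.
 * Mirrors the fixed read loop above: each fread() requests only the
 * records that still fit in the current allocation, and the buffer is
 * grown when the file has more data.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES_PER_BLOCK 4

struct record { int key; int value; };

static struct record *read_all(FILE *f, size_t *count)
{
	size_t records_read = 0, num_blocks = 1;
	struct record *rec = malloc(sizeof(*rec) * ENTRIES_PER_BLOCK);

	if (!rec)
		return NULL;

	for (;;) {
		records_read += fread(&rec[records_read], sizeof(*rec),
				      ENTRIES_PER_BLOCK * num_blocks - records_read,
				      f);
		if (ferror(f)) {
			free(rec);
			return NULL;
		}
		if (!feof(f)) {
			/* more data: grow by one block and keep reading */
			struct record *bigger;

			num_blocks++;
			bigger = realloc(rec, sizeof(*rec) *
					 ENTRIES_PER_BLOCK * num_blocks);
			if (!bigger) {
				free(rec);
				return NULL;
			}
			rec = bigger;
			continue;
		}
		break;
	}
	*count = records_read;
	return rec;
}

int main(void)
{
	FILE *f = tmpfile();
	struct record r, *all;
	size_t n, i;

	if (!f)
		return 1;
	for (i = 0; i < 10; i++) {	/* enough records to force a realloc */
		r.key = (int)i;
		r.value = (int)(i * i);
		fwrite(&r, sizeof(r), 1, f);
	}
	rewind(f);

	all = read_all(f, &n);
	if (!all) {
		fclose(f);
		return 1;
	}
	printf("read %zu records, last value %d\n", n, all[n - 1].value);
	free(all);
	fclose(f);
	return 0;
}
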
3762     diff --git a/tools/testing/selftests/powerpc/harness.c b/tools/testing/selftests/powerpc/harness.c
3763     index 248a820048df..66d31de60b9a 100644
3764     --- a/tools/testing/selftests/powerpc/harness.c
3765     +++ b/tools/testing/selftests/powerpc/harness.c
3766     @@ -114,9 +114,11 @@ int test_harness(int (test_function)(void), char *name)
3767    
3768     rc = run_test(test_function, name);
3769    
3770     - if (rc == MAGIC_SKIP_RETURN_VALUE)
3771     + if (rc == MAGIC_SKIP_RETURN_VALUE) {
3772     test_skip(name);
3773     - else
3774     + /* so that skipped test is not marked as failed */
3775     + rc = 0;
3776     + } else
3777     test_finish(name, rc);
3778    
3779     return rc;
3780     diff --git a/tools/testing/selftests/x86/fsgsbase.c b/tools/testing/selftests/x86/fsgsbase.c
3781     index 9b4610c6d3fb..f249e042b3b5 100644
3782     --- a/tools/testing/selftests/x86/fsgsbase.c
3783     +++ b/tools/testing/selftests/x86/fsgsbase.c
3784     @@ -245,7 +245,7 @@ void do_unexpected_base(void)
3785     long ret;
3786     asm volatile ("int $0x80"
3787     : "=a" (ret) : "a" (243), "b" (low_desc)
3788     - : "flags");
3789     + : "r8", "r9", "r10", "r11");
3790     memcpy(&desc, low_desc, sizeof(desc));
3791     munmap(low_desc, sizeof(desc));
3792    
3793     diff --git a/tools/testing/selftests/x86/ldt_gdt.c b/tools/testing/selftests/x86/ldt_gdt.c
3794     index f936a3cd3e35..ac1a7a3f87b2 100644
3795     --- a/tools/testing/selftests/x86/ldt_gdt.c
3796     +++ b/tools/testing/selftests/x86/ldt_gdt.c
3797     @@ -45,6 +45,12 @@
3798     #define AR_DB (1 << 22)
3799     #define AR_G (1 << 23)
3800    
3801     +#ifdef __x86_64__
3802     +# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
3803     +#else
3804     +# define INT80_CLOBBERS
3805     +#endif
3806     +
3807     static int nerrs;
3808    
3809     /* Points to an array of 1024 ints, each holding its own index. */
3810     @@ -649,7 +655,7 @@ static int invoke_set_thread_area(void)
3811     asm volatile ("int $0x80"
3812     : "=a" (ret), "+m" (low_user_desc) :
3813     "a" (243), "b" (low_user_desc)
3814     - : "flags");
3815     + : INT80_CLOBBERS);
3816     return ret;
3817     }
3818    
3819     @@ -718,7 +724,7 @@ static void test_gdt_invalidation(void)
3820     "+a" (eax)
3821     : "m" (low_user_desc_clear),
3822     [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
3823     - : "flags");
3824     + : INT80_CLOBBERS);
3825    
3826     if (sel != 0) {
3827     result = "FAIL";
3828     @@ -749,7 +755,7 @@ static void test_gdt_invalidation(void)
3829     "+a" (eax)
3830     : "m" (low_user_desc_clear),
3831     [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
3832     - : "flags");
3833     + : INT80_CLOBBERS);
3834    
3835     if (sel != 0) {
3836     result = "FAIL";
3837     @@ -782,7 +788,7 @@ static void test_gdt_invalidation(void)
3838     "+a" (eax)
3839     : "m" (low_user_desc_clear),
3840     [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
3841     - : "flags");
3842     + : INT80_CLOBBERS);
3843    
3844     #ifdef __x86_64__
3845     syscall(SYS_arch_prctl, ARCH_GET_FS, &new_base);
3846     @@ -835,7 +841,7 @@ static void test_gdt_invalidation(void)
3847     "+a" (eax)
3848     : "m" (low_user_desc_clear),
3849     [arg1] "r" ((unsigned int)(unsigned long)low_user_desc_clear)
3850     - : "flags");
3851     + : INT80_CLOBBERS);
3852    
3853     #ifdef __x86_64__
3854     syscall(SYS_arch_prctl, ARCH_GET_GS, &new_base);
3855     diff --git a/tools/testing/selftests/x86/mpx-hw.h b/tools/testing/selftests/x86/mpx-hw.h
3856     index 093c190178a9..28b3c7c553a4 100644
3857     --- a/tools/testing/selftests/x86/mpx-hw.h
3858     +++ b/tools/testing/selftests/x86/mpx-hw.h
3859     @@ -51,14 +51,14 @@
3860     struct mpx_bd_entry {
3861     union {
3862     char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
3863     - void *contents[1];
3864     + void *contents[0];
3865     };
3866     } __attribute__((packed));
3867    
3868     struct mpx_bt_entry {
3869     union {
3870     char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
3871     - unsigned long contents[1];
3872     + unsigned long contents[0];
3873     };
3874     } __attribute__((packed));
3875    
3876     diff --git a/tools/testing/selftests/x86/ptrace_syscall.c b/tools/testing/selftests/x86/ptrace_syscall.c
3877     index b037ce9cf116..eaea92439708 100644
3878     --- a/tools/testing/selftests/x86/ptrace_syscall.c
3879     +++ b/tools/testing/selftests/x86/ptrace_syscall.c
3880     @@ -58,7 +58,8 @@ static void do_full_int80(struct syscall_args32 *args)
3881     asm volatile ("int $0x80"
3882     : "+a" (args->nr),
3883     "+b" (args->arg0), "+c" (args->arg1), "+d" (args->arg2),
3884     - "+S" (args->arg3), "+D" (args->arg4), "+r" (bp));
3885     + "+S" (args->arg3), "+D" (args->arg4), "+r" (bp)
3886     + : : "r8", "r9", "r10", "r11");
3887     args->arg5 = bp;
3888     #else
3889     sys32_helper(args, int80_and_ret);
3890     diff --git a/tools/testing/selftests/x86/single_step_syscall.c b/tools/testing/selftests/x86/single_step_syscall.c
3891     index 50c26358e8b7..a48da95c18fd 100644
3892     --- a/tools/testing/selftests/x86/single_step_syscall.c
3893     +++ b/tools/testing/selftests/x86/single_step_syscall.c
3894     @@ -56,9 +56,11 @@ static volatile sig_atomic_t sig_traps;
3895     #ifdef __x86_64__
3896     # define REG_IP REG_RIP
3897     # define WIDTH "q"
3898     +# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
3899     #else
3900     # define REG_IP REG_EIP
3901     # define WIDTH "l"
3902     +# define INT80_CLOBBERS
3903     #endif
3904    
3905     static unsigned long get_eflags(void)
3906     @@ -140,7 +142,8 @@ int main()
3907    
3908     printf("[RUN]\tSet TF and check int80\n");
3909     set_eflags(get_eflags() | X86_EFLAGS_TF);
3910     - asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid));
3911     + asm volatile ("int $0x80" : "=a" (tmp) : "a" (SYS_getpid)
3912     + : INT80_CLOBBERS);
3913     check_result();
3914    
3915     /*
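
All of the x86 selftest hunks add the same clobber list: on a 64-bit kernel the int $0x80 compat entry path is allowed to clobber r8-r11, so inline asm that issues int $0x80 must declare those registers. A minimal stand-alone version of the pattern used above; it assumes an x86_64 Linux host with 32-bit syscall emulation enabled:

/* int80_clobbers_sketch.c - illustrative only; x86_64 Linux with
 * 32-bit syscall emulation. Mirrors the INT80_CLOBBERS macro the
 * selftest hunks introduce: int $0x80 may clobber r8-r11, so the asm
 * statement lists them.
 */
#include <stdio.h>

#ifdef __x86_64__
# define INT80_CLOBBERS "r8", "r9", "r10", "r11"
#else
# define INT80_CLOBBERS
#endif

int main(void)
{
	long ret;

	/* 20 is getpid in the 32-bit syscall table used by int $0x80. */
	asm volatile ("int $0x80"
		      : "=a" (ret)
		      : "a" (20)
		      : INT80_CLOBBERS);

	printf("getpid via int $0x80 -> %ld\n", ret);
	return 0;
}
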
3916     diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
3917     index c8aeb7b91ec8..95021246ee26 100644
3918     --- a/virt/kvm/arm/hyp/vgic-v2-sr.c
3919     +++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
3920     @@ -77,11 +77,7 @@ static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
3921     else
3922     elrsr1 = 0;
3923    
3924     -#ifdef CONFIG_CPU_BIG_ENDIAN
3925     - cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
3926     -#else
3927     cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
3928     -#endif
3929     }
3930    
3931     static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
3932     diff --git a/virt/kvm/arm/vgic/vgic-irqfd.c b/virt/kvm/arm/vgic/vgic-irqfd.c
3933     index f138ed2e9c63..a26c6773d6df 100644
3934     --- a/virt/kvm/arm/vgic/vgic-irqfd.c
3935     +++ b/virt/kvm/arm/vgic/vgic-irqfd.c
3936     @@ -112,8 +112,7 @@ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm)
3937     u32 nr = dist->nr_spis;
3938     int i, ret;
3939    
3940     - entries = kcalloc(nr, sizeof(struct kvm_kernel_irq_routing_entry),
3941     - GFP_KERNEL);
3942     + entries = kcalloc(nr, sizeof(*entries), GFP_KERNEL);
3943     if (!entries)
3944     return -ENOMEM;
3945    
3946     diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
3947     index 4660a7d04eea..ebcaf4641d2b 100644
3948     --- a/virt/kvm/arm/vgic/vgic-its.c
3949     +++ b/virt/kvm/arm/vgic/vgic-its.c
3950     @@ -360,29 +360,6 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu)
3951     return ret;
3952     }
3953    
3954     -static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
3955     - struct vgic_its *its,
3956     - gpa_t addr, unsigned int len)
3957     -{
3958     - u32 reg = 0;
3959     -
3960     - mutex_lock(&its->cmd_lock);
3961     - if (its->creadr == its->cwriter)
3962     - reg |= GITS_CTLR_QUIESCENT;
3963     - if (its->enabled)
3964     - reg |= GITS_CTLR_ENABLE;
3965     - mutex_unlock(&its->cmd_lock);
3966     -
3967     - return reg;
3968     -}
3969     -
3970     -static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
3971     - gpa_t addr, unsigned int len,
3972     - unsigned long val)
3973     -{
3974     - its->enabled = !!(val & GITS_CTLR_ENABLE);
3975     -}
3976     -
3977     static unsigned long vgic_mmio_read_its_typer(struct kvm *kvm,
3978     struct vgic_its *its,
3979     gpa_t addr, unsigned int len)
3980     @@ -687,6 +664,8 @@ static int vgic_its_alloc_collection(struct vgic_its *its,
3981     return E_ITS_MAPC_COLLECTION_OOR;
3982    
3983     collection = kzalloc(sizeof(*collection), GFP_KERNEL);
3984     + if (!collection)
3985     + return -ENOMEM;
3986    
3987     collection->collection_id = coll_id;
3988     collection->target_addr = COLLECTION_NOT_MAPPED;
3989     @@ -1160,33 +1139,16 @@ static void vgic_mmio_write_its_cbaser(struct kvm *kvm, struct vgic_its *its,
3990     #define ITS_CMD_SIZE 32
3991     #define ITS_CMD_OFFSET(reg) ((reg) & GENMASK(19, 5))
3992    
3993     -/*
3994     - * By writing to CWRITER the guest announces new commands to be processed.
3995     - * To avoid any races in the first place, we take the its_cmd lock, which
3996     - * protects our ring buffer variables, so that there is only one user
3997     - * per ITS handling commands at a given time.
3998     - */
3999     -static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
4000     - gpa_t addr, unsigned int len,
4001     - unsigned long val)
4002     +/* Must be called with the cmd_lock held. */
4003     +static void vgic_its_process_commands(struct kvm *kvm, struct vgic_its *its)
4004     {
4005     gpa_t cbaser;
4006     u64 cmd_buf[4];
4007     - u32 reg;
4008     -
4009     - if (!its)
4010     - return;
4011     -
4012     - mutex_lock(&its->cmd_lock);
4013    
4014     - reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
4015     - reg = ITS_CMD_OFFSET(reg);
4016     - if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
4017     - mutex_unlock(&its->cmd_lock);
4018     + /* Commands are only processed when the ITS is enabled. */
4019     + if (!its->enabled)
4020     return;
4021     - }
4022    
4023     - its->cwriter = reg;
4024     cbaser = CBASER_ADDRESS(its->cbaser);
4025    
4026     while (its->cwriter != its->creadr) {
4027     @@ -1206,6 +1168,34 @@ static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
4028     if (its->creadr == ITS_CMD_BUFFER_SIZE(its->cbaser))
4029     its->creadr = 0;
4030     }
4031     +}
4032     +
4033     +/*
4034     + * By writing to CWRITER the guest announces new commands to be processed.
4035     + * To avoid any races in the first place, we take the its_cmd lock, which
4036     + * protects our ring buffer variables, so that there is only one user
4037     + * per ITS handling commands at a given time.
4038     + */
4039     +static void vgic_mmio_write_its_cwriter(struct kvm *kvm, struct vgic_its *its,
4040     + gpa_t addr, unsigned int len,
4041     + unsigned long val)
4042     +{
4043     + u64 reg;
4044     +
4045     + if (!its)
4046     + return;
4047     +
4048     + mutex_lock(&its->cmd_lock);
4049     +
4050     + reg = update_64bit_reg(its->cwriter, addr & 7, len, val);
4051     + reg = ITS_CMD_OFFSET(reg);
4052     + if (reg >= ITS_CMD_BUFFER_SIZE(its->cbaser)) {
4053     + mutex_unlock(&its->cmd_lock);
4054     + return;
4055     + }
4056     + its->cwriter = reg;
4057     +
4058     + vgic_its_process_commands(kvm, its);
4059    
4060     mutex_unlock(&its->cmd_lock);
4061     }
4062     @@ -1286,6 +1276,39 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm,
4063     *regptr = reg;
4064     }
4065    
4066     +static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu,
4067     + struct vgic_its *its,
4068     + gpa_t addr, unsigned int len)
4069     +{
4070     + u32 reg = 0;
4071     +
4072     + mutex_lock(&its->cmd_lock);
4073     + if (its->creadr == its->cwriter)
4074     + reg |= GITS_CTLR_QUIESCENT;
4075     + if (its->enabled)
4076     + reg |= GITS_CTLR_ENABLE;
4077     + mutex_unlock(&its->cmd_lock);
4078     +
4079     + return reg;
4080     +}
4081     +
4082     +static void vgic_mmio_write_its_ctlr(struct kvm *kvm, struct vgic_its *its,
4083     + gpa_t addr, unsigned int len,
4084     + unsigned long val)
4085     +{
4086     + mutex_lock(&its->cmd_lock);
4087     +
4088     + its->enabled = !!(val & GITS_CTLR_ENABLE);
4089     +
4090     + /*
4091     + * Try to process any pending commands. This function bails out early
4092     + * if the ITS is disabled or no commands have been queued.
4093     + */
4094     + vgic_its_process_commands(kvm, its);
4095     +
4096     + mutex_unlock(&its->cmd_lock);
4097     +}
4098     +
4099     #define REGISTER_ITS_DESC(off, rd, wr, length, acc) \
4100     { \
4101     .reg_offset = off, \
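
The vgic-its rework above factors the command-queue draining into vgic_its_process_commands(), which must be called with cmd_lock held and is now invoked both from the CWRITER write handler and from the relocated GITS_CTLR write handler, so commands queued while the ITS was disabled are processed as soon as the guest enables it (the missing kzalloc() NULL check is fixed along the way). A small pthread sketch of that "one drain helper, two lock-holding callers" shape; the ring-buffer details are invented:

/* drain_under_lock_sketch.c - illustrative only; build with -pthread.
 * One helper drains a queue and must be called with the lock held;
 * both the enqueue path and the enable path call it, mirroring
 * vgic_its_process_commands() above.
 */
#include <pthread.h>
#include <stdio.h>

#define QLEN 8

static pthread_mutex_t cmd_lock = PTHREAD_MUTEX_INITIALIZER;
static int queue[QLEN];
static unsigned int creadr, cwriter;	/* read/write indices */
static int enabled;

/* Must be called with cmd_lock held. */
static void process_commands(void)
{
	if (!enabled)
		return;		/* commands stay queued until enabled */
	while (creadr != cwriter) {
		printf("processing command %d\n", queue[creadr % QLEN]);
		creadr++;
	}
}

static void write_cwriter(int cmd)
{
	pthread_mutex_lock(&cmd_lock);
	queue[cwriter % QLEN] = cmd;
	cwriter++;
	process_commands();
	pthread_mutex_unlock(&cmd_lock);
}

static void write_ctlr(int enable)
{
	pthread_mutex_lock(&cmd_lock);
	enabled = enable;
	process_commands();	/* drain anything queued while disabled */
	pthread_mutex_unlock(&cmd_lock);
}

int main(void)
{
	write_cwriter(1);	/* queued, "ITS" still disabled */
	write_cwriter(2);	/* queued */
	write_ctlr(1);		/* enable: both queued commands drain here */
	write_cwriter(3);	/* processed immediately */
	return 0;
}
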
4102     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
4103     index f4c6d4f6d2e8..4569fdcab701 100644
4104     --- a/virt/kvm/kvm_main.c
4105     +++ b/virt/kvm/kvm_main.c
4106     @@ -125,6 +125,11 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
4107    
4108     static bool largepages_enabled = true;
4109    
4110     +__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
4111     + unsigned long start, unsigned long end)
4112     +{
4113     +}
4114     +
4115     bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
4116     {
4117     if (pfn_valid(pfn))
4118     @@ -361,6 +366,9 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
4119     kvm_flush_remote_tlbs(kvm);
4120    
4121     spin_unlock(&kvm->mmu_lock);
4122     +
4123     + kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
4124     +
4125     srcu_read_unlock(&kvm->srcu, idx);
4126     }
4127