Magellan Linux

Contents of /trunk/kernel-alx/patches-4.14/0116-4.14.17-all-fixes.patch



Revision 3238
Fri Nov 9 12:14:58 2018 UTC by niro
File size: 197605 bytes
Log message: added up to patches-4.14.79
diff --git a/Makefile b/Makefile
index 90a4bffa8446..7ed993896dd5 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 14
-SUBLEVEL = 16
+SUBLEVEL = 17
EXTRAVERSION =
NAME = Petit Gorille

diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index dff66974feed..d5f5e92e7488 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -85,7 +85,7 @@
timer@20200 {
compatible = "arm,cortex-a9-global-timer";
reg = <0x20200 0x100>;
- interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
clocks = <&periph_clk>;
};

@@ -93,7 +93,7 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x20600 0x20>;
interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
- IRQ_TYPE_LEVEL_HIGH)>;
+ IRQ_TYPE_EDGE_RISING)>;
clocks = <&periph_clk>;
};

diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts
index 3bc50849d013..b8bde13de90a 100644
--- a/arch/arm/boot/dts/bcm958623hr.dts
+++ b/arch/arm/boot/dts/bcm958623hr.dts
@@ -141,10 +141,6 @@
status = "okay";
};

-&sata {
- status = "okay";
-};
-
&qspi {
bspi-sel = <0>;
flash: m25p80@0 {
diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts
index d94d14b3c745..6a44b8021702 100644
--- a/arch/arm/boot/dts/bcm958625hr.dts
+++ b/arch/arm/boot/dts/bcm958625hr.dts
@@ -177,10 +177,6 @@
status = "okay";
};

-&sata {
- status = "okay";
-};
-
&srab {
compatible = "brcm,bcm58625-srab", "brcm,nsp-srab";
status = "okay";
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index d535edc01434..75fdeaa8c62f 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -445,10 +445,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r = -EINTR;
- sigset_t sigsaved;

- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);

if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
@@ -480,8 +478,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
local_irq_enable();

out:
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);

return r;
}
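The kvm_arch_vcpu_ioctl_run() hunks in this patch (MIPS above, then powerpc, s390 and x86 below) all replace the same open-coded sigprocmask() save/restore with a pair of common helpers. A minimal sketch of what those helpers amount to, assuming the saved mask is parked in the task struct rather than on the caller's stack (the real helpers live in virt/kvm/kvm_main.c; the exact field name is an assumption here):

void kvm_sigset_activate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;
	/* Swap in the vcpu's sigset, saving the caller's mask. */
	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
}

void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
{
	if (!vcpu->sigset_active)
		return;
	/* Restore the mask saved by kvm_sigset_activate(). */
	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
	sigemptyset(&current->real_blocked);
}

Keeping the saved mask out of the local stack frame is what lets the save and restore be factored into shared helpers at all.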
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index ee279c7f4802..2b02d51d14d8 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -1407,7 +1407,6 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r;
- sigset_t sigsaved;

if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
@@ -1448,16 +1447,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
#endif
}

- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);

if (run->immediate_exit)
r = -EINTR;
else
r = kvmppc_vcpu_run(run, vcpu);

- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);

return r;
}
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 43607bb12cc2..a6cc744ff5fb 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -28,7 +28,7 @@ static inline int init_new_context(struct task_struct *tsk,
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
- current->mm->context.alloc_pgste;
+ (current->mm && current->mm->context.alloc_pgste);
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
mm->context.use_cmma = 0;
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 55de4eb73604..de0a8b17bcaa 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -51,6 +51,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
static inline void topology_expect_change(void) { }

#endif /* CONFIG_SCHED_TOPOLOGY */
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 092c4154abd7..7ffaf9fd6d19 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -54,6 +54,7 @@
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
+#include <asm/topology.h>
#include "entry.h"

enum {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 6c88cb18ace2..6e3d80b2048e 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -3378,7 +3378,6 @@ static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int rc;
- sigset_t sigsaved;

if (kvm_run->immediate_exit)
return -EINTR;
@@ -3388,8 +3387,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return 0;
}

- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);

if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
@@ -3423,8 +3421,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
disable_cpu_timer_accounting(vcpu);
store_regs(vcpu, kvm_run);

- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);

vcpu->stat.exit_userspace++;
return rc;
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index 3d09e3aca18d..12e8484a8ee7 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -90,30 +90,6 @@ SHIFT_MASK: .octa 0x0f0e0d0c0b0a09080706050403020100
ALL_F: .octa 0xffffffffffffffffffffffffffffffff
.octa 0x00000000000000000000000000000000

-.section .rodata
-.align 16
-.type aad_shift_arr, @object
-.size aad_shift_arr, 272
-aad_shift_arr:
- .octa 0xffffffffffffffffffffffffffffffff
- .octa 0xffffffffffffffffffffffffffffff0C
- .octa 0xffffffffffffffffffffffffffff0D0C
- .octa 0xffffffffffffffffffffffffff0E0D0C
- .octa 0xffffffffffffffffffffffff0F0E0D0C
- .octa 0xffffffffffffffffffffff0C0B0A0908
- .octa 0xffffffffffffffffffff0D0C0B0A0908
- .octa 0xffffffffffffffffff0E0D0C0B0A0908
- .octa 0xffffffffffffffff0F0E0D0C0B0A0908
- .octa 0xffffffffffffff0C0B0A090807060504
- .octa 0xffffffffffff0D0C0B0A090807060504
- .octa 0xffffffffff0E0D0C0B0A090807060504
- .octa 0xffffffff0F0E0D0C0B0A090807060504
- .octa 0xffffff0C0B0A09080706050403020100
- .octa 0xffff0D0C0B0A09080706050403020100
- .octa 0xff0E0D0C0B0A09080706050403020100
- .octa 0x0F0E0D0C0B0A09080706050403020100
-
-
.text


@@ -257,6 +233,37 @@ aad_shift_arr:
pxor \TMP1, \GH # result is in TMP1
.endm

+# Reads DLEN bytes starting at DPTR and stores in XMMDst
+# where 0 < DLEN < 16
+# Clobbers %rax, DLEN and XMM1
+.macro READ_PARTIAL_BLOCK DPTR DLEN XMM1 XMMDst
+ cmp $8, \DLEN
+ jl _read_lt8_\@
+ mov (\DPTR), %rax
+ MOVQ_R64_XMM %rax, \XMMDst
+ sub $8, \DLEN
+ jz _done_read_partial_block_\@
+ xor %eax, %eax
+_read_next_byte_\@:
+ shl $8, %rax
+ mov 7(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_\@
+ MOVQ_R64_XMM %rax, \XMM1
+ pslldq $8, \XMM1
+ por \XMM1, \XMMDst
+ jmp _done_read_partial_block_\@
+_read_lt8_\@:
+ xor %eax, %eax
+_read_next_byte_lt8_\@:
+ shl $8, %rax
+ mov -1(\DPTR, \DLEN, 1), %al
+ dec \DLEN
+ jnz _read_next_byte_lt8_\@
+ MOVQ_R64_XMM %rax, \XMMDst
+_done_read_partial_block_\@:
+.endm
+
/*
* if a = number of total plaintext bytes
* b = floor(a/16)
@@ -273,62 +280,30 @@ aad_shift_arr:
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r11 # %r11 = aadLen
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2

cmp $16, %r11
- jl _get_AAD_rest8\num_initial_blocks\operation
+ jl _get_AAD_rest\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation

movdqu \XMM2, %xmm\i
+
+ /* read the last <16B of AAD */
+_get_AAD_rest\num_initial_blocks\operation:
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation

- pxor %xmm\i,%xmm\i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some CT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\num_initial_blocks\operation:
- cmp $4, %r11
- jle _get_AAD_rest4\num_initial_blocks\operation
- movq (%r10), \TMP1
- add $8, %r10
- sub $8, %r11
- pslldq $8, \TMP1
- psrldq $8, %xmm\i
- pxor \TMP1, %xmm\i
- jmp _get_AAD_rest8\num_initial_blocks\operation
-_get_AAD_rest4\num_initial_blocks\operation:
- cmp $0, %r11
- jle _get_AAD_rest0\num_initial_blocks\operation
- mov (%r10), %eax
- movq %rax, \TMP1
- add $4, %r10
- sub $4, %r10
- pslldq $12, \TMP1
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
-_get_AAD_rest0\num_initial_blocks\operation:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \TMP1
- PSHUFB_XMM \TMP1, %xmm\i
-_get_AAD_rest_final\num_initial_blocks\operation:
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -532,62 +507,30 @@ _initial_blocks_done\num_initial_blocks\operation:
XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
MOVADQ SHUF_MASK(%rip), %xmm14
mov arg7, %r10 # %r10 = AAD
- mov arg8, %r12 # %r12 = aadLen
- mov %r12, %r11
+ mov arg8, %r11 # %r11 = aadLen
pxor %xmm\i, %xmm\i
pxor \XMM2, \XMM2

cmp $16, %r11
- jl _get_AAD_rest8\num_initial_blocks\operation
+ jl _get_AAD_rest\num_initial_blocks\operation
_get_AAD_blocks\num_initial_blocks\operation:
movdqu (%r10), %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor %xmm\i, \XMM2
GHASH_MUL \XMM2, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
add $16, %r10
- sub $16, %r12
sub $16, %r11
cmp $16, %r11
jge _get_AAD_blocks\num_initial_blocks\operation

movdqu \XMM2, %xmm\i
+
+ /* read the last <16B of AAD */
+_get_AAD_rest\num_initial_blocks\operation:
cmp $0, %r11
je _get_AAD_done\num_initial_blocks\operation

- pxor %xmm\i,%xmm\i
-
- /* read the last <16B of AAD. since we have at least 4B of
- data right after the AAD (the ICV, and maybe some PT), we can
- read 4B/8B blocks safely, and then get rid of the extra stuff */
-_get_AAD_rest8\num_initial_blocks\operation:
- cmp $4, %r11
- jle _get_AAD_rest4\num_initial_blocks\operation
- movq (%r10), \TMP1
- add $8, %r10
- sub $8, %r11
- pslldq $8, \TMP1
- psrldq $8, %xmm\i
- pxor \TMP1, %xmm\i
- jmp _get_AAD_rest8\num_initial_blocks\operation
-_get_AAD_rest4\num_initial_blocks\operation:
- cmp $0, %r11
- jle _get_AAD_rest0\num_initial_blocks\operation
- mov (%r10), %eax
- movq %rax, \TMP1
- add $4, %r10
- sub $4, %r10
- pslldq $12, \TMP1
- psrldq $4, %xmm\i
- pxor \TMP1, %xmm\i
-_get_AAD_rest0\num_initial_blocks\operation:
- /* finalize: shift out the extra bytes we read, and align
- left. since pslldq can only shift by an immediate, we use
- vpshufb and an array of shuffle masks */
- movq %r12, %r11
- salq $4, %r11
- movdqu aad_shift_arr(%r11), \TMP1
- PSHUFB_XMM \TMP1, %xmm\i
-_get_AAD_rest_final\num_initial_blocks\operation:
+ READ_PARTIAL_BLOCK %r10, %r11, \TMP1, %xmm\i
PSHUFB_XMM %xmm14, %xmm\i # byte-reflect the AAD data
pxor \XMM2, %xmm\i
GHASH_MUL %xmm\i, \TMP3, \TMP1, \TMP2, \TMP4, \TMP5, \XMM1
@@ -1386,14 +1329,6 @@ _esb_loop_\@:
*
* AAD Format with 64-bit Extended Sequence Number
*
-* aadLen:
-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
-* The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-* For other sizes, the code will fail.
-*
* poly = x^128 + x^127 + x^126 + x^121 + 1
*
*****************************************************************************/
@@ -1487,19 +1422,16 @@ _zero_cipher_left_decrypt:
PSHUFB_XMM %xmm10, %xmm0

ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # E(K, Yn)
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
- lea SHIFT_MASK+16(%rip), %r12
- sub %r13, %r12
-# adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
-# (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes

+ lea (%arg3,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+ lea ALL_F+16(%rip), %r12
+ sub %r13, %r12
movdqa %xmm1, %xmm2
pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu (%r12), %xmm1
# get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
pand %xmm1, %xmm2
@@ -1508,9 +1440,6 @@ _zero_cipher_left_decrypt:

pxor %xmm2, %xmm8
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
- # GHASH computation for the last <16 byte block
- sub %r13, %r11
- add $16, %r11

# output %r13 bytes
MOVQ_R64_XMM %xmm0, %rax
@@ -1664,14 +1593,6 @@ ENDPROC(aesni_gcm_dec)
*
* AAD Format with 64-bit Extended Sequence Number
*
-* aadLen:
-* from the definition of the spec, aadLen can only be 8 or 12 bytes.
-* The code supports 16 too but for other sizes, the code will fail.
-*
-* TLen:
-* from the definition of the spec, TLen can only be 8, 12 or 16 bytes.
-* For other sizes, the code will fail.
-*
* poly = x^128 + x^127 + x^126 + x^121 + 1
***************************************************************************/
ENTRY(aesni_gcm_enc)
@@ -1764,19 +1685,16 @@ _zero_cipher_left_encrypt:
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0

-
ENCRYPT_SINGLE_BLOCK %xmm0, %xmm1 # Encrypt(K, Yn)
- sub $16, %r11
- add %r13, %r11
- movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
- lea SHIFT_MASK+16(%rip), %r12
+
+ lea (%arg3,%r11,1), %r10
+ mov %r13, %r12
+ READ_PARTIAL_BLOCK %r10 %r12 %xmm2 %xmm1
+
+ lea ALL_F+16(%rip), %r12
sub %r13, %r12
- # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
- # (%r13 is the number of bytes in plaintext mod 16)
- movdqu (%r12), %xmm2 # get the appropriate shuffle mask
- PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
- movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
+ movdqu (%r12), %xmm1
# get the appropriate mask to mask out top 16-r13 bytes of xmm0
pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
movdqa SHUF_MASK(%rip), %xmm10
@@ -1785,9 +1703,6 @@ _zero_cipher_left_encrypt:
pxor %xmm0, %xmm8
GHASH_MUL %xmm8, %xmm13, %xmm9, %xmm10, %xmm11, %xmm5, %xmm6
# GHASH computation for the last <16 byte block
- sub %r13, %r11
- add $16, %r11
-
movdqa SHUF_MASK(%rip), %xmm10
PSHUFB_XMM %xmm10, %xmm0

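READ_PARTIAL_BLOCK is the heart of the aesni-intel_asm.S change: the old code read past the end of the AAD/ciphertext tail in 4/8-byte chunks and then shuffled the junk away via aad_shift_arr, which is unsafe when nothing mappable follows the buffer. A C model of the macro's byte-exact logic (illustrative sketch only, assuming little-endian as on x86; the asm builds the value in %rax and XMM registers instead):

#include <stdint.h>
#include <string.h>

/* Read len bytes (0 < len < 16) into a zero-padded 16-byte block
 * without touching any memory past src + len. */
static void read_partial_block(const uint8_t *src, unsigned int len,
			       uint8_t dst[16])
{
	uint64_t lo = 0, hi = 0;
	unsigned int i;

	memset(dst, 0, 16);
	if (len >= 8) {
		memcpy(&lo, src, 8);		/* whole low quadword */
		for (i = len; i > 8; i--)	/* bytes len-1 .. 8, MSB first */
			hi = (hi << 8) | src[i - 1];
		memcpy(dst + 8, &hi, 8);
	} else {
		for (i = len; i > 0; i--)	/* bytes len-1 .. 0, MSB first */
			lo = (lo << 8) | src[i - 1];
	}
	memcpy(dst, &lo, 8);
}

Every load in this model stays within [src, src + len), which is exactly the property the removed over-reading scheme lacked.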
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 5c15d6b57329..c690ddc78c03 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -28,6 +28,7 @@
#include <crypto/cryptd.h>
#include <crypto/ctr.h>
#include <crypto/b128ops.h>
+#include <crypto/gcm.h>
#include <crypto/xts.h>
#include <asm/cpu_device_id.h>
#include <asm/fpu/api.h>
@@ -689,8 +690,8 @@ static int common_rfc4106_set_key(struct crypto_aead *aead, const u8 *key,
rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len);
}

-static int rfc4106_set_key(struct crypto_aead *parent, const u8 *key,
- unsigned int key_len)
+static int gcmaes_wrapper_set_key(struct crypto_aead *parent, const u8 *key,
+ unsigned int key_len)
{
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
@@ -715,8 +716,8 @@ static int common_rfc4106_set_authsize(struct crypto_aead *aead,

/* This is the Integrity Check Value (aka the authentication tag length and can
* be 8, 12 or 16 bytes long. */
-static int rfc4106_set_authsize(struct crypto_aead *parent,
- unsigned int authsize)
+static int gcmaes_wrapper_set_authsize(struct crypto_aead *parent,
+ unsigned int authsize)
{
struct cryptd_aead **ctx = crypto_aead_ctx(parent);
struct cryptd_aead *cryptd_tfm = *ctx;
@@ -823,7 +824,7 @@ static int gcmaes_decrypt(struct aead_request *req, unsigned int assoclen,
if (sg_is_last(req->src) &&
(!PageHighMem(sg_page(req->src)) ||
req->src->offset + req->src->length <= PAGE_SIZE) &&
- sg_is_last(req->dst) &&
+ sg_is_last(req->dst) && req->dst->length &&
(!PageHighMem(sg_page(req->dst)) ||
req->dst->offset + req->dst->length <= PAGE_SIZE)) {
one_entry_in_sg = 1;
@@ -928,7 +929,7 @@ static int helper_rfc4106_decrypt(struct aead_request *req)
aes_ctx);
}

-static int rfc4106_encrypt(struct aead_request *req)
+static int gcmaes_wrapper_encrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
@@ -944,7 +945,7 @@ static int rfc4106_encrypt(struct aead_request *req)
return crypto_aead_encrypt(req);
}

-static int rfc4106_decrypt(struct aead_request *req)
+static int gcmaes_wrapper_decrypt(struct aead_request *req)
{
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cryptd_aead **ctx = crypto_aead_ctx(tfm);
@@ -1115,7 +1116,7 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
{
__be32 counter = cpu_to_be32(1);
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm);
+ struct generic_gcmaes_ctx *ctx = generic_gcmaes_ctx_get(tfm);
void *aes_ctx = &(ctx->aes_key_expanded);
u8 iv[16] __attribute__ ((__aligned__(AESNI_ALIGN)));

@@ -1126,12 +1127,36 @@ static int generic_gcmaes_decrypt(struct aead_request *req)
aes_ctx);
}

+static int generic_gcmaes_init(struct crypto_aead *aead)
+{
+ struct cryptd_aead *cryptd_tfm;
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_tfm = cryptd_alloc_aead("__driver-generic-gcm-aes-aesni",
+ CRYPTO_ALG_INTERNAL,
+ CRYPTO_ALG_INTERNAL);
+ if (IS_ERR(cryptd_tfm))
+ return PTR_ERR(cryptd_tfm);
+
+ *ctx = cryptd_tfm;
+ crypto_aead_set_reqsize(aead, crypto_aead_reqsize(&cryptd_tfm->base));
+
+ return 0;
+}
+
+static void generic_gcmaes_exit(struct crypto_aead *aead)
+{
+ struct cryptd_aead **ctx = crypto_aead_ctx(aead);
+
+ cryptd_free_aead(*ctx);
+}
+
static struct aead_alg aesni_aead_algs[] = { {
.setkey = common_rfc4106_set_key,
.setauthsize = common_rfc4106_set_authsize,
.encrypt = helper_rfc4106_encrypt,
.decrypt = helper_rfc4106_decrypt,
- .ivsize = 8,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "__gcm-aes-aesni",
@@ -1145,11 +1170,11 @@ static struct aead_alg aesni_aead_algs[] = { {
}, {
.init = rfc4106_init,
.exit = rfc4106_exit,
- .setkey = rfc4106_set_key,
- .setauthsize = rfc4106_set_authsize,
- .encrypt = rfc4106_encrypt,
- .decrypt = rfc4106_decrypt,
- .ivsize = 8,
+ .setkey = gcmaes_wrapper_set_key,
+ .setauthsize = gcmaes_wrapper_set_authsize,
+ .encrypt = gcmaes_wrapper_encrypt,
+ .decrypt = gcmaes_wrapper_decrypt,
+ .ivsize = GCM_RFC4106_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "rfc4106(gcm(aes))",
@@ -1165,7 +1190,26 @@ static struct aead_alg aesni_aead_algs[] = { {
.setauthsize = generic_gcmaes_set_authsize,
.encrypt = generic_gcmaes_encrypt,
.decrypt = generic_gcmaes_decrypt,
- .ivsize = 12,
+ .ivsize = GCM_AES_IV_SIZE,
+ .maxauthsize = 16,
+ .base = {
+ .cra_name = "__generic-gcm-aes-aesni",
+ .cra_driver_name = "__driver-generic-gcm-aes-aesni",
+ .cra_priority = 0,
+ .cra_flags = CRYPTO_ALG_INTERNAL,
+ .cra_blocksize = 1,
+ .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
+ .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_module = THIS_MODULE,
+ },
+}, {
+ .init = generic_gcmaes_init,
+ .exit = generic_gcmaes_exit,
+ .setkey = gcmaes_wrapper_set_key,
+ .setauthsize = gcmaes_wrapper_set_authsize,
+ .encrypt = gcmaes_wrapper_encrypt,
+ .decrypt = gcmaes_wrapper_decrypt,
+ .ivsize = GCM_AES_IV_SIZE,
.maxauthsize = 16,
.base = {
.cra_name = "gcm(aes)",
@@ -1173,8 +1217,7 @@ static struct aead_alg aesni_aead_algs[] = { {
.cra_priority = 400,
.cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
- .cra_ctxsize = sizeof(struct generic_gcmaes_ctx),
- .cra_alignmask = AESNI_ALIGN - 1,
+ .cra_ctxsize = sizeof(struct cryptd_aead *),
.cra_module = THIS_MODULE,
},
} };
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index eb38ac9d9a31..4f8b80199672 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1156,7 +1156,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
static inline int emulate_instruction(struct kvm_vcpu *vcpu,
int emulation_type)
{
- return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+ return x86_emulate_instruction(vcpu, 0,
+ emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
}

void kvm_enable_efer_bits(u64);
diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
index b20f9d623f9c..8f09012b92e7 100644
--- a/arch/x86/include/asm/segment.h
+++ b/arch/x86/include/asm/segment.h
@@ -236,11 +236,23 @@
*/
#define EARLY_IDT_HANDLER_SIZE 9

+/*
+ * xen_early_idt_handler_array is for Xen pv guests: for each entry in
+ * early_idt_handler_array it contains a prequel in the form of
+ * pop %rcx; pop %r11; jmp early_idt_handler_array[i]; summing up to
+ * max 8 bytes.
+ */
+#define XEN_EARLY_IDT_HANDLER_SIZE 8
+
#ifndef __ASSEMBLY__

extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
extern void early_ignore_irq(void);

+#if defined(CONFIG_X86_64) && defined(CONFIG_XEN_PV)
+extern const char xen_early_idt_handler_array[NUM_EXCEPTION_VECTORS][XEN_EARLY_IDT_HANDLER_SIZE];
+#endif
+
/*
* Load a segment. Fall back on loading the zero segment if something goes
* wrong. This variant assumes that loading zero fully clears the segment.
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index cdc70a3a6583..c2cea6651279 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -44,7 +44,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
[CPUID_1_ECX] = { 1, 0, CPUID_ECX},
[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
- [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
[CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
[CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
[CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 7bbb5da2b49d..eca6a89f2326 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -4023,6 +4023,26 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
fxstate_size(ctxt));
}

+/*
+ * FXRSTOR might restore XMM registers not provided by the guest. Fill
+ * in the host registers (via FXSAVE) instead, so they won't be modified.
+ * (preemption has to stay disabled until FXRSTOR).
+ *
+ * Use noinline to keep the stack for other functions called by callers small.
+ */
+static noinline int fxregs_fixup(struct fxregs_state *fx_state,
+ const size_t used_size)
+{
+ struct fxregs_state fx_tmp;
+ int rc;
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
+ memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
+ __fxstate_size(16) - used_size);
+
+ return rc;
+}
+
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
@@ -4033,19 +4053,19 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;

+ size = fxstate_size(ctxt);
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
ctxt->ops->get_fpu(ctxt);

- size = fxstate_size(ctxt);
if (size < __fxstate_size(16)) {
- rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+ rc = fxregs_fixup(&fx_state, size);
if (rc != X86EMUL_CONTINUE)
goto out;
}

- rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
- if (rc != X86EMUL_CONTINUE)
- goto out;
-
if (fx_state.mxcsr >> 16) {
rc = emulate_gp(ctxt, 0);
goto out;
@@ -5009,6 +5029,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
+ u16 dummy;
+ struct desc_struct desc;

ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
@@ -5027,6 +5049,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
+ def_op_bytes = def_ad_bytes = 2;
+ ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
+ if (desc.d)
+ def_op_bytes = def_ad_bytes = 4;
+ break;
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index bdff437acbcb..9d270ba9643c 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -257,8 +257,7 @@ void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
index == RTC_GSI) {
if (kvm_apic_match_dest(vcpu, NULL, 0,
e->fields.dest_id, e->fields.dest_mode) ||
- (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
- kvm_apic_pending_eoi(vcpu, e->fields.vector)))
+ kvm_apic_pending_eoi(vcpu, e->fields.vector))
__set_bit(e->fields.vector,
ioapic_handled_vectors);
}
@@ -277,6 +276,7 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
unsigned index;
bool mask_before, mask_after;
+ int old_remote_irr, old_delivery_status;
union kvm_ioapic_redirect_entry *e;

switch (ioapic->ioregsel) {
@@ -299,14 +299,28 @@ static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
return;
e = &ioapic->redirtbl[index];
mask_before = e->fields.mask;
+ /* Preserve read-only fields */
+ old_remote_irr = e->fields.remote_irr;
+ old_delivery_status = e->fields.delivery_status;
if (ioapic->ioregsel & 1) {
e->bits &= 0xffffffff;
e->bits |= (u64) val << 32;
} else {
e->bits &= ~0xffffffffULL;
e->bits |= (u32) val;
- e->fields.remote_irr = 0;
}
+ e->fields.remote_irr = old_remote_irr;
+ e->fields.delivery_status = old_delivery_status;
+
+ /*
+ * Some OSes (Linux, Xen) assume that Remote IRR bit will
+ * be cleared by IOAPIC hardware when the entry is configured
+ * as edge-triggered. This behavior is used to simulate an
+ * explicit EOI on IOAPICs that don't have the EOI register.
+ */
+ if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
+ e->fields.remote_irr = 0;
+
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ef16cf0f7cfd..a45063a9219c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5606,7 +5606,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
}

- vmcs_writel(GUEST_RFLAGS, 0x02);
+ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0xfff0);

vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -5919,10 +5919,6 @@ static int handle_exception(struct kvm_vcpu *vcpu)
return 1; /* already handled by vmx_vcpu_run() */

if (is_invalid_opcode(intr_info)) {
- if (is_guest_mode(vcpu)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
if (er == EMULATE_USER_EXIT)
return 0;
@@ -6608,7 +6604,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1;

- err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
+ err = emulate_instruction(vcpu, 0);

if (err == EMULATE_USER_EXIT) {
++vcpu->stat.mmio_exits;
@@ -11115,13 +11111,12 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
-
- if (kvm_event_needs_reinjection(vcpu))
- return -EBUSY;
+ bool block_nested_events =
+ vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);

if (vcpu->arch.exception.pending &&
nested_vmx_check_exception(vcpu, &exit_qual)) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
vcpu->arch.exception.pending = false;
@@ -11130,14 +11125,14 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)

if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
vmx->nested.preemption_timer_expired) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
return 0;
}

if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
NMI_VECTOR | INTR_TYPE_NMI_INTR |
@@ -11153,7 +11148,7 @@ static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)

if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
nested_exit_on_intr(vcpu)) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
return 0;
@@ -11340,6 +11335,24 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
kvm_clear_interrupt_queue(vcpu);
}

+static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u32 entry_failure_code;
+
+ nested_ept_uninit_mmu_context(vcpu);
+
+ /*
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+}
+
/*
* A part of what we need to when the nested L2 guest exits and we want to
* run its L1 parent, is to reset L1's guest state to the host state specified
@@ -11353,7 +11366,6 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
- u32 entry_failure_code;

if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
@@ -11380,17 +11392,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
vmx_set_cr4(vcpu, vmcs12->host_cr4);

- nested_ept_uninit_mmu_context(vcpu);
-
- /*
- * Only PDPTE load can fail as the value of cr3 was checked on entry and
- * couldn't have changed.
- */
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+ load_vmcs12_mmu_host_state(vcpu, vmcs12);

if (enable_vpid) {
/*
@@ -11616,6 +11618,9 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
* accordingly.
*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+ load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
/*
* The emulated instruction was already skipped in
* nested_vmx_run, but the updated RIP was never
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 575c8953cc9a..8c28023a43b1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1795,10 +1795,13 @@ u64 get_kvmclock_ns(struct kvm *kvm)
/* both __this_cpu_read() and rdtsc() should be on the same cpu */
get_cpu();

- kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
- &hv_clock.tsc_shift,
- &hv_clock.tsc_to_system_mul);
- ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+ if (__this_cpu_read(cpu_tsc_khz)) {
+ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+ &hv_clock.tsc_shift,
+ &hv_clock.tsc_to_system_mul);
+ ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+ } else
+ ret = ktime_get_boot_ns() + ka->kvmclock_offset;

put_cpu();

@@ -5416,7 +5419,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
- r = EMULATE_FAIL;
+ r = EMULATE_USER_EXIT;
}
kvm_queue_exception(vcpu, UD_VECTOR);

@@ -7242,12 +7245,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
struct fpu *fpu = &current->thread.fpu;
int r;
- sigset_t sigsaved;

fpu__initialize(fpu);

- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);

if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
if (kvm_run->immediate_exit) {
@@ -7290,8 +7291,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)

out:
post_kvm_run_save(vcpu);
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);

return r;
}
diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
index 30bc4812ceb8..9fe656c42aa5 100644
--- a/arch/x86/mm/extable.c
+++ b/arch/x86/mm/extable.c
@@ -1,6 +1,7 @@
#include <linux/extable.h>
#include <linux/uaccess.h>
#include <linux/sched/debug.h>
+#include <xen/xen.h>

#include <asm/fpu/internal.h>
#include <asm/traps.h>
@@ -212,8 +213,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
* Old CPUs leave the high bits of CS on the stack
* undefined. I'm not sure which CPUs do this, but at least
* the 486 DX works this way.
+ * Xen pv domains are not using the default __KERNEL_CS.
*/
- if (regs->cs != __KERNEL_CS)
+ if (!xen_pv_domain() && regs->cs != __KERNEL_CS)
goto fail;

/*
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index ae3a071e1d0f..899a22a02e95 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -622,7 +622,7 @@ static struct trap_array_entry trap_array[] = {
{ simd_coprocessor_error, xen_simd_coprocessor_error, false },
};

-static bool get_trap_addr(void **addr, unsigned int ist)
+static bool __ref get_trap_addr(void **addr, unsigned int ist)
{
unsigned int nr;
bool ist_okay = false;
@@ -644,6 +644,14 @@ static bool get_trap_addr(void **addr, unsigned int ist)
}
}

+ if (nr == ARRAY_SIZE(trap_array) &&
+ *addr >= (void *)early_idt_handler_array[0] &&
+ *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) {
+ nr = (*addr - (void *)early_idt_handler_array[0]) /
+ EARLY_IDT_HANDLER_SIZE;
+ *addr = (void *)xen_early_idt_handler_array[nr];
+ }
+
if (WARN_ON(ist != 0 && !ist_okay))
return false;

@@ -1261,6 +1269,21 @@ asmlinkage __visible void __init xen_start_kernel(void)
xen_setup_gdt(0);

xen_init_irq_ops();
+
+ /* Let's presume PV guests always boot on vCPU with id 0. */
+ per_cpu(xen_vcpu_id, 0) = 0;
+
+ /*
+ * Setup xen_vcpu early because idt_setup_early_handler needs it for
+ * local_irq_disable(), irqs_disabled().
+ *
+ * Don't do the full vcpu_info placement stuff until we have
+ * the cpu_possible_mask and a non-dummy shared_info.
+ */
+ xen_vcpu_info_reset(0);
+
+ idt_setup_early_handler();
+
xen_init_capabilities();

#ifdef CONFIG_X86_LOCAL_APIC
@@ -1294,18 +1317,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
*/
acpi_numa = -1;
#endif
- /* Let's presume PV guests always boot on vCPU with id 0. */
- per_cpu(xen_vcpu_id, 0) = 0;
-
- /*
- * Setup xen_vcpu early because start_kernel needs it for
- * local_irq_disable(), irqs_disabled().
- *
- * Don't do the full vcpu_info placement stuff until we have
- * the cpu_possible_mask and a non-dummy shared_info.
- */
- xen_vcpu_info_reset(0);
-
WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv));

local_irq_disable();
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index 8a10c9a9e2b5..417b339e5c8e 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -15,6 +15,7 @@

#include <xen/interface/xen.h>

+#include <linux/init.h>
#include <linux/linkage.h>

.macro xen_pv_trap name
@@ -54,6 +55,19 @@ xen_pv_trap entry_INT80_compat
#endif
xen_pv_trap hypervisor_callback

+ __INIT
+ENTRY(xen_early_idt_handler_array)
+ i = 0
+ .rept NUM_EXCEPTION_VECTORS
+ pop %rcx
+ pop %r11
+ jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
+ i = i + 1
+ .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
+ .endr
+END(xen_early_idt_handler_array)
+ __FINIT
+
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32
/*
* Xen64 iret frame:
diff --git a/crypto/Kconfig b/crypto/Kconfig
index ac5fb37e6f4b..42212b60a0ee 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -130,7 +130,7 @@ config CRYPTO_DH

config CRYPTO_ECDH
tristate "ECDH algorithm"
- select CRYTPO_KPP
+ select CRYPTO_KPP
select CRYPTO_RNG_DEFAULT
help
Generic implementation of the ECDH algorithm
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 6ec360213107..53b7fa4cf4ab 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(af_alg_release_parent);

static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
- const u32 forbidden = CRYPTO_ALG_INTERNAL;
+ const u32 allowed = CRYPTO_ALG_KERN_DRIVER_ONLY;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct sockaddr_alg *sa = (void *)uaddr;
@@ -158,6 +158,10 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
void *private;
int err;

+ /* If caller uses non-allowed flag, return error. */
+ if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+ return -EINVAL;
+
if (sock->state == SS_CONNECTED)
return -EINVAL;

@@ -176,9 +180,7 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (IS_ERR(type))
return PTR_ERR(type);

- private = type->bind(sa->salg_name,
- sa->salg_feat & ~forbidden,
- sa->salg_mask & ~forbidden);
+ private = type->bind(sa->salg_name, sa->salg_feat, sa->salg_mask);
if (IS_ERR(private)) {
module_put(type->owner);
return PTR_ERR(private);
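With the af_alg.c change, an AF_ALG bind() is rejected with -EINVAL whenever salg_feat or salg_mask carries any flag other than CRYPTO_ALG_KERN_DRIVER_ONLY, instead of silently masking CRYPTO_ALG_INTERNAL out. A minimal userspace sketch that still passes the stricter check (both flag fields simply left zero):

#include <string.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

/* Bind an AF_ALG socket to the sha256 hash. salg_feat/salg_mask stay 0,
 * so no disallowed flag trips the new -EINVAL check. */
int alg_bind_sha256(void)
{
	struct sockaddr_alg sa;
	int fd = socket(AF_ALG, SOCK_SEQPACKET, 0);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.salg_family = AF_ALG;
	strcpy((char *)sa.salg_type, "hash");
	strcpy((char *)sa.salg_name, "sha256");
	return bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ? -1 : fd;
}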
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 7e8ed96236ce..a68be626017c 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -18,6 +18,7 @@
#include <linux/types.h>
#include <crypto/sha3.h>
#include <asm/byteorder.h>
+#include <asm/unaligned.h>

#define KECCAK_ROUNDS 24

@@ -149,7 +150,7 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int i;

for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) src)[i];
+ sctx->st[i] ^= get_unaligned_le64(src + 8 * i);
keccakf(sctx->st);

done += sctx->rsiz;
@@ -174,7 +175,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
sctx->buf[sctx->rsiz - 1] |= 0x80;

for (i = 0; i < sctx->rsizw; i++)
- sctx->st[i] ^= ((u64 *) sctx->buf)[i];
+ sctx->st[i] ^= get_unaligned_le64(sctx->buf + 8 * i);

keccakf(sctx->st);

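The cast the two hunks above remove, ((u64 *)src)[i], is only safe when src happens to be 8-byte aligned; on strict-alignment architectures it can fault, and it also reads the wrong byte order on big-endian machines. A portable model of what a loader like get_unaligned_le64() computes (a sketch, not the kernel's implementation, which may use faster unaligned loads where the CPU allows them):

#include <stdint.h>

/* Assemble a little-endian u64 from 8 bytes at any alignment. */
static uint64_t load_le64(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}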
1250     diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c
1251     index 24418932612e..a041689e5701 100644
1252     --- a/drivers/acpi/device_sysfs.c
1253     +++ b/drivers/acpi/device_sysfs.c
1254     @@ -146,6 +146,10 @@ static int create_pnp_modalias(struct acpi_device *acpi_dev, char *modalias,
1255     int count;
1256     struct acpi_hardware_id *id;
1257    
1258     + /* Avoid unnecessarily loading modules for non present devices. */
1259     + if (!acpi_device_is_present(acpi_dev))
1260     + return 0;
1261     +
1262     /*
1263     * Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
1264     * be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
1265     diff --git a/drivers/android/binder.c b/drivers/android/binder.c
1266     index a340766b51fe..2ef8bd29e188 100644
1267     --- a/drivers/android/binder.c
1268     +++ b/drivers/android/binder.c
1269     @@ -4302,6 +4302,18 @@ static int binder_thread_release(struct binder_proc *proc,
1270     if (t)
1271     spin_lock(&t->lock);
1272     }
1273     +
1274     + /*
1275     + * If this thread used poll, make sure we remove the waitqueue
1276     + * from any epoll data structures holding it with POLLFREE.
1277     + * waitqueue_active() is safe to use here because we're holding
1278     + * the inner lock.
1279     + */
1280     + if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
1281     + waitqueue_active(&thread->wait)) {
1282     + wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
1283     + }
1284     +
1285     binder_inner_proc_unlock(thread->proc);
1286    
1287     if (send_reply)
1288     diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
1289     index c2819a3d58a6..6cb148268676 100644
1290     --- a/drivers/android/binder_alloc.c
1291     +++ b/drivers/android/binder_alloc.c
1292     @@ -668,7 +668,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
1293     goto err_already_mapped;
1294     }
1295    
1296     - area = get_vm_area(vma->vm_end - vma->vm_start, VM_IOREMAP);
1297     + area = get_vm_area(vma->vm_end - vma->vm_start, VM_ALLOC);
1298     if (area == NULL) {
1299     ret = -ENOMEM;
1300     failure_string = "get_vm_area";
1301     diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
1302     index d7d21118d3e0..2c2ed9cf8796 100644
1303     --- a/drivers/auxdisplay/Kconfig
1304     +++ b/drivers/auxdisplay/Kconfig
1305     @@ -136,6 +136,7 @@ config CFAG12864B_RATE
1306    
1307     config IMG_ASCII_LCD
1308     tristate "Imagination Technologies ASCII LCD Display"
1309     + depends on HAS_IOMEM
1310     default y if MIPS_MALTA || MIPS_SEAD3
1311     select SYSCON
1312     help
1313     diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1314     index 85de67334695..a2a0dce5114e 100644
1315     --- a/drivers/block/loop.c
1316     +++ b/drivers/block/loop.c
1317     @@ -1576,9 +1576,8 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
1318     return err;
1319     }
1320    
1321     -static void lo_release(struct gendisk *disk, fmode_t mode)
1322     +static void __lo_release(struct loop_device *lo)
1323     {
1324     - struct loop_device *lo = disk->private_data;
1325     int err;
1326    
1327     if (atomic_dec_return(&lo->lo_refcnt))
1328     @@ -1605,6 +1604,13 @@ static void lo_release(struct gendisk *disk, fmode_t mode)
1329     mutex_unlock(&lo->lo_ctl_mutex);
1330     }
1331    
1332     +static void lo_release(struct gendisk *disk, fmode_t mode)
1333     +{
1334     + mutex_lock(&loop_index_mutex);
1335     + __lo_release(disk->private_data);
1336     + mutex_unlock(&loop_index_mutex);
1337     +}
1338     +
1339     static const struct block_device_operations lo_fops = {
1340     .owner = THIS_MODULE,
1341     .open = lo_open,
1342     diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
1343     index 4d55af5c6e5b..69dfa1d3f453 100644
1344     --- a/drivers/block/null_blk.c
1345     +++ b/drivers/block/null_blk.c
1346     @@ -467,7 +467,6 @@ static void nullb_device_release(struct config_item *item)
1347     {
1348     struct nullb_device *dev = to_nullb_device(item);
1349    
1350     - badblocks_exit(&dev->badblocks);
1351     null_free_device_storage(dev, false);
1352     null_free_dev(dev);
1353     }
1354     @@ -578,6 +577,10 @@ static struct nullb_device *null_alloc_dev(void)
1355    
1356     static void null_free_dev(struct nullb_device *dev)
1357     {
1358     + if (!dev)
1359     + return;
1360     +
1361     + badblocks_exit(&dev->badblocks);
1362     kfree(dev);
1363     }
1364    
1365     diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
1366     index 4ebae43118ef..d8addbce40bc 100644
1367     --- a/drivers/cpufreq/Kconfig
1368     +++ b/drivers/cpufreq/Kconfig
1369     @@ -275,6 +275,7 @@ config BMIPS_CPUFREQ
1370    
1371     config LOONGSON2_CPUFREQ
1372     tristate "Loongson2 CPUFreq Driver"
1373     + depends on LEMOTE_MACH2F
1374     help
1375     This option adds a CPUFreq driver for loongson processors which
1376     support software configurable cpu frequency.
1377     @@ -287,6 +288,7 @@ config LOONGSON2_CPUFREQ
1378    
1379     config LOONGSON1_CPUFREQ
1380     tristate "Loongson1 CPUFreq Driver"
1381     + depends on LOONGSON1_LS1B
1382     help
1383     This option adds a CPUFreq driver for loongson1 processors which
1384     support software configurable cpu frequency.
1385     diff --git a/drivers/crypto/inside-secure/safexcel_hash.c b/drivers/crypto/inside-secure/safexcel_hash.c
1386     index 3980f946874f..0626b33d2886 100644
1387     --- a/drivers/crypto/inside-secure/safexcel_hash.c
1388     +++ b/drivers/crypto/inside-secure/safexcel_hash.c
1389     @@ -33,6 +33,8 @@ struct safexcel_ahash_req {
1390     bool finish;
1391     bool hmac;
1392    
1393     + int nents;
1394     +
1395     u8 state_sz; /* expected sate size, only set once */
1396     u32 state[SHA256_DIGEST_SIZE / sizeof(u32)];
1397    
1398     @@ -151,8 +153,10 @@ static int safexcel_handle_result(struct safexcel_crypto_priv *priv, int ring,
1399     result_sz = crypto_ahash_digestsize(ahash);
1400     memcpy(sreq->state, areq->result, result_sz);
1401    
1402     - dma_unmap_sg(priv->dev, areq->src,
1403     - sg_nents_for_len(areq->src, areq->nbytes), DMA_TO_DEVICE);
1404     + if (sreq->nents) {
1405     + dma_unmap_sg(priv->dev, areq->src, sreq->nents, DMA_TO_DEVICE);
1406     + sreq->nents = 0;
1407     + }
1408    
1409     safexcel_free_context(priv, async, sreq->state_sz);
1410    
1411     @@ -177,7 +181,7 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
1412     struct safexcel_command_desc *cdesc, *first_cdesc = NULL;
1413     struct safexcel_result_desc *rdesc;
1414     struct scatterlist *sg;
1415     - int i, nents, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
1416     + int i, queued, len, cache_len, extra, n_cdesc = 0, ret = 0;
1417    
1418     queued = len = req->len - req->processed;
1419     if (queued < crypto_ahash_blocksize(ahash))
1420     @@ -185,17 +189,31 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
1421     else
1422     cache_len = queued - areq->nbytes;
1423    
1424     - /*
1425     - * If this is not the last request and the queued data does not fit
1426     - * into full blocks, cache it for the next send() call.
1427     - */
1428     - extra = queued & (crypto_ahash_blocksize(ahash) - 1);
1429     - if (!req->last_req && extra) {
1430     - sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
1431     - req->cache_next, extra, areq->nbytes - extra);
1432     -
1433     - queued -= extra;
1434     - len -= extra;
1435     + if (!req->last_req) {
1436     + /* If this is not the last request and the queued data does not
1437     + * fit into full blocks, cache it for the next send() call.
1438     + */
1439     + extra = queued & (crypto_ahash_blocksize(ahash) - 1);
1440     + if (!extra)
1441     + /* If this is not the last request and the queued data
1442     + * is a multiple of a block, cache the last one for now.
1443     + */
1444     + extra = queued - crypto_ahash_blocksize(ahash);
1445     +
1446     + if (extra) {
1447     + sg_pcopy_to_buffer(areq->src, sg_nents(areq->src),
1448     + req->cache_next, extra,
1449     + areq->nbytes - extra);
1450     +
1451     + queued -= extra;
1452     + len -= extra;
1453     +
1454     + if (!queued) {
1455     + *commands = 0;
1456     + *results = 0;
1457     + return 0;
1458     + }
1459     + }
1460     }
1461    
1462     spin_lock_bh(&priv->ring[ring].egress_lock);
1463     @@ -233,15 +251,15 @@ static int safexcel_ahash_send(struct crypto_async_request *async, int ring,
1464     }
1465    
1466     /* Now handle the current ahash request buffer(s) */
1467     - nents = dma_map_sg(priv->dev, areq->src,
1468     - sg_nents_for_len(areq->src, areq->nbytes),
1469     - DMA_TO_DEVICE);
1470     - if (!nents) {
1471     + req->nents = dma_map_sg(priv->dev, areq->src,
1472     + sg_nents_for_len(areq->src, areq->nbytes),
1473     + DMA_TO_DEVICE);
1474     + if (!req->nents) {
1475     ret = -ENOMEM;
1476     goto cdesc_rollback;
1477     }
1478    
1479     - for_each_sg(areq->src, sg, nents, i) {
1480     + for_each_sg(areq->src, sg, req->nents, i) {
1481     int sglen = sg_dma_len(sg);
1482    
1483     /* Do not overflow the request */
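The safexcel change works because dma_map_sg() may return fewer entries than it was handed (an IOMMU can coalesce them), and dma_unmap_sg() must be called with the count the map returned, not a recomputed sg_nents_for_len(). Caching that count in the request, and zeroing it after unmap, also keeps the completion path from unmapping twice. A userspace model of the bookkeeping, with map_sg()/unmap_sg() as stand-ins for the real DMA API:

    #include <assert.h>
    #include <stdio.h>

    struct req {
            int nents;              /* cached mapped-entry count, 0 = unmapped */
    };

    static int map_sg(int nents_requested)
    {
            /* pretend an IOMMU coalesced the list down to two entries */
            return nents_requested > 2 ? 2 : nents_requested;
    }

    static void unmap_sg(int nents_mapped)
    {
            printf("unmapping %d entries\n", nents_mapped);
    }

    int main(void)
    {
            struct req r = { .nents = 0 };

            r.nents = map_sg(4);    /* cache what the map actually produced */
            assert(r.nents == 2);

            if (r.nents) {          /* completion path: unmap once, then clear */
                    unmap_sg(r.nents);
                    r.nents = 0;
            }
            return 0;
    }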
1484     diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
1485     index 2b4c39fdfa91..86210f75d233 100644
1486     --- a/drivers/firmware/efi/Kconfig
1487     +++ b/drivers/firmware/efi/Kconfig
1488     @@ -159,7 +159,10 @@ config RESET_ATTACK_MITIGATION
1489     using the TCG Platform Reset Attack Mitigation specification. This
1490     protects against an attacker forcibly rebooting the system while it
1491     still contains secrets in RAM, booting another OS and extracting the
1492     - secrets.
1493     + secrets. This should only be enabled when userland is configured to
1494     + clear the MemoryOverwriteRequest flag on clean shutdown after secrets
1495     + have been evicted, since otherwise it will trigger even on clean
1496     + reboots.
1497    
1498     endmenu
1499    
1500     diff --git a/drivers/gpio/gpio-ath79.c b/drivers/gpio/gpio-ath79.c
1501     index f33d4a5fe671..af0baf8da295 100644
1502     --- a/drivers/gpio/gpio-ath79.c
1503     +++ b/drivers/gpio/gpio-ath79.c
1504     @@ -323,3 +323,6 @@ static struct platform_driver ath79_gpio_driver = {
1505     };
1506    
1507     module_platform_driver(ath79_gpio_driver);
1508     +
1509     +MODULE_DESCRIPTION("Atheros AR71XX/AR724X/AR913X GPIO API support");
1510     +MODULE_LICENSE("GPL v2");
1511     diff --git a/drivers/gpio/gpio-iop.c b/drivers/gpio/gpio-iop.c
1512     index 98c7ff2a76e7..8d62db447ec1 100644
1513     --- a/drivers/gpio/gpio-iop.c
1514     +++ b/drivers/gpio/gpio-iop.c
1515     @@ -58,3 +58,7 @@ static int __init iop3xx_gpio_init(void)
1516     return platform_driver_register(&iop3xx_gpio_driver);
1517     }
1518     arch_initcall(iop3xx_gpio_init);
1519     +
1520     +MODULE_DESCRIPTION("GPIO handling for Intel IOP3xx processors");
1521     +MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
1522     +MODULE_LICENSE("GPL");
1523     diff --git a/drivers/gpio/gpio-stmpe.c b/drivers/gpio/gpio-stmpe.c
1524     index 16cbc5702865..491b0974c0fe 100644
1525     --- a/drivers/gpio/gpio-stmpe.c
1526     +++ b/drivers/gpio/gpio-stmpe.c
1527     @@ -190,6 +190,16 @@ static void stmpe_gpio_irq_sync_unlock(struct irq_data *d)
1528     };
1529     int i, j;
1530    
1531     + /*
1532     + * STMPE1600: to be able to get IRQ from pins,
1533     + * a read must be done on GPMR register, or a write in
1534     + * GPSR or GPCR registers
1535     + */
1536     + if (stmpe->partnum == STMPE1600) {
1537     + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_LSB]);
1538     + stmpe_reg_read(stmpe, stmpe->regs[STMPE_IDX_GPMR_CSB]);
1539     + }
1540     +
1541     for (i = 0; i < CACHE_NR_REGS; i++) {
1542     /* STMPE801 and STMPE1600 don't have RE and FE registers */
1543     if ((stmpe->partnum == STMPE801 ||
1544     @@ -227,21 +237,11 @@ static void stmpe_gpio_irq_unmask(struct irq_data *d)
1545     {
1546     struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
1547     struct stmpe_gpio *stmpe_gpio = gpiochip_get_data(gc);
1548     - struct stmpe *stmpe = stmpe_gpio->stmpe;
1549     int offset = d->hwirq;
1550     int regoffset = offset / 8;
1551     int mask = BIT(offset % 8);
1552    
1553     stmpe_gpio->regs[REG_IE][regoffset] |= mask;
1554     -
1555     - /*
1556     - * STMPE1600 workaround: to be able to get IRQ from pins,
1557     - * a read must be done on GPMR register, or a write in
1558     - * GPSR or GPCR registers
1559     - */
1560     - if (stmpe->partnum == STMPE1600)
1561     - stmpe_reg_read(stmpe,
1562     - stmpe->regs[STMPE_IDX_GPMR_LSB + regoffset]);
1563     }
1564    
1565     static void stmpe_dbg_show_one(struct seq_file *s,
1566     diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
1567     index eb80dac4e26a..bdd68ff197dc 100644
1568     --- a/drivers/gpio/gpiolib.c
1569     +++ b/drivers/gpio/gpiolib.c
1570     @@ -723,6 +723,9 @@ static irqreturn_t lineevent_irq_thread(int irq, void *p)
1571     struct gpioevent_data ge;
1572     int ret, level;
1573    
1574     + /* Do not leak kernel stack to userspace */
1575     + memset(&ge, 0, sizeof(ge));
1576     +
1577     ge.timestamp = ktime_get_real_ns();
1578     level = gpiod_get_value_cansleep(le->desc);
1579    
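The gpiolib one-liner is a classic infoleak fix: struct gpioevent_data is copied to userspace as a whole, so any padding bytes or fields the handler does not write would otherwise carry stale kernel stack contents. A small userspace illustration of why zeroing first makes the whole copy deterministic; the event layout below is hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct event {
            uint64_t timestamp;
            uint32_t id;            /* 4 bytes of tail padding follow on LP64 */
    };

    static void fill_event(struct event *e)
    {
            memset(e, 0, sizeof(*e));       /* zero padding + any skipped field */
            e->timestamp = 123456789;
            e->id = 1;
    }

    int main(void)
    {
            struct event e;

            fill_event(&e);
            /* every byte of e, padding included, is now deterministic */
            for (size_t i = 0; i < sizeof(e); i++)
                    printf("%02x", ((unsigned char *)&e)[i]);
            printf("\n");
            return 0;
    }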
1580     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
1581     index b9dbbf9cb8b0..bdabaa3399db 100644
1582     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
1583     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
1584     @@ -369,29 +369,50 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
1585     {
1586     struct amdgpu_device *adev = get_amdgpu_device(kgd);
1587     struct cik_sdma_rlc_registers *m;
1588     + unsigned long end_jiffies;
1589     uint32_t sdma_base_addr;
1590     + uint32_t data;
1591    
1592     m = get_sdma_mqd(mqd);
1593     sdma_base_addr = get_sdma_base_addr(m);
1594    
1595     - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
1596     - m->sdma_rlc_virtual_addr);
1597     + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
1598     + m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
1599    
1600     - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
1601     - m->sdma_rlc_rb_base);
1602     + end_jiffies = msecs_to_jiffies(2000) + jiffies;
1603     + while (true) {
1604     + data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
1605     + if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
1606     + break;
1607     + if (time_after(jiffies, end_jiffies))
1608     + return -ETIME;
1609     + usleep_range(500, 1000);
1610     + }
1611     + if (m->sdma_engine_id) {
1612     + data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
1613     + data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
1614     + RESUME_CTX, 0);
1615     + WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
1616     + } else {
1617     + data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
1618     + data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
1619     + RESUME_CTX, 0);
1620     + WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
1621     + }
1622    
1623     + WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
1624     + m->sdma_rlc_doorbell);
1625     + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
1626     + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
1627     + WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
1628     + m->sdma_rlc_virtual_addr);
1629     + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
1630     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
1631     m->sdma_rlc_rb_base_hi);
1632     -
1633     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
1634     m->sdma_rlc_rb_rptr_addr_lo);
1635     -
1636     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
1637     m->sdma_rlc_rb_rptr_addr_hi);
1638     -
1639     - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
1640     - m->sdma_rlc_doorbell);
1641     -
1642     WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
1643     m->sdma_rlc_rb_cntl);
1644    
1645     @@ -564,9 +585,9 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
1646     }
1647    
1648     WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
1649     - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
1650     - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
1651     - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
1652     + WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
1653     + RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
1654     + SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
1655    
1656     return 0;
1657     }
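The rewritten kgd_hqd_sdma_load() follows a common MMIO pattern: disable the ring, poll a status register for an IDLE bit under a jiffies deadline, and only then reprogram the queue. A userspace model of that bounded poll, with read_status() standing in for RREG32() and a coarse second-granularity deadline that is enough for a sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define IDLE_MASK 0x1

    static unsigned int read_status(void)
    {
            static int reads;
            return ++reads >= 3 ? IDLE_MASK : 0;    /* goes idle on 3rd read */
    }

    static int wait_for_idle(int timeout_ms)
    {
            struct timespec end, now;

            clock_gettime(CLOCK_MONOTONIC, &end);
            end.tv_sec += timeout_ms / 1000;

            while (!(read_status() & IDLE_MASK)) {
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if (now.tv_sec > end.tv_sec)
                            return -ETIME;          /* as in the hunk above */
                    usleep(500);    /* like usleep_range(500, 1000) */
            }
            return 0;
    }

    int main(void)
    {
            printf("wait_for_idle: %d\n", wait_for_idle(2000));
            return 0;
    }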
1658     diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1659     index 60d8bedb694d..b5aa8e6f8e0b 100644
1660     --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1661     +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
1662     @@ -403,6 +403,10 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
1663     if (candidate->robj == validated)
1664     break;
1665    
1666     + /* We can't move pinned BOs here */
1667     + if (bo->pin_count)
1668     + continue;
1669     +
1670     other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
1671    
1672     /* Check if this BO is in one of the domains we need space for */
1673     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
1674     index 44ffd23348fc..164fa4b1f9a9 100644
1675     --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
1676     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
1677     @@ -205,8 +205,8 @@ static int update_mqd_sdma(struct mqd_manager *mm, void *mqd,
1678     struct cik_sdma_rlc_registers *m;
1679    
1680     m = get_sdma_mqd(mqd);
1681     - m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
1682     - SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
1683     + m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
1684     + << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
1685     q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1686     1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
1687     6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
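The kfd_mqd_manager_cik hunk is a pure off-by-one: the RB_SIZE field encodes log2 of the ring size in dwords, and for a power of two ffs() returns the bit position plus one, so the result must be decremented. A worked check:

    #include <assert.h>
    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    int main(void)
    {
            unsigned int queue_size = 1024;                             /* bytes */
            unsigned int dwords = queue_size / sizeof(unsigned int);    /* 256 */

            assert(ffs(dwords) == 9);       /* bit 8 set -> ffs() returns 9 */
            assert(ffs(dwords) - 1 == 8);   /* log2(256), what RB_SIZE wants */
            printf("RB_SIZE = %d\n", ffs(dwords) - 1);
            return 0;
    }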
1688     diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1689     index 03bec765b03d..f9a1a4db9be7 100644
1690     --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1691     +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
1692     @@ -184,6 +184,24 @@ int pqm_create_queue(struct process_queue_manager *pqm,
1693    
1694     switch (type) {
1695     case KFD_QUEUE_TYPE_SDMA:
1696     + if (dev->dqm->queue_count >=
1697     + CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
1698     + pr_err("Over-subscription is not allowed for SDMA.\n");
1699     + retval = -EPERM;
1700     + goto err_create_queue;
1701     + }
1702     +
1703     + retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
1704     + if (retval != 0)
1705     + goto err_create_queue;
1706     + pqn->q = q;
1707     + pqn->kq = NULL;
1708     + retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
1709     + &q->properties.vmid);
1710     + pr_debug("DQM returned %d for create_queue\n", retval);
1711     + print_queue(q);
1712     + break;
1713     +
1714     case KFD_QUEUE_TYPE_COMPUTE:
1715     /* check if there is over subscription */
1716     if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
1717     diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c
1718     index 0903ba574f61..75b0d3f6e4de 100644
1719     --- a/drivers/gpu/drm/bridge/lvds-encoder.c
1720     +++ b/drivers/gpu/drm/bridge/lvds-encoder.c
1721     @@ -13,13 +13,37 @@
1722    
1723     #include <linux/of_graph.h>
1724    
1725     +struct lvds_encoder {
1726     + struct drm_bridge bridge;
1727     + struct drm_bridge *panel_bridge;
1728     +};
1729     +
1730     +static int lvds_encoder_attach(struct drm_bridge *bridge)
1731     +{
1732     + struct lvds_encoder *lvds_encoder = container_of(bridge,
1733     + struct lvds_encoder,
1734     + bridge);
1735     +
1736     + return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
1737     + bridge);
1738     +}
1739     +
1740     +static struct drm_bridge_funcs funcs = {
1741     + .attach = lvds_encoder_attach,
1742     +};
1743     +
1744     static int lvds_encoder_probe(struct platform_device *pdev)
1745     {
1746     struct device_node *port;
1747     struct device_node *endpoint;
1748     struct device_node *panel_node;
1749     struct drm_panel *panel;
1750     - struct drm_bridge *bridge;
1751     + struct lvds_encoder *lvds_encoder;
1752     +
1753     + lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
1754     + GFP_KERNEL);
1755     + if (!lvds_encoder)
1756     + return -ENOMEM;
1757    
1758     /* Locate the panel DT node. */
1759     port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
1760     @@ -49,20 +73,30 @@ static int lvds_encoder_probe(struct platform_device *pdev)
1761     return -EPROBE_DEFER;
1762     }
1763    
1764     - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
1765     - if (IS_ERR(bridge))
1766     - return PTR_ERR(bridge);
1767     + lvds_encoder->panel_bridge =
1768     + devm_drm_panel_bridge_add(&pdev->dev,
1769     + panel, DRM_MODE_CONNECTOR_LVDS);
1770     + if (IS_ERR(lvds_encoder->panel_bridge))
1771     + return PTR_ERR(lvds_encoder->panel_bridge);
1772     +
1773     + /* The panel_bridge bridge is attached to the panel's of_node,
1774     + * but we need a bridge attached to our of_node for our user
1775     + * to look up.
1776     + */
1777     + lvds_encoder->bridge.of_node = pdev->dev.of_node;
1778     + lvds_encoder->bridge.funcs = &funcs;
1779     + drm_bridge_add(&lvds_encoder->bridge);
1780    
1781     - platform_set_drvdata(pdev, bridge);
1782     + platform_set_drvdata(pdev, lvds_encoder);
1783    
1784     return 0;
1785     }
1786    
1787     static int lvds_encoder_remove(struct platform_device *pdev)
1788     {
1789     - struct drm_bridge *bridge = platform_get_drvdata(pdev);
1790     + struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
1791    
1792     - drm_bridge_remove(bridge);
1793     + drm_bridge_remove(&lvds_encoder->bridge);
1794    
1795     return 0;
1796     }
1797     diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
1798     index 8571cfd877c5..8636e7eeb731 100644
1799     --- a/drivers/gpu/drm/bridge/tc358767.c
1800     +++ b/drivers/gpu/drm/bridge/tc358767.c
1801     @@ -97,7 +97,7 @@
1802     #define DP0_ACTIVEVAL 0x0650
1803     #define DP0_SYNCVAL 0x0654
1804     #define DP0_MISC 0x0658
1805     -#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
1806     +#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
1807     #define BPC_6 (0 << 5)
1808     #define BPC_8 (1 << 5)
1809    
1810     @@ -318,7 +318,7 @@ static ssize_t tc_aux_transfer(struct drm_dp_aux *aux,
1811     tmp = (tmp << 8) | buf[i];
1812     i++;
1813     if (((i % 4) == 0) || (i == size)) {
1814     - tc_write(DP0_AUXWDATA(i >> 2), tmp);
1815     + tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
1816     tmp = 0;
1817     }
1818     }
1819     @@ -603,8 +603,15 @@ static int tc_get_display_props(struct tc_data *tc)
1820     ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
1821     if (ret < 0)
1822     goto err_dpcd_read;
1823     - if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
1824     - goto err_dpcd_inval;
1825     + if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
1826     + dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n");
1827     + tc->link.base.rate = 270000;
1828     + }
1829     +
1830     + if (tc->link.base.num_lanes > 2) {
1831     + dev_dbg(tc->dev, "Falling back to 2 lanes\n");
1832     + tc->link.base.num_lanes = 2;
1833     + }
1834    
1835     ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
1836     if (ret < 0)
1837     @@ -637,9 +644,6 @@ static int tc_get_display_props(struct tc_data *tc)
1838     err_dpcd_read:
1839     dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
1840     return ret;
1841     -err_dpcd_inval:
1842     - dev_err(tc->dev, "invalid DPCD\n");
1843     - return -EINVAL;
1844     }
1845    
1846     static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
1847     @@ -655,6 +659,14 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
1848     int lower_margin = mode->vsync_start - mode->vdisplay;
1849     int vsync_len = mode->vsync_end - mode->vsync_start;
1850    
1851     + /*
1852     + * Recommended maximum number of symbols transferred in a transfer unit:
1853     + * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
1854     + * (output active video bandwidth in bytes))
1855     + * Must be less than tu_size.
1856     + */
1857     + max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
1858     +
1859     dev_dbg(tc->dev, "set mode %dx%d\n",
1860     mode->hdisplay, mode->vdisplay);
1861     dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
1862     @@ -664,13 +676,18 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
1863     dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
1864    
1865    
1866     - /* LCD Ctl Frame Size */
1867     - tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
1868     + /*
1869     + * LCD Ctl Frame Size
1870     + * datasheet is not clear about VSDELAY in case of DPI;
1871     + * assume we do not need any delay when DPI is the source of
1872     + * sync signals
1873     + */
1874     + tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
1875     OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
1876     - tc_write(HTIM01, (left_margin << 16) | /* H back porch */
1877     - (hsync_len << 0)); /* Hsync */
1878     - tc_write(HTIM02, (right_margin << 16) | /* H front porch */
1879     - (mode->hdisplay << 0)); /* width */
1880     + tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
1881     + (ALIGN(hsync_len, 2) << 0)); /* Hsync */
1882     + tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
1883     + (ALIGN(mode->hdisplay, 2) << 0)); /* width */
1884     tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
1885     (vsync_len << 0)); /* Vsync */
1886     tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
1887     @@ -689,7 +706,7 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
1888     /* DP Main Stream Attributes */
1889     vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
1890     tc_write(DP0_VIDSYNCDELAY,
1891     - (0x003e << 16) | /* thresh_dly */
1892     + (max_tu_symbol << 16) | /* thresh_dly */
1893     (vid_sync_dly << 0));
1894    
1895     tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
1896     @@ -705,14 +722,8 @@ static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
1897     tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
1898     DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
1899    
1900     - /*
1901     - * Recommended maximum number of symbols transferred in a transfer unit:
1902     - * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
1903     - * (output active video bandwidth in bytes))
1904     - * Must be less than tu_size.
1905     - */
1906     - max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
1907     - tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
1908     + tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
1909     + BPC_8);
1910    
1911     return 0;
1912     err:
1913     @@ -808,8 +819,6 @@ static int tc_main_link_setup(struct tc_data *tc)
1914     unsigned int rate;
1915     u32 dp_phy_ctrl;
1916     int timeout;
1917     - bool aligned;
1918     - bool ready;
1919     u32 value;
1920     int ret;
1921     u8 tmp[8];
1922     @@ -954,16 +963,15 @@ static int tc_main_link_setup(struct tc_data *tc)
1923     ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
1924     if (ret < 0)
1925     goto err_dpcd_read;
1926     - ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
1927     - DP_CHANNEL_EQ_BITS)); /* Lane0 */
1928     - aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
1929     - } while ((--timeout) && !(ready && aligned));
1930     + } while ((--timeout) &&
1931     + !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
1932    
1933     if (timeout == 0) {
1934     /* Read DPCD 0x200-0x201 */
1935     ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
1936     if (ret < 0)
1937     goto err_dpcd_read;
1938     + dev_err(dev, "channel(s) EQ not ok\n");
1939     dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
1940     dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
1941     tmp[1]);
1942     @@ -974,10 +982,6 @@ static int tc_main_link_setup(struct tc_data *tc)
1943     dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
1944     tmp[6]);
1945    
1946     - if (!ready)
1947     - dev_err(dev, "Lane0/1 not ready\n");
1948     - if (!aligned)
1949     - dev_err(dev, "Lane0/1 not aligned\n");
1950     return -EAGAIN;
1951     }
1952    
1953     @@ -1099,7 +1103,10 @@ static bool tc_bridge_mode_fixup(struct drm_bridge *bridge,
1954     static int tc_connector_mode_valid(struct drm_connector *connector,
1955     struct drm_display_mode *mode)
1956     {
1957     - /* Accept any mode */
1958     + /* DPI interface clock limitation: up to 154 MHz */
1959     + if (mode->clock > 154000)
1960     + return MODE_CLOCK_HIGH;
1961     +
1962     return MODE_OK;
1963     }
1964    
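Several tc358767 hunks hang off the TU_SIZE_RECOMMENDED cleanup: the old definition baked a << 16 shift into the constant, so expressions like TU_SIZE_RECOMMENDED - 1 (used for max_tu_symbol) evaluated to 0x3effff instead of 62. With the constant kept as a plain count and shifted at the use site, the DP0_MISC packing comes out right. A quick check of the arithmetic, treating the field offsets from the hunk above as given:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TU_SIZE_RECOMMENDED   63        /* LSCLK cycles per TU, unshifted */
    #define BPC_8                 (1u << 5)

    int main(void)
    {
            uint32_t max_tu_symbol = TU_SIZE_RECOMMENDED - 1;   /* 62, not 0x3effff */
            uint32_t dp0_misc = (max_tu_symbol << 23) |
                                (TU_SIZE_RECOMMENDED << 16) | BPC_8;

            assert(((dp0_misc >> 23) & 0x7f) == 62);
            assert(((dp0_misc >> 16) & 0x7f) == 63);
            printf("DP0_MISC = 0x%08x\n", dp0_misc);
            return 0;
    }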
1965     diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
1966     index c226da145fb3..a349cb61961e 100644
1967     --- a/drivers/gpu/drm/omapdrm/displays/Kconfig
1968     +++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
1969     @@ -35,6 +35,7 @@ config DRM_OMAP_CONNECTOR_ANALOG_TV
1970    
1971     config DRM_OMAP_PANEL_DPI
1972     tristate "Generic DPI panel"
1973     + depends on BACKLIGHT_CLASS_DEVICE
1974     help
1975     Driver for generic DPI panels.
1976    
1977     diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1978     index 1dd3dafc59af..c60a85e82c6d 100644
1979     --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1980     +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
1981     @@ -638,7 +638,8 @@ static int omap_dmm_probe(struct platform_device *dev)
1982     match = of_match_node(dmm_of_match, dev->dev.of_node);
1983     if (!match) {
1984     dev_err(&dev->dev, "failed to find matching device node\n");
1985     - return -ENODEV;
1986     + ret = -ENODEV;
1987     + goto fail;
1988     }
1989    
1990     omap_dmm->plat_data = match->data;
1991     diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
1992     index 9a20b9dc27c8..f7fc652b0027 100644
1993     --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
1994     +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi.c
1995     @@ -1275,8 +1275,6 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
1996     goto err_pllref;
1997     }
1998    
1999     - pm_runtime_enable(dev);
2000     -
2001     dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
2002     dsi->dsi_host.dev = dev;
2003     ret = mipi_dsi_host_register(&dsi->dsi_host);
2004     @@ -1291,6 +1289,7 @@ static int dw_mipi_dsi_bind(struct device *dev, struct device *master,
2005     }
2006    
2007     dev_set_drvdata(dev, dsi);
2008     + pm_runtime_enable(dev);
2009     return 0;
2010    
2011     err_mipi_dsi_host:
2012     diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
2013     index 7d7af3a93d94..521addec831e 100644
2014     --- a/drivers/gpu/drm/vc4/vc4_irq.c
2015     +++ b/drivers/gpu/drm/vc4/vc4_irq.c
2016     @@ -225,6 +225,9 @@ vc4_irq_uninstall(struct drm_device *dev)
2017     /* Clear any pending interrupts we might have left. */
2018     V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
2019    
2020     + /* Finish any interrupt handler still in flight. */
2021     + disable_irq(dev->irq);
2022     +
2023     cancel_work_sync(&vc4->overflow_mem_work);
2024     }
2025    
2026     diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
2027     index 622cd43840b8..493f392b3a0a 100644
2028     --- a/drivers/gpu/drm/vc4/vc4_v3d.c
2029     +++ b/drivers/gpu/drm/vc4/vc4_v3d.c
2030     @@ -327,6 +327,9 @@ static int vc4_v3d_runtime_resume(struct device *dev)
2031     return ret;
2032    
2033     vc4_v3d_init_hw(vc4->dev);
2034     +
2035     + /* We disabled the IRQ as part of vc4_irq_uninstall in suspend. */
2036     + enable_irq(vc4->dev->irq);
2037     vc4_irq_postinstall(vc4->dev);
2038    
2039     return 0;
2040     diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
2041     index 906e654fb0ba..65f1cfbbe7fe 100644
2042     --- a/drivers/hid/wacom_sys.c
2043     +++ b/drivers/hid/wacom_sys.c
2044     @@ -2340,23 +2340,23 @@ static void wacom_remote_destroy_one(struct wacom *wacom, unsigned int index)
2045     int i;
2046     unsigned long flags;
2047    
2048     - spin_lock_irqsave(&remote->remote_lock, flags);
2049     - remote->remotes[index].registered = false;
2050     - spin_unlock_irqrestore(&remote->remote_lock, flags);
2051     + for (i = 0; i < WACOM_MAX_REMOTES; i++) {
2052     + if (remote->remotes[i].serial == serial) {
2053    
2054     - if (remote->remotes[index].battery.battery)
2055     - devres_release_group(&wacom->hdev->dev,
2056     - &remote->remotes[index].battery.bat_desc);
2057     + spin_lock_irqsave(&remote->remote_lock, flags);
2058     + remote->remotes[i].registered = false;
2059     + spin_unlock_irqrestore(&remote->remote_lock, flags);
2060    
2061     - if (remote->remotes[index].group.name)
2062     - devres_release_group(&wacom->hdev->dev,
2063     - &remote->remotes[index]);
2064     + if (remote->remotes[i].battery.battery)
2065     + devres_release_group(&wacom->hdev->dev,
2066     + &remote->remotes[i].battery.bat_desc);
2067     +
2068     + if (remote->remotes[i].group.name)
2069     + devres_release_group(&wacom->hdev->dev,
2070     + &remote->remotes[i]);
2071    
2072     - for (i = 0; i < WACOM_MAX_REMOTES; i++) {
2073     - if (remote->remotes[i].serial == serial) {
2074     remote->remotes[i].serial = 0;
2075     remote->remotes[i].group.name = NULL;
2076     - remote->remotes[i].registered = false;
2077     remote->remotes[i].battery.battery = NULL;
2078     wacom->led.groups[i].select = WACOM_STATUS_UNKNOWN;
2079     }
2080     diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
2081     index aa692e28b2cd..70cbe1e5a3d2 100644
2082     --- a/drivers/hid/wacom_wac.c
2083     +++ b/drivers/hid/wacom_wac.c
2084     @@ -1924,7 +1924,6 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
2085     struct wacom_features *features = &wacom_wac->features;
2086     unsigned equivalent_usage = wacom_equivalent_usage(usage->hid);
2087     int i;
2088     - bool is_touch_on = value;
2089     bool do_report = false;
2090    
2091     /*
2092     @@ -1969,16 +1968,17 @@ static void wacom_wac_pad_event(struct hid_device *hdev, struct hid_field *field
2093     break;
2094    
2095     case WACOM_HID_WD_MUTE_DEVICE:
2096     - if (wacom_wac->shared->touch_input && value) {
2097     - wacom_wac->shared->is_touch_on = !wacom_wac->shared->is_touch_on;
2098     - is_touch_on = wacom_wac->shared->is_touch_on;
2099     - }
2100     -
2101     - /* fall through*/
2102     case WACOM_HID_WD_TOUCHONOFF:
2103     if (wacom_wac->shared->touch_input) {
2104     + bool *is_touch_on = &wacom_wac->shared->is_touch_on;
2105     +
2106     + if (equivalent_usage == WACOM_HID_WD_MUTE_DEVICE && value)
2107     + *is_touch_on = !(*is_touch_on);
2108     + else if (equivalent_usage == WACOM_HID_WD_TOUCHONOFF)
2109     + *is_touch_on = value;
2110     +
2111     input_report_switch(wacom_wac->shared->touch_input,
2112     - SW_MUTE_DEVICE, !is_touch_on);
2113     + SW_MUTE_DEVICE, !(*is_touch_on));
2114     input_sync(wacom_wac->shared->touch_input);
2115     }
2116     break;
2117     diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
2118     index 52a58b8b6e1b..a139940cd991 100644
2119     --- a/drivers/hwmon/pmbus/pmbus_core.c
2120     +++ b/drivers/hwmon/pmbus/pmbus_core.c
2121     @@ -21,6 +21,7 @@
2122    
2123     #include <linux/debugfs.h>
2124     #include <linux/kernel.h>
2125     +#include <linux/math64.h>
2126     #include <linux/module.h>
2127     #include <linux/init.h>
2128     #include <linux/err.h>
2129     @@ -499,8 +500,8 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
2130     static long pmbus_reg2data_direct(struct pmbus_data *data,
2131     struct pmbus_sensor *sensor)
2132     {
2133     - long val = (s16) sensor->data;
2134     - long m, b, R;
2135     + s64 b, val = (s16)sensor->data;
2136     + s32 m, R;
2137    
2138     m = data->info->m[sensor->class];
2139     b = data->info->b[sensor->class];
2140     @@ -528,11 +529,12 @@ static long pmbus_reg2data_direct(struct pmbus_data *data,
2141     R--;
2142     }
2143     while (R < 0) {
2144     - val = DIV_ROUND_CLOSEST(val, 10);
2145     + val = div_s64(val + 5LL, 10L); /* round closest */
2146     R++;
2147     }
2148    
2149     - return (val - b) / m;
2150     + val = div_s64(val - b, m);
2151     + return clamp_val(val, LONG_MIN, LONG_MAX);
2152     }
2153    
2154     /*
2155     @@ -656,7 +658,8 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
2156     static u16 pmbus_data2reg_direct(struct pmbus_data *data,
2157     struct pmbus_sensor *sensor, long val)
2158     {
2159     - long m, b, R;
2160     + s64 b, val64 = val;
2161     + s32 m, R;
2162    
2163     m = data->info->m[sensor->class];
2164     b = data->info->b[sensor->class];
2165     @@ -673,18 +676,18 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
2166     R -= 3; /* Adjust R and b for data in milli-units */
2167     b *= 1000;
2168     }
2169     - val = val * m + b;
2170     + val64 = val64 * m + b;
2171    
2172     while (R > 0) {
2173     - val *= 10;
2174     + val64 *= 10;
2175     R--;
2176     }
2177     while (R < 0) {
2178     - val = DIV_ROUND_CLOSEST(val, 10);
2179     + val64 = div_s64(val64 + 5LL, 10L); /* round closest */
2180     R++;
2181     }
2182    
2183     - return val;
2184     + return (u16)clamp_val(val64, S16_MIN, S16_MAX);
2185     }
2186    
2187     static u16 pmbus_data2reg_vid(struct pmbus_data *data,
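The pmbus_core conversion routines implement the PMBus direct data format, Y = (m * X + b) * 10^R, so reading a value back means computing X = (Y * 10^-R - b) / m. With b scaled by 1000 for milli-unit sensors and R applying several powers of ten, the intermediates can exceed a 32-bit range, which is what the move to s64 plus div_s64() fixes. A userspace rerun of the read direction with made-up coefficients; the final clamp to LONG_MIN/LONG_MAX is omitted here:

    #include <stdint.h>
    #include <stdio.h>

    static int64_t div_s64(int64_t a, int64_t b) { return a / b; }

    static long reg2data_direct(int16_t reg, int32_t m, int64_t b, int32_t R)
    {
            int64_t val = reg;              /* 64-bit from the start */

            for (; R > 0; R--)
                    val *= 10;
            for (; R < 0; R++)
                    val = div_s64(val + 5, 10);     /* round to closest */

            return (long)div_s64(val - b, m);
    }

    int main(void)
    {
            /* hypothetical sensor: m = 16, b = -500, R = -1, raw = 0x7fff */
            printf("%ld\n", reg2data_direct(0x7fff, 16, -500, -1));  /* 236 */
            return 0;
    }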
2188     diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
2189     index 31186ead5a40..509a6007cdf6 100644
2190     --- a/drivers/i2c/i2c-boardinfo.c
2191     +++ b/drivers/i2c/i2c-boardinfo.c
2192     @@ -86,6 +86,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
2193     property_entries_dup(info->properties);
2194     if (IS_ERR(devinfo->board_info.properties)) {
2195     status = PTR_ERR(devinfo->board_info.properties);
2196     + kfree(devinfo);
2197     break;
2198     }
2199     }
2200     @@ -98,6 +99,7 @@ int i2c_register_board_info(int busnum, struct i2c_board_info const *info, unsig
2201     GFP_KERNEL);
2202     if (!devinfo->board_info.resources) {
2203     status = -ENOMEM;
2204     + kfree(devinfo);
2205     break;
2206     }
2207     }
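Both i2c-boardinfo hunks close the same leak shape: devinfo is allocated at the top of the loop body, so every later failure inside that body must free it before breaking out. A tiny userspace model of the pattern, with illustrative names:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int copy_properties(int should_fail)
    {
            return should_fail ? -EINVAL : 0;
    }

    static int register_one(int should_fail)
    {
            int status;
            int *devinfo = malloc(sizeof(*devinfo));

            if (!devinfo)
                    return -ENOMEM;

            status = copy_properties(should_fail);
            if (status) {
                    free(devinfo);  /* the fix: drop the partial object */
                    return status;
            }

            /* devinfo would be added to a global list here */
            free(devinfo);          /* keep the model itself leak-free */
            return 0;
    }

    int main(void)
    {
            printf("%d %d\n", register_one(0), register_one(1));
            return 0;
    }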
2208     diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
2209     index 4df32cf1650e..172753b14a4f 100644
2210     --- a/drivers/iio/adc/stm32-adc.c
2211     +++ b/drivers/iio/adc/stm32-adc.c
2212     @@ -1314,6 +1314,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
2213     {
2214     struct stm32_adc *adc = iio_priv(indio_dev);
2215     unsigned int watermark = STM32_DMA_BUFFER_SIZE / 2;
2216     + unsigned int rx_buf_sz = STM32_DMA_BUFFER_SIZE;
2217    
2218     /*
2219     * dma cyclic transfers are used, buffer is split into two periods.
2220     @@ -1322,7 +1323,7 @@ static int stm32_adc_set_watermark(struct iio_dev *indio_dev, unsigned int val)
2221     * - one buffer (period) driver can push with iio_trigger_poll().
2222     */
2223     watermark = min(watermark, val * (unsigned)(sizeof(u16)));
2224     - adc->rx_buf_sz = watermark * 2;
2225     + adc->rx_buf_sz = min(rx_buf_sz, watermark * 2 * adc->num_conv);
2226    
2227     return 0;
2228     }
2229     diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c
2230     index 840a6cbd5f0f..8cfac6d1cec4 100644
2231     --- a/drivers/iio/chemical/ccs811.c
2232     +++ b/drivers/iio/chemical/ccs811.c
2233     @@ -91,7 +91,6 @@ static const struct iio_chan_spec ccs811_channels[] = {
2234     .channel2 = IIO_MOD_CO2,
2235     .modified = 1,
2236     .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
2237     - BIT(IIO_CHAN_INFO_OFFSET) |
2238     BIT(IIO_CHAN_INFO_SCALE),
2239     .scan_index = 0,
2240     .scan_type = {
2241     @@ -245,24 +244,18 @@ static int ccs811_read_raw(struct iio_dev *indio_dev,
2242     switch (chan->channel2) {
2243     case IIO_MOD_CO2:
2244     *val = 0;
2245     - *val2 = 12834;
2246     + *val2 = 100;
2247     return IIO_VAL_INT_PLUS_MICRO;
2248     case IIO_MOD_VOC:
2249     *val = 0;
2250     - *val2 = 84246;
2251     - return IIO_VAL_INT_PLUS_MICRO;
2252     + *val2 = 100;
2253     + return IIO_VAL_INT_PLUS_NANO;
2254     default:
2255     return -EINVAL;
2256     }
2257     default:
2258     return -EINVAL;
2259     }
2260     - case IIO_CHAN_INFO_OFFSET:
2261     - if (!(chan->type == IIO_CONCENTRATION &&
2262     - chan->channel2 == IIO_MOD_CO2))
2263     - return -EINVAL;
2264     - *val = -400;
2265     - return IIO_VAL_INT;
2266     default:
2267     return -EINVAL;
2268     }
2269     diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
2270     index 141ea228aac6..f5954981e9ee 100644
2271     --- a/drivers/input/rmi4/rmi_driver.c
2272     +++ b/drivers/input/rmi4/rmi_driver.c
2273     @@ -41,6 +41,13 @@ void rmi_free_function_list(struct rmi_device *rmi_dev)
2274    
2275     rmi_dbg(RMI_DEBUG_CORE, &rmi_dev->dev, "Freeing function list\n");
2276    
2277     + /* Doing it in the reverse order so F01 will be removed last */
2278     + list_for_each_entry_safe_reverse(fn, tmp,
2279     + &data->function_list, node) {
2280     + list_del(&fn->node);
2281     + rmi_unregister_function(fn);
2282     + }
2283     +
2284     devm_kfree(&rmi_dev->dev, data->irq_memory);
2285     data->irq_memory = NULL;
2286     data->irq_status = NULL;
2287     @@ -50,13 +57,6 @@ void rmi_free_function_list(struct rmi_device *rmi_dev)
2288    
2289     data->f01_container = NULL;
2290     data->f34_container = NULL;
2291     -
2292     - /* Doing it in the reverse order so F01 will be removed last */
2293     - list_for_each_entry_safe_reverse(fn, tmp,
2294     - &data->function_list, node) {
2295     - list_del(&fn->node);
2296     - rmi_unregister_function(fn);
2297     - }
2298     }
2299    
2300     static int reset_one_function(struct rmi_function *fn)
2301     diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
2302     index ad71a5e768dc..7ccbb370a9a8 100644
2303     --- a/drivers/input/rmi4/rmi_f03.c
2304     +++ b/drivers/input/rmi4/rmi_f03.c
2305     @@ -32,6 +32,7 @@ struct f03_data {
2306     struct rmi_function *fn;
2307    
2308     struct serio *serio;
2309     + bool serio_registered;
2310    
2311     unsigned int overwrite_buttons;
2312    
2313     @@ -138,6 +139,37 @@ static int rmi_f03_initialize(struct f03_data *f03)
2314     return 0;
2315     }
2316    
2317     +static int rmi_f03_pt_open(struct serio *serio)
2318     +{
2319     + struct f03_data *f03 = serio->port_data;
2320     + struct rmi_function *fn = f03->fn;
2321     + const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
2322     + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET;
2323     + u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
2324     + int error;
2325     +
2326     + /*
2327     + * Consume any pending data. Some devices like to spam with
2328     + * 0xaa 0x00 announcements which may confuse us as we try to
2329     + * probe the device.
2330     + */
2331     + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len);
2332     + if (!error)
2333     + rmi_dbg(RMI_DEBUG_FN, &fn->dev,
2334     + "%s: Consumed %*ph (%d) from PS2 guest\n",
2335     + __func__, ob_len, obs, ob_len);
2336     +
2337     + return fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
2338     +}
2339     +
2340     +static void rmi_f03_pt_close(struct serio *serio)
2341     +{
2342     + struct f03_data *f03 = serio->port_data;
2343     + struct rmi_function *fn = f03->fn;
2344     +
2345     + fn->rmi_dev->driver->clear_irq_bits(fn->rmi_dev, fn->irq_mask);
2346     +}
2347     +
2348     static int rmi_f03_register_pt(struct f03_data *f03)
2349     {
2350     struct serio *serio;
2351     @@ -148,6 +180,8 @@ static int rmi_f03_register_pt(struct f03_data *f03)
2352    
2353     serio->id.type = SERIO_PS_PSTHRU;
2354     serio->write = rmi_f03_pt_write;
2355     + serio->open = rmi_f03_pt_open;
2356     + serio->close = rmi_f03_pt_close;
2357     serio->port_data = f03;
2358    
2359     strlcpy(serio->name, "Synaptics RMI4 PS/2 pass-through",
2360     @@ -184,17 +218,27 @@ static int rmi_f03_probe(struct rmi_function *fn)
2361     f03->device_count);
2362    
2363     dev_set_drvdata(dev, f03);
2364     -
2365     - error = rmi_f03_register_pt(f03);
2366     - if (error)
2367     - return error;
2368     -
2369     return 0;
2370     }
2371    
2372     static int rmi_f03_config(struct rmi_function *fn)
2373     {
2374     - fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
2375     + struct f03_data *f03 = dev_get_drvdata(&fn->dev);
2376     + int error;
2377     +
2378     + if (!f03->serio_registered) {
2379     + error = rmi_f03_register_pt(f03);
2380     + if (error)
2381     + return error;
2382     +
2383     + f03->serio_registered = true;
2384     + } else {
2385     + /*
2386     + * We must be re-configuring the sensor, just enable
2387     + * interrupts for this function.
2388     + */
2389     + fn->rmi_dev->driver->set_irq_bits(fn->rmi_dev, fn->irq_mask);
2390     + }
2391    
2392     return 0;
2393     }
2394     @@ -204,7 +248,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
2395     struct rmi_device *rmi_dev = fn->rmi_dev;
2396     struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
2397     struct f03_data *f03 = dev_get_drvdata(&fn->dev);
2398     - u16 data_addr = fn->fd.data_base_addr;
2399     + const u16 data_addr = fn->fd.data_base_addr + RMI_F03_OB_OFFSET;
2400     const u8 ob_len = f03->rx_queue_length * RMI_F03_OB_SIZE;
2401     u8 obs[RMI_F03_QUEUE_LENGTH * RMI_F03_OB_SIZE];
2402     u8 ob_status;
2403     @@ -226,8 +270,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
2404     drvdata->attn_data.size -= ob_len;
2405     } else {
2406     /* Grab all of the data registers, and check them for data */
2407     - error = rmi_read_block(fn->rmi_dev, data_addr + RMI_F03_OB_OFFSET,
2408     - &obs, ob_len);
2409     + error = rmi_read_block(fn->rmi_dev, data_addr, &obs, ob_len);
2410     if (error) {
2411     dev_err(&fn->dev,
2412     "%s: Failed to read F03 output buffers: %d\n",
2413     @@ -266,7 +309,8 @@ static void rmi_f03_remove(struct rmi_function *fn)
2414     {
2415     struct f03_data *f03 = dev_get_drvdata(&fn->dev);
2416    
2417     - serio_unregister_port(f03->serio);
2418     + if (f03->serio_registered)
2419     + serio_unregister_port(f03->serio);
2420     }
2421    
2422     struct rmi_function_handler rmi_f03_handler = {
2423     diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
2424     index 658c54b3b07a..1598d1e04989 100644
2425     --- a/drivers/md/bcache/btree.c
2426     +++ b/drivers/md/bcache/btree.c
2427     @@ -807,7 +807,10 @@ int bch_btree_cache_alloc(struct cache_set *c)
2428     c->shrink.scan_objects = bch_mca_scan;
2429     c->shrink.seeks = 4;
2430     c->shrink.batch = c->btree_pages * 2;
2431     - register_shrinker(&c->shrink);
2432     +
2433     + if (register_shrinker(&c->shrink))
2434     + pr_warn("bcache: %s: could not register shrinker",
2435     + __func__);
2436    
2437     return 0;
2438     }
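register_shrinker() can fail on this kernel (it allocates per-node counters and returns an error such as -ENOMEM), so ignoring its return value hides a shrinker that silently never runs; the bcache hunk at least warns. A sketch of checking such a registration, with register_thing() standing in for register_shrinker():

    #include <errno.h>
    #include <stdio.h>

    static int register_thing(int should_fail)
    {
            return should_fail ? -ENOMEM : 0;
    }

    int main(void)
    {
            if (register_thing(1))
                    fprintf(stderr, "bcache: %s: could not register shrinker\n",
                            __func__);
            return 0;
    }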
2439     diff --git a/drivers/media/usb/usbtv/usbtv-core.c b/drivers/media/usb/usbtv/usbtv-core.c
2440     index f06f09a0876e..71fb5734995b 100644
2441     --- a/drivers/media/usb/usbtv/usbtv-core.c
2442     +++ b/drivers/media/usb/usbtv/usbtv-core.c
2443     @@ -144,6 +144,7 @@ static void usbtv_disconnect(struct usb_interface *intf)
2444    
2445     static const struct usb_device_id usbtv_id_table[] = {
2446     { USB_DEVICE(0x1b71, 0x3002) },
2447     + { USB_DEVICE(0x1f71, 0x3301) },
2448     {}
2449     };
2450     MODULE_DEVICE_TABLE(usb, usbtv_id_table);
2451     diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
2452     index 78b3172c8e6e..d46cb1f0868f 100644
2453     --- a/drivers/misc/mei/pci-me.c
2454     +++ b/drivers/misc/mei/pci-me.c
2455     @@ -238,8 +238,11 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2456     */
2457     mei_me_set_pm_domain(dev);
2458    
2459     - if (mei_pg_is_enabled(dev))
2460     + if (mei_pg_is_enabled(dev)) {
2461     pm_runtime_put_noidle(&pdev->dev);
2462     + if (hw->d0i3_supported)
2463     + pm_runtime_allow(&pdev->dev);
2464     + }
2465    
2466     dev_dbg(&pdev->dev, "initialization successful.\n");
2467    
2468     diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c
2469     index 81370c79aa48..7ad0db65a6fa 100644
2470     --- a/drivers/mtd/nand/denali_pci.c
2471     +++ b/drivers/mtd/nand/denali_pci.c
2472     @@ -124,3 +124,7 @@ static struct pci_driver denali_pci_driver = {
2473     };
2474    
2475     module_pci_driver(denali_pci_driver);
2476     +
2477     +MODULE_DESCRIPTION("PCI driver for Denali NAND controller");
2478     +MODULE_AUTHOR("Intel Corporation and its suppliers");
2479     +MODULE_LICENSE("GPL v2");
2480     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2481     index 3cbe771b3352..a22336fef66b 100644
2482     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2483     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
2484     @@ -2133,8 +2133,8 @@ static int bnxt_get_module_eeprom(struct net_device *dev,
2485     /* Read A2 portion of the EEPROM */
2486     if (length) {
2487     start -= ETH_MODULE_SFF_8436_LEN;
2488     - bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
2489     - length, data);
2490     + rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
2491     + start, length, data);
2492     }
2493     return rc;
2494     }
2495     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2496     index 667dbc7d4a4e..d1a44a84c97e 100644
2497     --- a/drivers/net/ethernet/intel/igb/igb_main.c
2498     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
2499     @@ -3331,7 +3331,7 @@ static int __igb_close(struct net_device *netdev, bool suspending)
2500    
2501     int igb_close(struct net_device *netdev)
2502     {
2503     - if (netif_device_present(netdev))
2504     + if (netif_device_present(netdev) || netdev->dismantle)
2505     return __igb_close(netdev, false);
2506     return 0;
2507     }
2508     diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
2509     index d147dc7d0f77..1dd3a1264a53 100644
2510     --- a/drivers/net/ethernet/marvell/mvpp2.c
2511     +++ b/drivers/net/ethernet/marvell/mvpp2.c
2512     @@ -5597,7 +5597,7 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
2513     sizeof(*txq_pcpu->buffs),
2514     GFP_KERNEL);
2515     if (!txq_pcpu->buffs)
2516     - goto cleanup;
2517     + return -ENOMEM;
2518    
2519     txq_pcpu->count = 0;
2520     txq_pcpu->reserved_num = 0;
2521     @@ -5610,26 +5610,10 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
2522     &txq_pcpu->tso_headers_dma,
2523     GFP_KERNEL);
2524     if (!txq_pcpu->tso_headers)
2525     - goto cleanup;
2526     + return -ENOMEM;
2527     }
2528    
2529     return 0;
2530     -cleanup:
2531     - for_each_present_cpu(cpu) {
2532     - txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
2533     - kfree(txq_pcpu->buffs);
2534     -
2535     - dma_free_coherent(port->dev->dev.parent,
2536     - txq_pcpu->size * TSO_HEADER_SIZE,
2537     - txq_pcpu->tso_headers,
2538     - txq_pcpu->tso_headers_dma);
2539     - }
2540     -
2541     - dma_free_coherent(port->dev->dev.parent,
2542     - txq->size * MVPP2_DESC_ALIGNED_SIZE,
2543     - txq->descs, txq->descs_dma);
2544     -
2545     - return -ENOMEM;
2546     }
2547    
2548     /* Free allocated TXQ resources */
2549     diff --git a/drivers/net/ethernet/xilinx/Kconfig b/drivers/net/ethernet/xilinx/Kconfig
2550     index 6d68c8a8f4f2..da4ec575ccf9 100644
2551     --- a/drivers/net/ethernet/xilinx/Kconfig
2552     +++ b/drivers/net/ethernet/xilinx/Kconfig
2553     @@ -34,6 +34,7 @@ config XILINX_AXI_EMAC
2554     config XILINX_LL_TEMAC
2555     tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
2556     depends on (PPC || MICROBLAZE)
2557     + depends on !64BIT || BROKEN
2558     select PHYLIB
2559     ---help---
2560     This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
2561     diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c
2562     index aebc08beceba..21b3f36e023a 100644
2563     --- a/drivers/net/phy/marvell10g.c
2564     +++ b/drivers/net/phy/marvell10g.c
2565     @@ -16,6 +16,7 @@
2566     * link takes priority and the other port is completely locked out.
2567     */
2568     #include <linux/phy.h>
2569     +#include <linux/marvell_phy.h>
2570    
2571     enum {
2572     MV_PCS_BASE_T = 0x0000,
2573     @@ -338,7 +339,7 @@ static int mv3310_read_status(struct phy_device *phydev)
2574     static struct phy_driver mv3310_drivers[] = {
2575     {
2576     .phy_id = 0x002b09aa,
2577     - .phy_id_mask = 0xffffffff,
2578     + .phy_id_mask = MARVELL_PHY_ID_MASK,
2579     .name = "mv88x3310",
2580     .features = SUPPORTED_10baseT_Full |
2581     SUPPORTED_100baseT_Full |
2582     @@ -360,7 +361,7 @@ static struct phy_driver mv3310_drivers[] = {
2583     module_phy_driver(mv3310_drivers);
2584    
2585     static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
2586     - { 0x002b09aa, 0xffffffff },
2587     + { 0x002b09aa, MARVELL_PHY_ID_MASK },
2588     { },
2589     };
2590     MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
2591     diff --git a/drivers/net/wireless/ath/ath9k/channel.c b/drivers/net/wireless/ath/ath9k/channel.c
2592     index f0439f2d566b..173891b11b2d 100644
2593     --- a/drivers/net/wireless/ath/ath9k/channel.c
2594     +++ b/drivers/net/wireless/ath/ath9k/channel.c
2595     @@ -1112,7 +1112,7 @@ ath_chanctx_send_vif_ps_frame(struct ath_softc *sc, struct ath_vif *avp,
2596     if (!avp->assoc)
2597     return false;
2598    
2599     - skb = ieee80211_nullfunc_get(sc->hw, vif);
2600     + skb = ieee80211_nullfunc_get(sc->hw, vif, false);
2601     if (!skb)
2602     return false;
2603    
2604     diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
2605     index 9c889a32fe24..223fb77a3aa9 100644
2606     --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
2607     +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h
2608     @@ -209,8 +209,6 @@ static inline void iwl_fw_dbg_stop_recording(struct iwl_fw_runtime *fwrt)
2609    
2610     static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt)
2611     {
2612     - iwl_fw_dbg_stop_recording(fwrt);
2613     -
2614     fwrt->dump.conf = FW_DBG_INVALID;
2615     }
2616    
2617     diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2618     index 53e269d54050..0ae7624eac9d 100644
2619     --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2620     +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c
2621     @@ -1181,6 +1181,8 @@ unsigned int iwl_mvm_get_wd_timeout(struct iwl_mvm *mvm,
2622     return le32_to_cpu(txq_timer->p2p_go);
2623     case NL80211_IFTYPE_P2P_DEVICE:
2624     return le32_to_cpu(txq_timer->p2p_device);
2625     + case NL80211_IFTYPE_MONITOR:
2626     + return default_timeout;
2627     default:
2628     WARN_ON(1);
2629     return mvm->cfg->base_params->wd_timeout;
2630     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
2631     index c59f4581e972..ac05fd1e74c4 100644
2632     --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
2633     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c
2634     @@ -49,6 +49,7 @@
2635     *
2636     *****************************************************************************/
2637     #include "iwl-trans.h"
2638     +#include "iwl-prph.h"
2639     #include "iwl-context-info.h"
2640     #include "internal.h"
2641    
2642     @@ -156,6 +157,11 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
2643    
2644     trans_pcie->is_down = true;
2645    
2646     + /* Stop dbgc before stopping device */
2647     + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
2648     + udelay(100);
2649     + iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
2650     +
2651     /* tell the device to stop sending interrupts */
2652     iwl_disable_interrupts(trans);
2653    
2654     diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2655     index 2e3e013ec95a..12a9b86d71ea 100644
2656     --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2657     +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
2658     @@ -1138,6 +1138,15 @@ static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
2659    
2660     trans_pcie->is_down = true;
2661    
2662     + /* Stop dbgc before stopping device */
2663     + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
2664     + iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
2665     + } else {
2666     + iwl_write_prph(trans, DBGC_IN_SAMPLE, 0);
2667     + udelay(100);
2668     + iwl_write_prph(trans, DBGC_OUT_CTRL, 0);
2669     + }
2670     +
2671     /* tell the device to stop sending interrupts */
2672     iwl_disable_interrupts(trans);
2673    
2674     diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c
2675     index a52224836a2b..666b88cb2cfe 100644
2676     --- a/drivers/net/wireless/st/cw1200/sta.c
2677     +++ b/drivers/net/wireless/st/cw1200/sta.c
2678     @@ -198,7 +198,7 @@ void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv,
2679    
2680     priv->bss_loss_state++;
2681    
2682     - skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
2683     + skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
2684     WARN_ON(!skb);
2685     if (skb)
2686     cw1200_tx(priv->hw, NULL, skb);
2687     @@ -2266,7 +2266,7 @@ static int cw1200_upload_null(struct cw1200_common *priv)
2688     .rate = 0xFF,
2689     };
2690    
2691     - frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
2692     + frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
2693     if (!frame.skb)
2694     return -ENOMEM;
2695    
2696     diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c
2697     index 9915d83a4a30..6d02c660b4ab 100644
2698     --- a/drivers/net/wireless/ti/wl1251/main.c
2699     +++ b/drivers/net/wireless/ti/wl1251/main.c
2700     @@ -566,7 +566,7 @@ static int wl1251_build_null_data(struct wl1251 *wl)
2701     size = sizeof(struct wl12xx_null_data_template);
2702     ptr = NULL;
2703     } else {
2704     - skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
2705     + skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
2706     if (!skb)
2707     goto out;
2708     size = skb->len;
2709     diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
2710     index 2bfc12fdc929..761cf8573a80 100644
2711     --- a/drivers/net/wireless/ti/wlcore/cmd.c
2712     +++ b/drivers/net/wireless/ti/wlcore/cmd.c
2713     @@ -1069,7 +1069,8 @@ int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2714     ptr = NULL;
2715     } else {
2716     skb = ieee80211_nullfunc_get(wl->hw,
2717     - wl12xx_wlvif_to_vif(wlvif));
2718     + wl12xx_wlvif_to_vif(wlvif),
2719     + false);
2720     if (!skb)
2721     goto out;
2722     size = skb->len;
2723     @@ -1096,7 +1097,7 @@ int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
2724     struct sk_buff *skb = NULL;
2725     int ret = -ENOMEM;
2726    
2727     - skb = ieee80211_nullfunc_get(wl->hw, vif);
2728     + skb = ieee80211_nullfunc_get(wl->hw, vif, false);
2729     if (!skb)
2730     goto out;
2731    
2732     diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
2733     index 8b8689c6d887..391432e2725d 100644
2734     --- a/drivers/net/xen-netfront.c
2735     +++ b/drivers/net/xen-netfront.c
2736     @@ -87,6 +87,8 @@ struct netfront_cb {
2737     /* IRQ name is queue name with "-tx" or "-rx" appended */
2738     #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
2739    
2740     +static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
2741     +
2742     struct netfront_stats {
2743     u64 packets;
2744     u64 bytes;
2745     @@ -2021,10 +2023,12 @@ static void netback_changed(struct xenbus_device *dev,
2746     break;
2747    
2748     case XenbusStateClosed:
2749     + wake_up_all(&module_unload_q);
2750     if (dev->state == XenbusStateClosed)
2751     break;
2752     /* Missed the backend's CLOSING state -- fallthrough */
2753     case XenbusStateClosing:
2754     + wake_up_all(&module_unload_q);
2755     xenbus_frontend_closed(dev);
2756     break;
2757     }
2758     @@ -2130,6 +2134,20 @@ static int xennet_remove(struct xenbus_device *dev)
2759    
2760     dev_dbg(&dev->dev, "%s\n", dev->nodename);
2761    
2762     + if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
2763     + xenbus_switch_state(dev, XenbusStateClosing);
2764     + wait_event(module_unload_q,
2765     + xenbus_read_driver_state(dev->otherend) ==
2766     + XenbusStateClosing);
2767     +
2768     + xenbus_switch_state(dev, XenbusStateClosed);
2769     + wait_event(module_unload_q,
2770     + xenbus_read_driver_state(dev->otherend) ==
2771     + XenbusStateClosed ||
2772     + xenbus_read_driver_state(dev->otherend) ==
2773     + XenbusStateUnknown);
2774     + }
2775     +
2776     xennet_disconnect_backend(info);
2777    
2778     unregister_netdev(info->netdev);
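The xen-netfront removal path gains a proper xenbus handshake: switch to Closing, wait until the backend has followed, switch to Closed, and wait again (also accepting Unknown, in case the backend node vanished) before disconnecting anything. The wake_up_all() calls added to netback_changed() are what complete those waits. A userspace cartoon of the two-step wait, with backend_state() faking the other end catching up:

    #include <stdio.h>

    enum state { CONNECTED, CLOSING, CLOSED, UNKNOWN };

    static enum state backend_state(void)
    {
            static int calls;
            /* backend lags one poll behind each frontend transition */
            return ++calls < 2 ? CONNECTED : (calls < 4 ? CLOSING : CLOSED);
    }

    static void wait_for(enum state target)
    {
            while (backend_state() != target)
                    ;       /* models wait_event(module_unload_q, ...) */
    }

    int main(void)
    {
            printf("frontend -> Closing\n");
            wait_for(CLOSING);
            printf("frontend -> Closed\n");
            wait_for(CLOSED);
            printf("safe to disconnect and unregister\n");
            return 0;
    }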
2779     diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
2780     index bf33663218cd..9ff8529a64a9 100644
2781     --- a/drivers/nvme/host/fabrics.h
2782     +++ b/drivers/nvme/host/fabrics.h
2783     @@ -142,4 +142,34 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts);
2784     int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
2785     bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
2786    
2787     +static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
2788     + struct request *rq)
2789     +{
2790     + struct nvme_command *cmd = nvme_req(rq)->cmd;
2791     +
2792     + /*
2793     + * We cannot accept any other command until the connect command has
2794     + * completed, so only allow connect to pass.
2795     + */
2796     + if (!blk_rq_is_passthrough(rq) ||
2797     + cmd->common.opcode != nvme_fabrics_command ||
2798     + cmd->fabrics.fctype != nvme_fabrics_type_connect) {
2799     + /*
2800     + * Reconnecting state means transport disruption, which can take
2801     + * a long time and might even fail permanently; fail fast to
2802     + * give upper layers a chance to failover.
2803     + * Deleting state means that the ctrl will never accept commands
2804     + * again; fail it permanently.
2805     + */
2806     + if (ctrl->state == NVME_CTRL_RECONNECTING ||
2807     + ctrl->state == NVME_CTRL_DELETING) {
2808     + nvme_req(rq)->status = NVME_SC_ABORT_REQ;
2809     + return BLK_STS_IOERR;
2810     + }
2811     + return BLK_STS_RESOURCE; /* try again later */
2812     + }
2813     +
2814     + return BLK_STS_OK;
2815     +}
2816     +
2817     #endif /* _NVME_FABRICS_H */
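
nvmf_check_init_req() centralizes the rule that only a Fabrics Connect command may pass before a queue is live: BLK_STS_RESOURCE asks blk-mq to retry later (transient, still connecting), while BLK_STS_IOERR fails fast when the controller is reconnecting or being deleted. The FC, RDMA and loop transports below all call it from ->queue_rq() behind a per-queue LIVE bit; a sketch of that caller shape, with MY_Q_LIVE and struct my_queue as placeholder names:

        #include <linux/bitops.h>
        #include <linux/blk-mq.h>
        #include "fabrics.h"    /* nvmf_check_init_req(); assumes drivers/nvme/host */

        enum { MY_Q_LIVE = 0 };

        struct my_queue {
                unsigned long flags;
                struct nvme_ctrl *ctrl;
        };

        static inline blk_status_t my_queue_is_ready(struct my_queue *queue,
                                                     struct request *rq)
        {
                /* fast path: a live queue accepts everything */
                if (unlikely(!test_bit(MY_Q_LIVE, &queue->flags)))
                        return nvmf_check_init_req(queue->ctrl, rq);
                return BLK_STS_OK;
        }
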
2818     diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
2819     index be49d0f79381..3148d760d825 100644
2820     --- a/drivers/nvme/host/fc.c
2821     +++ b/drivers/nvme/host/fc.c
2822     @@ -41,6 +41,7 @@
2823    
2824     enum nvme_fc_queue_flags {
2825     NVME_FC_Q_CONNECTED = (1 << 0),
2826     + NVME_FC_Q_LIVE = (1 << 1),
2827     };
2828    
2829     #define NVMEFC_QUEUE_DELAY 3 /* ms units */
2830     @@ -1654,6 +1655,7 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
2831     if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
2832     return;
2833    
2834     + clear_bit(NVME_FC_Q_LIVE, &queue->flags);
2835     /*
2836     * Current implementation never disconnects a single queue.
2837     * It always terminates a whole association. So there is never
2838     @@ -1661,7 +1663,6 @@ nvme_fc_free_queue(struct nvme_fc_queue *queue)
2839     */
2840    
2841     queue->connection_id = 0;
2842     - clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
2843     }
2844    
2845     static void
2846     @@ -1740,6 +1741,8 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
2847     ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
2848     if (ret)
2849     break;
2850     +
2851     + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
2852     }
2853    
2854     return ret;
2855     @@ -2048,6 +2051,14 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
2856     return BLK_STS_RESOURCE;
2857     }
2858    
2859     +static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
2860     + struct request *rq)
2861     +{
2862     + if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
2863     + return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
2864     + return BLK_STS_OK;
2865     +}
2866     +
2867     static blk_status_t
2868     nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2869     const struct blk_mq_queue_data *bd)
2870     @@ -2063,6 +2074,10 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
2871     u32 data_len;
2872     blk_status_t ret;
2873    
2874     + ret = nvme_fc_is_ready(queue, rq);
2875     + if (unlikely(ret))
2876     + return ret;
2877     +
2878     ret = nvme_setup_cmd(ns, rq, sqe);
2879     if (ret)
2880     return ret;
2881     @@ -2398,6 +2413,8 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl)
2882     if (ret)
2883     goto out_disconnect_admin_queue;
2884    
2885     + set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
2886     +
2887     /*
2888     * Check controller capabilities
2889     *
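
The FC hunks split queue state into CONNECTED (resources exist) and LIVE (the Connect command has completed), setting LIVE only after nvmf_connect_admin_queue()/nvmf_connect_io_queue() succeed and clearing it first thing on teardown. A minimal sketch of that bit lifecycle with generic names:

        #include <linux/bitops.h>

        enum { Q_CONNECTED = 0, Q_LIVE = 1 };

        static int example_connect(unsigned long *flags, int connect_err)
        {
                /* connect_err stands in for the nvmf_connect_*_queue() result */
                if (connect_err)
                        return connect_err;
                set_bit(Q_LIVE, flags);         /* queue may now carry normal I/O */
                return 0;
        }

        static void example_free_queue(unsigned long *flags)
        {
                if (!test_and_clear_bit(Q_CONNECTED, flags))
                        return;
                clear_bit(Q_LIVE, flags);       /* fence new I/O before tearing down */
                /* ... release queue resources ... */
        }
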
2890     diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
2891     index 75539f7c58b9..cdd2fd509ddc 100644
2892     --- a/drivers/nvme/host/pci.c
2893     +++ b/drivers/nvme/host/pci.c
2894     @@ -1617,6 +1617,7 @@ static void nvme_free_host_mem(struct nvme_dev *dev)
2895     dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
2896     dev->host_mem_descs, dev->host_mem_descs_dma);
2897     dev->host_mem_descs = NULL;
2898     + dev->nr_host_mem_descs = 0;
2899     }
2900    
2901     static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
2902     @@ -1645,7 +1646,7 @@ static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
2903     if (!bufs)
2904     goto out_free_descs;
2905    
2906     - for (size = 0; size < preferred; size += len) {
2907     + for (size = 0; size < preferred && i < max_entries; size += len) {
2908     dma_addr_t dma_addr;
2909    
2910     len = min_t(u64, chunk_size, preferred - size);
2911     @@ -2282,7 +2283,7 @@ static int nvme_dev_map(struct nvme_dev *dev)
2912     return -ENODEV;
2913     }
2914    
2915     -static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
2916     +static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
2917     {
2918     if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
2919     /*
2920     @@ -2297,6 +2298,14 @@ static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
2921     (dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
2922     dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
2923     return NVME_QUIRK_NO_DEEPEST_PS;
2924     + } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
2925     + /*
2926     + * Samsung SSD 960 EVO drops off the PCIe bus after system
2927     + * suspend on a Ryzen board, ASUS PRIME B350M-A.
2928     + */
2929     + if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
2930     + dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
2931     + return NVME_QUIRK_NO_APST;
2932     }
2933    
2934     return 0;
2935     @@ -2336,7 +2345,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2936     if (result)
2937     goto unmap;
2938    
2939     - quirks |= check_dell_samsung_bug(pdev);
2940     + quirks |= check_vendor_combination_bug(pdev);
2941    
2942     result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
2943     quirks);
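
The rename from check_dell_samsung_bug() to check_vendor_combination_bug() reflects that the function is now a general table of PCI-ID-plus-DMI quirks; the new entry disables APST for a Samsung 960 EVO on the ASUS PRIME B350M-A. A sketch of the matching pattern, with MY_QUIRK_NO_APST standing in for the driver's NVME_QUIRK_NO_APST flag:

        #include <linux/dmi.h>
        #include <linux/pci.h>

        #define MY_QUIRK_NO_APST        (1 << 0)

        static unsigned long example_board_quirks(struct pci_dev *pdev)
        {
                /* Samsung SSD 960 EVO drops off the bus after suspend here */
                if (pdev->vendor == 0x144d && pdev->device == 0xa804 &&
                    dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
                    dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
                        return MY_QUIRK_NO_APST;

                return 0;
        }
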
2944     diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
2945     index 0ebb539f3bd3..33d4431c2b4b 100644
2946     --- a/drivers/nvme/host/rdma.c
2947     +++ b/drivers/nvme/host/rdma.c
2948     @@ -67,6 +67,9 @@ struct nvme_rdma_request {
2949     struct nvme_request req;
2950     struct ib_mr *mr;
2951     struct nvme_rdma_qe sqe;
2952     + union nvme_result result;
2953     + __le16 status;
2954     + refcount_t ref;
2955     struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
2956     u32 num_sge;
2957     int nents;
2958     @@ -1177,6 +1180,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
2959     req->num_sge = 1;
2960     req->inline_data = false;
2961     req->mr->need_inval = false;
2962     + refcount_set(&req->ref, 2); /* send and recv completions */
2963    
2964     c->common.flags |= NVME_CMD_SGL_METABUF;
2965    
2966     @@ -1213,8 +1217,19 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
2967    
2968     static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
2969     {
2970     - if (unlikely(wc->status != IB_WC_SUCCESS))
2971     + struct nvme_rdma_qe *qe =
2972     + container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
2973     + struct nvme_rdma_request *req =
2974     + container_of(qe, struct nvme_rdma_request, sqe);
2975     + struct request *rq = blk_mq_rq_from_pdu(req);
2976     +
2977     + if (unlikely(wc->status != IB_WC_SUCCESS)) {
2978     nvme_rdma_wr_error(cq, wc, "SEND");
2979     + return;
2980     + }
2981     +
2982     + if (refcount_dec_and_test(&req->ref))
2983     + nvme_end_request(rq, req->status, req->result);
2984     }
2985    
2986     /*
2987     @@ -1359,14 +1374,19 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
2988     }
2989     req = blk_mq_rq_to_pdu(rq);
2990    
2991     - if (rq->tag == tag)
2992     - ret = 1;
2993     + req->status = cqe->status;
2994     + req->result = cqe->result;
2995    
2996     if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
2997     wc->ex.invalidate_rkey == req->mr->rkey)
2998     req->mr->need_inval = false;
2999    
3000     - nvme_end_request(rq, cqe->status, cqe->result);
3001     + if (refcount_dec_and_test(&req->ref)) {
3002     + if (rq->tag == tag)
3003     + ret = 1;
3004     + nvme_end_request(rq, req->status, req->result);
3005     + }
3006     +
3007     return ret;
3008     }
3009    
3010     @@ -1603,31 +1623,11 @@ nvme_rdma_timeout(struct request *rq, bool reserved)
3011     * We cannot accept any other command until the Connect command has completed.
3012     */
3013     static inline blk_status_t
3014     -nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
3015     -{
3016     - if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
3017     - struct nvme_command *cmd = nvme_req(rq)->cmd;
3018     -
3019     - if (!blk_rq_is_passthrough(rq) ||
3020     - cmd->common.opcode != nvme_fabrics_command ||
3021     - cmd->fabrics.fctype != nvme_fabrics_type_connect) {
3022     - /*
3023     - * reconnecting state means transport disruption, which
3024     - * can take a long time and even might fail permanently,
3025     - * fail fast to give upper layers a chance to failover.
3026     - * deleting state means that the ctrl will never accept
3027     - * commands again, fail it permanently.
3028     - */
3029     - if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
3030     - queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
3031     - nvme_req(rq)->status = NVME_SC_ABORT_REQ;
3032     - return BLK_STS_IOERR;
3033     - }
3034     - return BLK_STS_RESOURCE; /* try again later */
3035     - }
3036     - }
3037     -
3038     - return 0;
3039     +nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
3040     +{
3041     + if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
3042     + return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
3043     + return BLK_STS_OK;
3044     }
3045    
3046     static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
3047     @@ -1646,7 +1646,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
3048    
3049     WARN_ON_ONCE(rq->tag < 0);
3050    
3051     - ret = nvme_rdma_queue_is_ready(queue, rq);
3052     + ret = nvme_rdma_is_ready(queue, rq);
3053     if (unlikely(ret))
3054     return ret;
3055    
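
The RDMA hunks stop completing a request when only the response CQE has arrived: the SEND completion can still be outstanding, and finishing early lets the request and its send buffer be recycled underneath the HCA. The fix stashes status/result in the request and takes a refcount of two, one per completion, so whichever handler runs last ends the request. A minimal sketch of that scheme (illustrative names):

        #include <linux/refcount.h>
        #include <linux/types.h>

        struct two_sided_req {
                refcount_t ref;
                __le16 status;  /* stashed by the response-side completion */
        };

        static void req_submit(struct two_sided_req *req)
        {
                refcount_set(&req->ref, 2);     /* send + recv completions pending */
        }

        /* Called from both completion handlers; only the second caller finishes. */
        static bool req_complete_one_side(struct two_sided_req *req)
        {
                return refcount_dec_and_test(&req->ref);
        }
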
3056     diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
3057     index 58e010bdda3e..8e21211b904b 100644
3058     --- a/drivers/nvme/target/fc.c
3059     +++ b/drivers/nvme/target/fc.c
3060     @@ -532,15 +532,15 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
3061    
3062     tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
3063    
3064     + /* release the queue lookup reference on the completed IO */
3065     + nvmet_fc_tgt_q_put(queue);
3066     +
3067     spin_lock_irqsave(&queue->qlock, flags);
3068     deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
3069     struct nvmet_fc_defer_fcp_req, req_list);
3070     if (!deferfcp) {
3071     list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
3072     spin_unlock_irqrestore(&queue->qlock, flags);
3073     -
3074     - /* Release reference taken at queue lookup and fod allocation */
3075     - nvmet_fc_tgt_q_put(queue);
3076     return;
3077     }
3078    
3079     @@ -759,6 +759,9 @@ nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
3080     tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
3081     deferfcp->fcp_req);
3082    
3083     + /* release the queue lookup reference */
3084     + nvmet_fc_tgt_q_put(queue);
3085     +
3086     kfree(deferfcp);
3087    
3088     spin_lock_irqsave(&queue->qlock, flags);
3089     diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
3090     index 92628c432926..02aff5cc48bf 100644
3091     --- a/drivers/nvme/target/loop.c
3092     +++ b/drivers/nvme/target/loop.c
3093     @@ -61,10 +61,15 @@ static inline struct nvme_loop_ctrl *to_loop_ctrl(struct nvme_ctrl *ctrl)
3094     return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
3095     }
3096    
3097     +enum nvme_loop_queue_flags {
3098     + NVME_LOOP_Q_LIVE = 0,
3099     +};
3100     +
3101     struct nvme_loop_queue {
3102     struct nvmet_cq nvme_cq;
3103     struct nvmet_sq nvme_sq;
3104     struct nvme_loop_ctrl *ctrl;
3105     + unsigned long flags;
3106     };
3107    
3108     static struct nvmet_port *nvmet_loop_port;
3109     @@ -153,6 +158,14 @@ nvme_loop_timeout(struct request *rq, bool reserved)
3110     return BLK_EH_HANDLED;
3111     }
3112    
3113     +static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
3114     + struct request *rq)
3115     +{
3116     + if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
3117     + return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
3118     + return BLK_STS_OK;
3119     +}
3120     +
3121     static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
3122     const struct blk_mq_queue_data *bd)
3123     {
3124     @@ -162,6 +175,10 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
3125     struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
3126     blk_status_t ret;
3127    
3128     + ret = nvme_loop_is_ready(queue, req);
3129     + if (unlikely(ret))
3130     + return ret;
3131     +
3132     ret = nvme_setup_cmd(ns, req, &iod->cmd);
3133     if (ret)
3134     return ret;
3135     @@ -275,6 +292,7 @@ static const struct blk_mq_ops nvme_loop_admin_mq_ops = {
3136    
3137     static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
3138     {
3139     + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
3140     nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
3141     blk_cleanup_queue(ctrl->ctrl.admin_q);
3142     blk_mq_free_tag_set(&ctrl->admin_tag_set);
3143     @@ -305,8 +323,10 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl)
3144     {
3145     int i;
3146    
3147     - for (i = 1; i < ctrl->ctrl.queue_count; i++)
3148     + for (i = 1; i < ctrl->ctrl.queue_count; i++) {
3149     + clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
3150     nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
3151     + }
3152     }
3153    
3154     static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
3155     @@ -346,6 +366,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl)
3156     ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
3157     if (ret)
3158     return ret;
3159     + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
3160     }
3161    
3162     return 0;
3163     @@ -387,6 +408,8 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl)
3164     if (error)
3165     goto out_cleanup_queue;
3166    
3167     + set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
3168     +
3169     error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
3170     if (error) {
3171     dev_err(ctrl->ctrl.device,
3172     diff --git a/drivers/power/reset/zx-reboot.c b/drivers/power/reset/zx-reboot.c
3173     index 7549c7f74a3c..c03e96e6a041 100644
3174     --- a/drivers/power/reset/zx-reboot.c
3175     +++ b/drivers/power/reset/zx-reboot.c
3176     @@ -82,3 +82,7 @@ static struct platform_driver zx_reboot_driver = {
3177     },
3178     };
3179     module_platform_driver(zx_reboot_driver);
3180     +
3181     +MODULE_DESCRIPTION("ZTE SoCs reset driver");
3182     +MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
3183     +MODULE_LICENSE("GPL v2");
3184     diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
3185     index b5f4006198b9..a9a56aa9c26b 100644
3186     --- a/drivers/s390/crypto/zcrypt_api.c
3187     +++ b/drivers/s390/crypto/zcrypt_api.c
3188     @@ -218,8 +218,8 @@ static inline bool zcrypt_queue_compare(struct zcrypt_queue *zq,
3189     weight += atomic_read(&zq->load);
3190     pref_weight += atomic_read(&pref_zq->load);
3191     if (weight == pref_weight)
3192     - return &zq->queue->total_request_count >
3193     - &pref_zq->queue->total_request_count;
3194     + return zq->queue->total_request_count >
3195     + pref_zq->queue->total_request_count;
3196     return weight > pref_weight;
3197     }
3198    
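
The zcrypt fix removes two stray '&' operators: the old code compared the addresses of the two request counters, an arbitrary result determined by where the queues happened to be allocated, rather than their values, so the tie-break between equally loaded queues was meaningless. The bug class in isolation:

        struct q { unsigned long total_request_count; };

        static int prefer_buggy(struct q *a, struct q *b)
        {
                /* compares pointers, not counters; compiles without complaint */
                return &a->total_request_count > &b->total_request_count;
        }

        static int prefer_fixed(struct q *a, struct q *b)
        {
                return a->total_request_count > b->total_request_count;
        }
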
3199     diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
3200     index af3e4d3f9735..7173ae53c526 100644
3201     --- a/drivers/scsi/aacraid/aachba.c
3202     +++ b/drivers/scsi/aacraid/aachba.c
3203     @@ -913,8 +913,15 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
3204     memset(str, ' ', sizeof(*str));
3205    
3206     if (sup_adap_info->adapter_type_text[0]) {
3207     - char *cp = sup_adap_info->adapter_type_text;
3208     int c;
3209     + char *cp;
3210     + char *cname = kmemdup(sup_adap_info->adapter_type_text,
3211     + sizeof(sup_adap_info->adapter_type_text),
3212     + GFP_ATOMIC);
3213     + if (!cname)
3214     + return;
3215     +
3216     + cp = cname;
3217     if ((cp[0] == 'A') && (cp[1] == 'O') && (cp[2] == 'C'))
3218     inqstrcpy("SMC", str->vid);
3219     else {
3220     @@ -923,7 +930,7 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
3221     ++cp;
3222     c = *cp;
3223     *cp = '\0';
3224     - inqstrcpy(sup_adap_info->adapter_type_text, str->vid);
3225     + inqstrcpy(cname, str->vid);
3226     *cp = c;
3227     while (*cp && *cp != ' ')
3228     ++cp;
3229     @@ -937,8 +944,8 @@ static void setinqstr(struct aac_dev *dev, void *data, int tindex)
3230     cp[sizeof(str->pid)] = '\0';
3231     }
3232     inqstrcpy (cp, str->pid);
3233     - if (c)
3234     - cp[sizeof(str->pid)] = c;
3235     +
3236     + kfree(cname);
3237     } else {
3238     struct aac_driver_ident *mp = aac_get_driver_ident(dev->cardtype);
3239    
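
setinqstr() tokenizes the adapter-type string by planting temporary '\0' bytes in it; doing that on the live sup_adap_info buffer corrupted shared adapter data. The fix parses a kmemdup() copy instead and frees it on exit (GFP_ATOMIC because the caller may not sleep). The duplicate-before-mutate idiom, reduced to a sketch:

        #include <linux/errno.h>
        #include <linux/slab.h>

        static int example_parse_copy(const char *src, size_t len)
        {
                char *scratch = kmemdup(src, len, GFP_ATOMIC);

                if (!scratch)
                        return -ENOMEM;

                /* safe: '\0' terminators land in the private copy only */
                /* ... tokenize scratch ... */

                kfree(scratch);
                return 0;
        }
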
3240     diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
3241     index 525a652dab48..c0a4fcb7fd0a 100644
3242     --- a/drivers/scsi/aacraid/commsup.c
3243     +++ b/drivers/scsi/aacraid/commsup.c
3244     @@ -1583,6 +1583,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
3245     * will ensure that i/o is quiesced and the card is flushed in that
3246     * case.
3247     */
3248     + aac_free_irq(aac);
3249     aac_fib_map_free(aac);
3250     dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
3251     aac->comm_phys);
3252     @@ -1590,7 +1591,6 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
3253     aac->comm_phys = 0;
3254     kfree(aac->queues);
3255     aac->queues = NULL;
3256     - aac_free_irq(aac);
3257     kfree(aac->fsa_dev);
3258     aac->fsa_dev = NULL;
3259    
3260     @@ -1672,14 +1672,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
3261     out:
3262     aac->in_reset = 0;
3263     scsi_unblock_requests(host);
3264     - /*
3265     - * Issue bus rescan to catch any configuration that might have
3266     - * occurred
3267     - */
3268     - if (!retval) {
3269     - dev_info(&aac->pdev->dev, "Issuing bus rescan\n");
3270     - scsi_scan_host(host);
3271     - }
3272     +
3273     if (jafo) {
3274     spin_lock_irq(host->host_lock);
3275     }
3276     diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3277     index 794a4600e952..d344fef01f1d 100644
3278     --- a/drivers/scsi/ufs/ufshcd.c
3279     +++ b/drivers/scsi/ufs/ufshcd.c
3280     @@ -6555,12 +6555,15 @@ static int ufshcd_config_vreg(struct device *dev,
3281     struct ufs_vreg *vreg, bool on)
3282     {
3283     int ret = 0;
3284     - struct regulator *reg = vreg->reg;
3285     - const char *name = vreg->name;
3286     + struct regulator *reg;
3287     + const char *name;
3288     int min_uV, uA_load;
3289    
3290     BUG_ON(!vreg);
3291    
3292     + reg = vreg->reg;
3293     + name = vreg->name;
3294     +
3295     if (regulator_count_voltages(reg) > 0) {
3296     min_uV = on ? vreg->min_uV : 0;
3297     ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
3298     diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
3299     index babb15f07995..d51ca243a028 100644
3300     --- a/drivers/spi/spi-imx.c
3301     +++ b/drivers/spi/spi-imx.c
3302     @@ -1496,12 +1496,23 @@ static int spi_imx_remove(struct platform_device *pdev)
3303     {
3304     struct spi_master *master = platform_get_drvdata(pdev);
3305     struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
3306     + int ret;
3307    
3308     spi_bitbang_stop(&spi_imx->bitbang);
3309    
3310     + ret = clk_enable(spi_imx->clk_per);
3311     + if (ret)
3312     + return ret;
3313     +
3314     + ret = clk_enable(spi_imx->clk_ipg);
3315     + if (ret) {
3316     + clk_disable(spi_imx->clk_per);
3317     + return ret;
3318     + }
3319     +
3320     writel(0, spi_imx->base + MXC_CSPICTRL);
3321     - clk_unprepare(spi_imx->clk_ipg);
3322     - clk_unprepare(spi_imx->clk_per);
3323     + clk_disable_unprepare(spi_imx->clk_ipg);
3324     + clk_disable_unprepare(spi_imx->clk_per);
3325     spi_imx_sdma_exit(spi_imx);
3326     spi_master_put(master);
3327    
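
spi_imx_remove() used to write MXC_CSPICTRL with the clocks already disabled and then call clk_unprepare() on clocks whose enable counts were no longer balanced. The fix re-enables both clocks (unwinding on failure), performs the register write, and only then does the full clk_disable_unprepare() on each. The balanced enable/unwind shape in isolation (generic names):

        #include <linux/clk.h>
        #include <linux/io.h>

        static int example_quiesce(struct clk *per, struct clk *ipg,
                                   void __iomem *ctrl_reg)
        {
                int ret;

                ret = clk_enable(per);
                if (ret)
                        return ret;

                ret = clk_enable(ipg);
                if (ret) {
                        clk_disable(per);       /* unwind the first enable */
                        return ret;
                }

                writel(0, ctrl_reg);            /* register access needs clocks on */

                clk_disable_unprepare(ipg);
                clk_disable_unprepare(per);
                return 0;
        }
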
3328     diff --git a/drivers/staging/ccree/ssi_cipher.c b/drivers/staging/ccree/ssi_cipher.c
3329     index 8d31a93fd8b7..087a622f20b2 100644
3330     --- a/drivers/staging/ccree/ssi_cipher.c
3331     +++ b/drivers/staging/ccree/ssi_cipher.c
3332     @@ -904,6 +904,7 @@ static int ssi_ablkcipher_decrypt(struct ablkcipher_request *req)
3333     scatterwalk_map_and_copy(req_ctx->backup_info, req->src,
3334     (req->nbytes - ivsize), ivsize, 0);
3335     req_ctx->is_giv = false;
3336     + req_ctx->backup_info = NULL;
3337    
3338     return ssi_blkcipher_process(tfm, req_ctx, req->dst, req->src, req->nbytes, req->info, ivsize, (void *)req, DRV_CRYPTO_DIRECTION_DECRYPT);
3339     }
3340     diff --git a/drivers/staging/ccree/ssi_driver.c b/drivers/staging/ccree/ssi_driver.c
3341     index 9c6f1200c130..eeb995307951 100644
3342     --- a/drivers/staging/ccree/ssi_driver.c
3343     +++ b/drivers/staging/ccree/ssi_driver.c
3344     @@ -141,7 +141,7 @@ static irqreturn_t cc_isr(int irq, void *dev_id)
3345     irr &= ~SSI_COMP_IRQ_MASK;
3346     complete_request(drvdata);
3347     }
3348     -#ifdef CC_SUPPORT_FIPS
3349     +#ifdef CONFIG_CRYPTO_FIPS
3350     /* TEE FIPS interrupt */
3351     if (likely((irr & SSI_GPR0_IRQ_MASK) != 0)) {
3352     /* Mask interrupt - will be unmasked in Deferred service handler */
3353     diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
3354     index 64763aacda57..284cdd44a2ee 100644
3355     --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
3356     +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
3357     @@ -825,14 +825,15 @@ struct kib_conn *kiblnd_create_conn(struct kib_peer *peer, struct rdma_cm_id *cm
3358     return conn;
3359    
3360     failed_2:
3361     - kiblnd_destroy_conn(conn, true);
3362     + kiblnd_destroy_conn(conn);
3363     + LIBCFS_FREE(conn, sizeof(*conn));
3364     failed_1:
3365     LIBCFS_FREE(init_qp_attr, sizeof(*init_qp_attr));
3366     failed_0:
3367     return NULL;
3368     }
3369    
3370     -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
3371     +void kiblnd_destroy_conn(struct kib_conn *conn)
3372     {
3373     struct rdma_cm_id *cmid = conn->ibc_cmid;
3374     struct kib_peer *peer = conn->ibc_peer;
3375     @@ -895,8 +896,6 @@ void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn)
3376     rdma_destroy_id(cmid);
3377     atomic_dec(&net->ibn_nconns);
3378     }
3379     -
3380     - LIBCFS_FREE(conn, sizeof(*conn));
3381     }
3382    
3383     int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why)
3384     diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
3385     index a1e994a1cc84..98a5e2c21a83 100644
3386     --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
3387     +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
3388     @@ -1015,7 +1015,7 @@ int kiblnd_close_peer_conns_locked(struct kib_peer *peer, int why);
3389     struct kib_conn *kiblnd_create_conn(struct kib_peer *peer,
3390     struct rdma_cm_id *cmid,
3391     int state, int version);
3392     -void kiblnd_destroy_conn(struct kib_conn *conn, bool free_conn);
3393     +void kiblnd_destroy_conn(struct kib_conn *conn);
3394     void kiblnd_close_conn(struct kib_conn *conn, int error);
3395     void kiblnd_close_conn_locked(struct kib_conn *conn, int error);
3396    
3397     diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
3398     index 8fc191d99927..29e10021b906 100644
3399     --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
3400     +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
3401     @@ -3313,11 +3313,13 @@ kiblnd_connd(void *arg)
3402     spin_unlock_irqrestore(lock, flags);
3403     dropped_lock = 1;
3404    
3405     - kiblnd_destroy_conn(conn, !peer);
3406     + kiblnd_destroy_conn(conn);
3407    
3408     spin_lock_irqsave(lock, flags);
3409     - if (!peer)
3410     + if (!peer) {
3411     + kfree(conn);
3412     continue;
3413     + }
3414    
3415     conn->ibc_peer = peer;
3416     if (peer->ibp_reconnected < KIB_RECONN_HIGH_RACE)
3417     diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
3418     index c0664dc80bf2..446310775e90 100644
3419     --- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
3420     +++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
3421     @@ -1395,19 +1395,13 @@ static int rtw_wx_get_essid(struct net_device *dev,
3422     if ((check_fwstate(pmlmepriv, _FW_LINKED)) ||
3423     (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE))) {
3424     len = pcur_bss->Ssid.SsidLength;
3425     -
3426     - wrqu->essid.length = len;
3427     -
3428     memcpy(extra, pcur_bss->Ssid.Ssid, len);
3429     -
3430     - wrqu->essid.flags = 1;
3431     } else {
3432     - ret = -1;
3433     - goto exit;
3434     + len = 0;
3435     + *extra = 0;
3436     }
3437     -
3438     -exit:
3439     -
3440     + wrqu->essid.length = len;
3441     + wrqu->essid.flags = 1;
3442    
3443     return ret;
3444     }
3445     diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
3446     index 1222c005fb98..951680640ad5 100644
3447     --- a/drivers/tty/serial/8250/8250_of.c
3448     +++ b/drivers/tty/serial/8250/8250_of.c
3449     @@ -141,8 +141,11 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
3450     }
3451    
3452     info->rst = devm_reset_control_get_optional_shared(&ofdev->dev, NULL);
3453     - if (IS_ERR(info->rst))
3454     + if (IS_ERR(info->rst)) {
3455     + ret = PTR_ERR(info->rst);
3456     goto err_dispose;
3457     + }
3458     +
3459     ret = reset_control_deassert(info->rst);
3460     if (ret)
3461     goto err_dispose;
3462     diff --git a/drivers/tty/serial/8250/8250_uniphier.c b/drivers/tty/serial/8250/8250_uniphier.c
3463     index 8a10b10e27aa..c206f173f912 100644
3464     --- a/drivers/tty/serial/8250/8250_uniphier.c
3465     +++ b/drivers/tty/serial/8250/8250_uniphier.c
3466     @@ -259,12 +259,13 @@ static int uniphier_uart_probe(struct platform_device *pdev)
3467     up.dl_read = uniphier_serial_dl_read;
3468     up.dl_write = uniphier_serial_dl_write;
3469    
3470     - priv->line = serial8250_register_8250_port(&up);
3471     - if (priv->line < 0) {
3472     + ret = serial8250_register_8250_port(&up);
3473     + if (ret < 0) {
3474     dev_err(dev, "failed to register 8250 port\n");
3475     clk_disable_unprepare(priv->clk);
3476     return ret;
3477     }
3478     + priv->line = ret;
3479    
3480     platform_set_drvdata(pdev, priv);
3481    
3482     diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
3483     index 3657d745e90f..521500c575c8 100644
3484     --- a/drivers/tty/serial/imx.c
3485     +++ b/drivers/tty/serial/imx.c
3486     @@ -2275,12 +2275,14 @@ static void serial_imx_enable_wakeup(struct imx_port *sport, bool on)
3487     val &= ~UCR3_AWAKEN;
3488     writel(val, sport->port.membase + UCR3);
3489    
3490     - val = readl(sport->port.membase + UCR1);
3491     - if (on)
3492     - val |= UCR1_RTSDEN;
3493     - else
3494     - val &= ~UCR1_RTSDEN;
3495     - writel(val, sport->port.membase + UCR1);
3496     + if (sport->have_rtscts) {
3497     + val = readl(sport->port.membase + UCR1);
3498     + if (on)
3499     + val |= UCR1_RTSDEN;
3500     + else
3501     + val &= ~UCR1_RTSDEN;
3502     + writel(val, sport->port.membase + UCR1);
3503     + }
3504     }
3505    
3506     static int imx_serial_port_suspend_noirq(struct device *dev)
3507     diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
3508     index 94cccb6efa32..7892d0be8af9 100644
3509     --- a/drivers/tty/tty_io.c
3510     +++ b/drivers/tty/tty_io.c
3511     @@ -1322,6 +1322,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
3512     "%s: %s driver does not set tty->port. This will crash the kernel later. Fix the driver!\n",
3513     __func__, tty->driver->name);
3514    
3515     + retval = tty_ldisc_lock(tty, 5 * HZ);
3516     + if (retval)
3517     + goto err_release_lock;
3518     tty->port->itty = tty;
3519    
3520     /*
3521     @@ -1332,6 +1335,7 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
3522     retval = tty_ldisc_setup(tty, tty->link);
3523     if (retval)
3524     goto err_release_tty;
3525     + tty_ldisc_unlock(tty);
3526     /* Return the tty locked so that it cannot vanish under the caller */
3527     return tty;
3528    
3529     @@ -1344,9 +1348,11 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
3530    
3531     /* call the tty release_tty routine to clean out this slot */
3532     err_release_tty:
3533     - tty_unlock(tty);
3534     + tty_ldisc_unlock(tty);
3535     tty_info_ratelimited(tty, "ldisc open failed (%d), clearing slot %d\n",
3536     retval, idx);
3537     +err_release_lock:
3538     + tty_unlock(tty);
3539     release_tty(tty, idx);
3540     return ERR_PTR(retval);
3541     }
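
tty_init_dev() now takes the line-discipline lock (with a 5 second timeout) before publishing the tty through tty->port->itty, and the unwind path gained a second label so the ldisc lock and the tty lock are dropped in the correct reverse order. A compilable sketch of that layered-unwind shape, using a mutex and a semaphore with down_timeout() to stand in for the tty and ldisc locks:

        #include <linux/jiffies.h>
        #include <linux/mutex.h>
        #include <linux/semaphore.h>

        static DEFINE_MUTEX(outer_lock);        /* stands in for the tty lock */
        static DEFINE_SEMAPHORE(inner_lock);    /* stands in for the ldisc lock */

        static int example_init(int setup_err)
        {
                int ret;

                mutex_lock(&outer_lock);

                ret = down_timeout(&inner_lock, 5 * HZ);        /* bounded wait */
                if (ret)
                        goto err_release_outer;

                /* ... publish the object, run setup (setup_err fakes a result) ... */
                ret = setup_err;
                if (ret)
                        goto err_release_inner;

                up(&inner_lock);
                mutex_unlock(&outer_lock);
                return 0;

        err_release_inner:
                up(&inner_lock);
        err_release_outer:
                mutex_unlock(&outer_lock);
                return ret;
        }
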
3542     diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
3543     index 84a8ac2a779f..7c895684c3ef 100644
3544     --- a/drivers/tty/tty_ldisc.c
3545     +++ b/drivers/tty/tty_ldisc.c
3546     @@ -336,7 +336,7 @@ static inline void __tty_ldisc_unlock(struct tty_struct *tty)
3547     ldsem_up_write(&tty->ldisc_sem);
3548     }
3549    
3550     -static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
3551     +int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
3552     {
3553     int ret;
3554    
3555     @@ -347,7 +347,7 @@ static int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout)
3556     return 0;
3557     }
3558    
3559     -static void tty_ldisc_unlock(struct tty_struct *tty)
3560     +void tty_ldisc_unlock(struct tty_struct *tty)
3561     {
3562     clear_bit(TTY_LDISC_HALTED, &tty->flags);
3563     __tty_ldisc_unlock(tty);
3564     diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3565     index 18c923a4c16e..4149a965516e 100644
3566     --- a/drivers/usb/class/cdc-acm.c
3567     +++ b/drivers/usb/class/cdc-acm.c
3568     @@ -438,7 +438,7 @@ static int acm_submit_read_urb(struct acm *acm, int index, gfp_t mem_flags)
3569    
3570     res = usb_submit_urb(acm->read_urbs[index], mem_flags);
3571     if (res) {
3572     - if (res != -EPERM) {
3573     + if (res != -EPERM && res != -ENODEV) {
3574     dev_err(&acm->data->dev,
3575     "urb %d failed submission with %d\n",
3576     index, res);
3577     @@ -1765,6 +1765,9 @@ static const struct usb_device_id acm_ids[] = {
3578     { USB_DEVICE(0x0ace, 0x1611), /* ZyDAS 56K USB MODEM - new version */
3579     .driver_info = SINGLE_RX_URB, /* firmware bug */
3580     },
3581     + { USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
3582     + .driver_info = SINGLE_RX_URB,
3583     + },
3584     { USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
3585     .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
3586     },
3587     diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
3588     index 5d061b3d8224..ed9346f0b000 100644
3589     --- a/drivers/usb/gadget/composite.c
3590     +++ b/drivers/usb/gadget/composite.c
3591     @@ -150,7 +150,6 @@ int config_ep_by_speed(struct usb_gadget *g,
3592     struct usb_function *f,
3593     struct usb_ep *_ep)
3594     {
3595     - struct usb_composite_dev *cdev = get_gadget_data(g);
3596     struct usb_endpoint_descriptor *chosen_desc = NULL;
3597     struct usb_descriptor_header **speed_desc = NULL;
3598    
3599     @@ -229,8 +228,12 @@ int config_ep_by_speed(struct usb_gadget *g,
3600     _ep->maxburst = comp_desc->bMaxBurst + 1;
3601     break;
3602     default:
3603     - if (comp_desc->bMaxBurst != 0)
3604     + if (comp_desc->bMaxBurst != 0) {
3605     + struct usb_composite_dev *cdev;
3606     +
3607     + cdev = get_gadget_data(g);
3608     ERROR(cdev, "ep0 bMaxBurst must be 0\n");
3609     + }
3610     _ep->maxburst = 1;
3611     break;
3612     }
3613     diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3614     index 876cdbec1307..c0491dd73f53 100644
3615     --- a/drivers/usb/gadget/function/f_fs.c
3616     +++ b/drivers/usb/gadget/function/f_fs.c
3617     @@ -3704,7 +3704,8 @@ static void ffs_closed(struct ffs_data *ffs)
3618     ci = opts->func_inst.group.cg_item.ci_parent->ci_parent;
3619     ffs_dev_unlock();
3620    
3621     - unregister_gadget_item(ci);
3622     + if (test_bit(FFS_FL_BOUND, &ffs->flags))
3623     + unregister_gadget_item(ci);
3624     return;
3625     done:
3626     ffs_dev_unlock();
3627     diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
3628     index 284bd1a7b570..794bb4958383 100644
3629     --- a/drivers/usb/gadget/udc/core.c
3630     +++ b/drivers/usb/gadget/udc/core.c
3631     @@ -923,7 +923,7 @@ int usb_gadget_ep_match_desc(struct usb_gadget *gadget,
3632     return 0;
3633    
3634     /* "high bandwidth" works only at high speed */
3635     - if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp(desc) & (3<<11))
3636     + if (!gadget_is_dualspeed(gadget) && usb_endpoint_maxp_mult(desc) > 1)
3637     return 0;
3638    
3639     switch (type) {
3640     diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
3641     index a8d5f2e4878d..c66b93664d54 100644
3642     --- a/drivers/usb/serial/Kconfig
3643     +++ b/drivers/usb/serial/Kconfig
3644     @@ -63,6 +63,7 @@ config USB_SERIAL_SIMPLE
3645     - Google USB serial devices
3646     - HP4x calculators
3647     - a number of Motorola phones
3648     + - Motorola Tetra devices
3649     - Novatel Wireless GPS receivers
3650     - Siemens USB/MPI adapter.
3651     - ViVOtech ViVOpay USB device.
3652     diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
3653     index bdf8bd814a9a..01f3ac7769f3 100644
3654     --- a/drivers/usb/serial/io_edgeport.c
3655     +++ b/drivers/usb/serial/io_edgeport.c
3656     @@ -2286,7 +2286,6 @@ static int write_cmd_usb(struct edgeport_port *edge_port,
3657     /* something went wrong */
3658     dev_err(dev, "%s - usb_submit_urb(write command) failed, status = %d\n",
3659     __func__, status);
3660     - usb_kill_urb(urb);
3661     usb_free_urb(urb);
3662     atomic_dec(&CmdUrbs);
3663     return status;
3664     diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
3665     index a9400458ccea..dcf78a498927 100644
3666     --- a/drivers/usb/serial/option.c
3667     +++ b/drivers/usb/serial/option.c
3668     @@ -383,6 +383,9 @@ static void option_instat_callback(struct urb *urb);
3669     #define FOUR_G_SYSTEMS_PRODUCT_W14 0x9603
3670     #define FOUR_G_SYSTEMS_PRODUCT_W100 0x9b01
3671    
3672     +/* Fujisoft products */
3673     +#define FUJISOFT_PRODUCT_FS040U 0x9b02
3674     +
3675     /* iBall 3.5G connect wireless modem */
3676     #define IBALL_3_5G_CONNECT 0x9605
3677    
3678     @@ -1897,6 +1900,8 @@ static const struct usb_device_id option_ids[] = {
3679     { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W100),
3680     .driver_info = (kernel_ulong_t)&four_g_w100_blacklist
3681     },
3682     + {USB_DEVICE(LONGCHEER_VENDOR_ID, FUJISOFT_PRODUCT_FS040U),
3683     + .driver_info = (kernel_ulong_t)&net_intf3_blacklist},
3684     { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
3685     { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, 0x9801, 0xff),
3686     .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
3687     diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c
3688     index a585b477415d..34c5a75f98a7 100644
3689     --- a/drivers/usb/serial/pl2303.c
3690     +++ b/drivers/usb/serial/pl2303.c
3691     @@ -41,6 +41,7 @@ static const struct usb_device_id id_table[] = {
3692     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ2) },
3693     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_DCU11) },
3694     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_RSAQ3) },
3695     + { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_CHILITAG) },
3696     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_PHAROS) },
3697     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_ALDIGA) },
3698     { USB_DEVICE(PL2303_VENDOR_ID, PL2303_PRODUCT_ID_MMX) },
3699     diff --git a/drivers/usb/serial/pl2303.h b/drivers/usb/serial/pl2303.h
3700     index 3b5a15d1dc0d..123289085ee2 100644
3701     --- a/drivers/usb/serial/pl2303.h
3702     +++ b/drivers/usb/serial/pl2303.h
3703     @@ -17,6 +17,7 @@
3704     #define PL2303_PRODUCT_ID_DCU11 0x1234
3705     #define PL2303_PRODUCT_ID_PHAROS 0xaaa0
3706     #define PL2303_PRODUCT_ID_RSAQ3 0xaaa2
3707     +#define PL2303_PRODUCT_ID_CHILITAG 0xaaa8
3708     #define PL2303_PRODUCT_ID_ALDIGA 0x0611
3709     #define PL2303_PRODUCT_ID_MMX 0x0612
3710     #define PL2303_PRODUCT_ID_GPRS 0x0609
3711     diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
3712     index e98b6e57b703..6aa7ff2c1cf7 100644
3713     --- a/drivers/usb/serial/usb-serial-simple.c
3714     +++ b/drivers/usb/serial/usb-serial-simple.c
3715     @@ -80,6 +80,11 @@ DEVICE(vivopay, VIVOPAY_IDS);
3716     { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
3717     DEVICE(moto_modem, MOTO_IDS);
3718    
3719     +/* Motorola Tetra driver */
3720     +#define MOTOROLA_TETRA_IDS() \
3721     + { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
3722     +DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
3723     +
3724     /* Novatel Wireless GPS driver */
3725     #define NOVATEL_IDS() \
3726     { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */
3727     @@ -110,6 +115,7 @@ static struct usb_serial_driver * const serial_drivers[] = {
3728     &google_device,
3729     &vivopay_device,
3730     &moto_modem_device,
3731     + &motorola_tetra_device,
3732     &novatel_gps_device,
3733     &hp4x_device,
3734     &suunto_device,
3735     @@ -125,6 +131,7 @@ static const struct usb_device_id id_table[] = {
3736     GOOGLE_IDS(),
3737     VIVOPAY_IDS(),
3738     MOTO_IDS(),
3739     + MOTOROLA_TETRA_IDS(),
3740     NOVATEL_IDS(),
3741     HP4X_IDS(),
3742     SUUNTO_IDS(),
3743     diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
3744     index 63cf981ed81c..0bc8543e96b1 100644
3745     --- a/drivers/usb/storage/uas.c
3746     +++ b/drivers/usb/storage/uas.c
3747     @@ -1076,20 +1076,19 @@ static int uas_post_reset(struct usb_interface *intf)
3748     return 0;
3749    
3750     err = uas_configure_endpoints(devinfo);
3751     - if (err) {
3752     + if (err && err != ENODEV)
3753     shost_printk(KERN_ERR, shost,
3754     "%s: alloc streams error %d after reset",
3755     __func__, err);
3756     - return 1;
3757     - }
3758    
3759     + /* we must unblock the host in every case lest we deadlock */
3760     spin_lock_irqsave(shost->host_lock, flags);
3761     scsi_report_bus_reset(shost, 0);
3762     spin_unlock_irqrestore(shost->host_lock, flags);
3763    
3764     scsi_unblock_requests(shost);
3765    
3766     - return 0;
3767     + return err ? 1 : 0;
3768     }
3769    
3770     static int uas_suspend(struct usb_interface *intf, pm_message_t message)
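
uas_post_reset() used to return early when stream reallocation failed, leaving the Scsi_Host blocked forever and every command to the device wedged. After the hunk above it logs the error, always reports the bus reset and unblocks the host, and folds the failure into the return value only. The always-unwind shape, reduced to a sketch (the unblock callback stands in for scsi_report_bus_reset() plus scsi_unblock_requests()):

        #include <linux/printk.h>

        static int example_post_reset(int err, void (*unblock)(void))
        {
                if (err)
                        pr_err("alloc streams error %d after reset\n", err);

                /* unblock unconditionally lest outstanding I/O deadlocks */
                unblock();

                return err ? 1 : 0;
        }
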
3771     diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
3772     index aafcc785f840..d564a7049d7f 100644
3773     --- a/fs/btrfs/file.c
3774     +++ b/fs/btrfs/file.c
3775     @@ -2056,6 +2056,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
3776     len = (u64)end - (u64)start + 1;
3777     trace_btrfs_sync_file(file, datasync);
3778    
3779     + btrfs_init_log_ctx(&ctx, inode);
3780     +
3781     /*
3782     * We write the dirty pages in the range and wait until they complete
3783     * out of the ->i_mutex. If so, we can flush the dirty pages by
3784     @@ -2202,8 +2204,6 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
3785     }
3786     trans->sync = true;
3787    
3788     - btrfs_init_log_ctx(&ctx, inode);
3789     -
3790     ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
3791     if (ret < 0) {
3792     /* Fallthrough and commit/free transaction. */
3793     @@ -2261,6 +2261,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
3794     ret = btrfs_end_transaction(trans);
3795     }
3796     out:
3797     + ASSERT(list_empty(&ctx.list));
3798     err = file_check_and_advance_wb_err(file);
3799     if (!ret)
3800     ret = err;
3801     diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
3802     index cdc9f4015ec3..4426d1c73e50 100644
3803     --- a/fs/btrfs/free-space-cache.c
3804     +++ b/fs/btrfs/free-space-cache.c
3805     @@ -1264,7 +1264,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
3806     /* Lock all pages first so we can lock the extent safely. */
3807     ret = io_ctl_prepare_pages(io_ctl, inode, 0);
3808     if (ret)
3809     - goto out;
3810     + goto out_unlock;
3811    
3812     lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
3813     &cached_state);
3814     @@ -1358,6 +1358,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
3815     out_nospc:
3816     cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
3817    
3818     +out_unlock:
3819     if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
3820     up_write(&block_group->data_rwsem);
3821    
3822     diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
3823     index d94e3f68b9b1..c71afd424900 100644
3824     --- a/fs/btrfs/inode.c
3825     +++ b/fs/btrfs/inode.c
3826     @@ -5500,6 +5500,14 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry,
3827     goto out_err;
3828    
3829     btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
3830     + if (location->type != BTRFS_INODE_ITEM_KEY &&
3831     + location->type != BTRFS_ROOT_ITEM_KEY) {
3832     + btrfs_warn(root->fs_info,
3833     +"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
3834     + __func__, name, btrfs_ino(BTRFS_I(dir)),
3835     + location->objectid, location->type, location->offset);
3836     + goto out_err;
3837     + }
3838     out:
3839     btrfs_free_path(path);
3840     return ret;
3841     @@ -5816,8 +5824,6 @@ struct inode *btrfs_lookup_dentry(struct inode *dir, struct dentry *dentry)
3842     return inode;
3843     }
3844    
3845     - BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
3846     -
3847     index = srcu_read_lock(&fs_info->subvol_srcu);
3848     ret = fixup_tree_root_location(fs_info, dir, dentry,
3849     &location, &sub_root);
3850     diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
3851     index 8fd195cfe81b..2c35717a3470 100644
3852     --- a/fs/btrfs/send.c
3853     +++ b/fs/btrfs/send.c
3854     @@ -3527,7 +3527,40 @@ static int wait_for_dest_dir_move(struct send_ctx *sctx,
3855     }
3856    
3857     /*
3858     - * Check if ino ino1 is an ancestor of inode ino2 in the given root.
3859     + * Check if inode ino2, or any of its ancestors, is inode ino1.
3860     + * Return 1 if true, 0 if false and < 0 on error.
3861     + */
3862     +static int check_ino_in_path(struct btrfs_root *root,
3863     + const u64 ino1,
3864     + const u64 ino1_gen,
3865     + const u64 ino2,
3866     + const u64 ino2_gen,
3867     + struct fs_path *fs_path)
3868     +{
3869     + u64 ino = ino2;
3870     +
3871     + if (ino1 == ino2)
3872     + return ino1_gen == ino2_gen;
3873     +
3874     + while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3875     + u64 parent;
3876     + u64 parent_gen;
3877     + int ret;
3878     +
3879     + fs_path_reset(fs_path);
3880     + ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3881     + if (ret < 0)
3882     + return ret;
3883     + if (parent == ino1)
3884     + return parent_gen == ino1_gen;
3885     + ino = parent;
3886     + }
3887     + return 0;
3888     +}
3889     +
3890     +/*
3891     + * Check if ino ino1 is an ancestor of inode ino2 in the given root for any
3892     + * possible path (in case ino2 is not a directory and has multiple hard links).
3893     * Return 1 if true, 0 if false and < 0 on error.
3894     */
3895     static int is_ancestor(struct btrfs_root *root,
3896     @@ -3536,36 +3569,91 @@ static int is_ancestor(struct btrfs_root *root,
3897     const u64 ino2,
3898     struct fs_path *fs_path)
3899     {
3900     - u64 ino = ino2;
3901     - bool free_path = false;
3902     + bool free_fs_path = false;
3903     int ret = 0;
3904     + struct btrfs_path *path = NULL;
3905     + struct btrfs_key key;
3906    
3907     if (!fs_path) {
3908     fs_path = fs_path_alloc();
3909     if (!fs_path)
3910     return -ENOMEM;
3911     - free_path = true;
3912     + free_fs_path = true;
3913     }
3914    
3915     - while (ino > BTRFS_FIRST_FREE_OBJECTID) {
3916     - u64 parent;
3917     - u64 parent_gen;
3918     + path = alloc_path_for_send();
3919     + if (!path) {
3920     + ret = -ENOMEM;
3921     + goto out;
3922     + }
3923    
3924     - fs_path_reset(fs_path);
3925     - ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
3926     - if (ret < 0) {
3927     - if (ret == -ENOENT && ino == ino2)
3928     - ret = 0;
3929     - goto out;
3930     + key.objectid = ino2;
3931     + key.type = BTRFS_INODE_REF_KEY;
3932     + key.offset = 0;
3933     +
3934     + ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3935     + if (ret < 0)
3936     + goto out;
3937     +
3938     + while (true) {
3939     + struct extent_buffer *leaf = path->nodes[0];
3940     + int slot = path->slots[0];
3941     + u32 cur_offset = 0;
3942     + u32 item_size;
3943     +
3944     + if (slot >= btrfs_header_nritems(leaf)) {
3945     + ret = btrfs_next_leaf(root, path);
3946     + if (ret < 0)
3947     + goto out;
3948     + if (ret > 0)
3949     + break;
3950     + continue;
3951     }
3952     - if (parent == ino1) {
3953     - ret = parent_gen == ino1_gen ? 1 : 0;
3954     - goto out;
3955     +
3956     + btrfs_item_key_to_cpu(leaf, &key, slot);
3957     + if (key.objectid != ino2)
3958     + break;
3959     + if (key.type != BTRFS_INODE_REF_KEY &&
3960     + key.type != BTRFS_INODE_EXTREF_KEY)
3961     + break;
3962     +
3963     + item_size = btrfs_item_size_nr(leaf, slot);
3964     + while (cur_offset < item_size) {
3965     + u64 parent;
3966     + u64 parent_gen;
3967     +
3968     + if (key.type == BTRFS_INODE_EXTREF_KEY) {
3969     + unsigned long ptr;
3970     + struct btrfs_inode_extref *extref;
3971     +
3972     + ptr = btrfs_item_ptr_offset(leaf, slot);
3973     + extref = (struct btrfs_inode_extref *)
3974     + (ptr + cur_offset);
3975     + parent = btrfs_inode_extref_parent(leaf,
3976     + extref);
3977     + cur_offset += sizeof(*extref);
3978     + cur_offset += btrfs_inode_extref_name_len(leaf,
3979     + extref);
3980     + } else {
3981     + parent = key.offset;
3982     + cur_offset = item_size;
3983     + }
3984     +
3985     + ret = get_inode_info(root, parent, NULL, &parent_gen,
3986     + NULL, NULL, NULL, NULL);
3987     + if (ret < 0)
3988     + goto out;
3989     + ret = check_ino_in_path(root, ino1, ino1_gen,
3990     + parent, parent_gen, fs_path);
3991     + if (ret)
3992     + goto out;
3993     }
3994     - ino = parent;
3995     + path->slots[0]++;
3996     }
3997     + ret = 0;
3998     out:
3999     - if (free_path)
4000     + btrfs_free_path(path);
4001     + if (free_fs_path)
4002     fs_path_free(fs_path);
4003     return ret;
4004     }
4005     diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
4006     index c800d067fcbf..d3002842d7f6 100644
4007     --- a/fs/btrfs/tree-log.c
4008     +++ b/fs/btrfs/tree-log.c
4009     @@ -4100,7 +4100,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
4010    
4011     if (ordered_io_err) {
4012     ctx->io_err = -EIO;
4013     - return 0;
4014     + return ctx->io_err;
4015     }
4016    
4017     btrfs_init_map_token(&token);
4018     diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
4019     index 0c11121a8ace..4006b2a1233d 100644
4020     --- a/fs/btrfs/volumes.c
4021     +++ b/fs/btrfs/volumes.c
4022     @@ -1765,20 +1765,24 @@ static int btrfs_rm_dev_item(struct btrfs_fs_info *fs_info,
4023     key.offset = device->devid;
4024    
4025     ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
4026     - if (ret < 0)
4027     - goto out;
4028     -
4029     - if (ret > 0) {
4030     - ret = -ENOENT;
4031     + if (ret) {
4032     + if (ret > 0)
4033     + ret = -ENOENT;
4034     + btrfs_abort_transaction(trans, ret);
4035     + btrfs_end_transaction(trans);
4036     goto out;
4037     }
4038    
4039     ret = btrfs_del_item(trans, root, path);
4040     - if (ret)
4041     - goto out;
4042     + if (ret) {
4043     + btrfs_abort_transaction(trans, ret);
4044     + btrfs_end_transaction(trans);
4045     + }
4046     +
4047     out:
4048     btrfs_free_path(path);
4049     - btrfs_commit_transaction(trans);
4050     + if (!ret)
4051     + ret = btrfs_commit_transaction(trans);
4052     return ret;
4053     }
4054    
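
btrfs_rm_dev_item() used to commit the transaction even when the search or the delete had failed, and ignored btrfs_commit_transaction()'s own return value. The fix aborts and ends the transaction on any error and commits only on success, propagating the commit result. The discipline in sketch form, with stub helpers standing in for the btrfs calls:

        /* Stubs standing in for btrfs_search_slot()+btrfs_del_item(), the
         * abort/end helpers and btrfs_commit_transaction(); only the
         * control flow matters here. */
        static int do_update(void)              { return 0; }
        static void abort_txn(int err)          { (void)err; }
        static void end_txn(void)               { }
        static int commit_txn(void)             { return 0; }

        static int example_remove_item(void)
        {
                int ret = do_update();

                if (ret) {
                        abort_txn(ret);         /* poison the transaction */
                        end_txn();              /* drop our handle */
                        return ret;
                }

                return commit_txn();            /* commit itself may fail */
        }
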
4055     diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
4056     index 45e96549ebd2..809cbccbad28 100644
4057     --- a/fs/lockd/svc.c
4058     +++ b/fs/lockd/svc.c
4059     @@ -57,6 +57,9 @@ static struct task_struct *nlmsvc_task;
4060     static struct svc_rqst *nlmsvc_rqst;
4061     unsigned long nlmsvc_timeout;
4062    
4063     +atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
4064     +DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
4065     +
4066     unsigned int lockd_net_id;
4067    
4068     /*
4069     @@ -292,7 +295,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
4070     struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
4071     struct sockaddr_in sin;
4072    
4073     - if (event != NETDEV_DOWN)
4074     + if ((event != NETDEV_DOWN) ||
4075     + !atomic_inc_not_zero(&nlm_ntf_refcnt))
4076     goto out;
4077    
4078     if (nlmsvc_rqst) {
4079     @@ -303,6 +307,8 @@ static int lockd_inetaddr_event(struct notifier_block *this,
4080     svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
4081     (struct sockaddr *)&sin);
4082     }
4083     + atomic_dec(&nlm_ntf_refcnt);
4084     + wake_up(&nlm_ntf_wq);
4085    
4086     out:
4087     return NOTIFY_DONE;
4088     @@ -319,7 +325,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
4089     struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
4090     struct sockaddr_in6 sin6;
4091    
4092     - if (event != NETDEV_DOWN)
4093     + if ((event != NETDEV_DOWN) ||
4094     + !atomic_inc_not_zero(&nlm_ntf_refcnt))
4095     goto out;
4096    
4097     if (nlmsvc_rqst) {
4098     @@ -331,6 +338,8 @@ static int lockd_inet6addr_event(struct notifier_block *this,
4099     svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
4100     (struct sockaddr *)&sin6);
4101     }
4102     + atomic_dec(&nlm_ntf_refcnt);
4103     + wake_up(&nlm_ntf_wq);
4104    
4105     out:
4106     return NOTIFY_DONE;
4107     @@ -347,10 +356,12 @@ static void lockd_unregister_notifiers(void)
4108     #if IS_ENABLED(CONFIG_IPV6)
4109     unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
4110     #endif
4111     + wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
4112     }
4113    
4114     static void lockd_svc_exit_thread(void)
4115     {
4116     + atomic_dec(&nlm_ntf_refcnt);
4117     lockd_unregister_notifiers();
4118     svc_exit_thread(nlmsvc_rqst);
4119     }
4120     @@ -375,6 +386,7 @@ static int lockd_start_svc(struct svc_serv *serv)
4121     goto out_rqst;
4122     }
4123    
4124     + atomic_inc(&nlm_ntf_refcnt);
4125     svc_sock_update_bufs(serv);
4126     serv->sv_maxconn = nlm_max_connections;
4127    
4128     diff --git a/fs/namespace.c b/fs/namespace.c
4129     index d18deb4c410b..adae9ffce91d 100644
4130     --- a/fs/namespace.c
4131     +++ b/fs/namespace.c
4132     @@ -2826,6 +2826,7 @@ long do_mount(const char *dev_name, const char __user *dir_name,
4133     SB_DIRSYNC |
4134     SB_SILENT |
4135     SB_POSIXACL |
4136     + SB_LAZYTIME |
4137     SB_I_VERSION);
4138    
4139     if (flags & MS_REMOUNT)
4140     diff --git a/fs/nfs_common/grace.c b/fs/nfs_common/grace.c
4141     index 420d3a0ab258..3b13fb3b0553 100644
4142     --- a/fs/nfs_common/grace.c
4143     +++ b/fs/nfs_common/grace.c
4144     @@ -30,7 +30,11 @@ locks_start_grace(struct net *net, struct lock_manager *lm)
4145     struct list_head *grace_list = net_generic(net, grace_net_id);
4146    
4147     spin_lock(&grace_lock);
4148     - list_add(&lm->list, grace_list);
4149     + if (list_empty(&lm->list))
4150     + list_add(&lm->list, grace_list);
4151     + else
4152     + WARN(1, "double list_add attempt detected in net %x %s\n",
4153     + net->ns.inum, (net == &init_net) ? "(init_net)" : "");
4154     spin_unlock(&grace_lock);
4155     }
4156     EXPORT_SYMBOL_GPL(locks_start_grace);
4157     @@ -104,7 +108,9 @@ grace_exit_net(struct net *net)
4158     {
4159     struct list_head *grace_list = net_generic(net, grace_net_id);
4160    
4161     - BUG_ON(!list_empty(grace_list));
4162     + WARN_ONCE(!list_empty(grace_list),
4163     + "net %x %s: grace_list is not empty\n",
4164     + net->ns.inum, __func__);
4165     }
4166    
4167     static struct pernet_operations grace_net_ops = {
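
The grace-period changes replace a fatal BUG_ON() at net teardown with WARN_ONCE(), and make registration idempotent: a lock manager whose list node is already linked is refused with a warning instead of corrupting the list. A sketch of that defensive pattern; it assumes the node was set up with INIT_LIST_HEAD() (or LIST_HEAD_INIT) so list_empty() is true while unlinked:

        #include <linux/bug.h>
        #include <linux/list.h>

        struct lock_mgr {
                struct list_head list;  /* INIT_LIST_HEAD() before first use */
        };

        static LIST_HEAD(example_grace_list);

        static void example_start_grace(struct lock_mgr *lm)
        {
                if (list_empty(&lm->list))
                        list_add(&lm->list, &example_grace_list);
                else
                        WARN(1, "double list_add attempt detected\n");
        }

        static void example_exit_net(void)
        {
                /* warn, don't panic: a stale entry is survivable and diagnosable */
                WARN_ONCE(!list_empty(&example_grace_list),
                          "grace_list is not empty\n");
        }
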
4168     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
4169     index a439a70177a4..d89e6ccd33ba 100644
4170     --- a/fs/nfsd/nfs4state.c
4171     +++ b/fs/nfsd/nfs4state.c
4172     @@ -63,12 +63,16 @@ static const stateid_t zero_stateid = {
4173     static const stateid_t currentstateid = {
4174     .si_generation = 1,
4175     };
4176     +static const stateid_t close_stateid = {
4177     + .si_generation = 0xffffffffU,
4178     +};
4179    
4180     static u64 current_sessionid = 1;
4181    
4182     #define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
4183     #define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
4184     #define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
4185     +#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
4186    
4187     /* forward declarations */
4188     static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
4189     @@ -4866,7 +4870,8 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
4190     struct nfs4_stid *s;
4191     __be32 status = nfserr_bad_stateid;
4192    
4193     - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4194     + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4195     + CLOSE_STATEID(stateid))
4196     return status;
4197     /* Client debugging aid. */
4198     if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
4199     @@ -4924,7 +4929,8 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
4200     else if (typemask & NFS4_DELEG_STID)
4201     typemask |= NFS4_REVOKED_DELEG_STID;
4202    
4203     - if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
4204     + if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
4205     + CLOSE_STATEID(stateid))
4206     return nfserr_bad_stateid;
4207     status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
4208     if (status == nfserr_stale_clientid) {
4209     @@ -5177,15 +5183,9 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4210     status = nfsd4_check_seqid(cstate, sop, seqid);
4211     if (status)
4212     return status;
4213     - if (stp->st_stid.sc_type == NFS4_CLOSED_STID
4214     - || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
4215     - /*
4216     - * "Closed" stateid's exist *only* to return
4217     - * nfserr_replay_me from the previous step, and
4218     - * revoked delegations are kept only for free_stateid.
4219     - */
4220     - return nfserr_bad_stateid;
4221     - mutex_lock(&stp->st_mutex);
4222     + status = nfsd4_lock_ol_stateid(stp);
4223     + if (status != nfs_ok)
4224     + return status;
4225     status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4226     if (status == nfs_ok)
4227     status = nfs4_check_fh(current_fh, &stp->st_stid);
4228     @@ -5411,6 +5411,11 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4229     nfsd4_close_open_stateid(stp);
4230     mutex_unlock(&stp->st_mutex);
4231    
4232     + /* See RFC5661 section 18.2.4 */
4233     + if (stp->st_stid.sc_client->cl_minorversion)
4234     + memcpy(&close->cl_stateid, &close_stateid,
4235     + sizeof(close->cl_stateid));
4236     +
4237     /* put reference from nfs4_preprocess_seqid_op */
4238     nfs4_put_stid(&stp->st_stid);
4239     out:
4240     @@ -7016,6 +7021,10 @@ static int nfs4_state_create_net(struct net *net)
4241     INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
4242     nn->conf_name_tree = RB_ROOT;
4243     nn->unconf_name_tree = RB_ROOT;
4244     + nn->boot_time = get_seconds();
4245     + nn->grace_ended = false;
4246     + nn->nfsd4_manager.block_opens = true;
4247     + INIT_LIST_HEAD(&nn->nfsd4_manager.list);
4248     INIT_LIST_HEAD(&nn->client_lru);
4249     INIT_LIST_HEAD(&nn->close_lru);
4250     INIT_LIST_HEAD(&nn->del_recall_lru);
4251     @@ -7073,9 +7082,6 @@ nfs4_state_start_net(struct net *net)
4252     ret = nfs4_state_create_net(net);
4253     if (ret)
4254     return ret;
4255     - nn->boot_time = get_seconds();
4256     - nn->grace_ended = false;
4257     - nn->nfsd4_manager.block_opens = true;
4258     locks_start_grace(net, &nn->nfsd4_manager);
4259     nfsd4_client_tracking_init(net);
4260     printk(KERN_INFO "NFSD: starting %ld-second grace period (net %p)\n",
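
The new close_stateid is a sentinel: NFSv4.1 CLOSE replies now carry an all-ones generation, so a client that tries to reuse the stateid is caught by the CLOSE_STATEID() checks added above and gets nfserr_bad_stateid. A standalone sketch of the memcmp-based sentinel test, with the stateid layout simplified for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* simplified layout: 4-byte generation + 12 opaque bytes, no padding */
typedef struct { uint32_t si_generation; uint8_t si_other[12]; } stateid_t;

static const stateid_t close_stateid = { .si_generation = 0xffffffffU };

#define CLOSE_STATEID(sid) (!memcmp((sid), &close_stateid, sizeof(stateid_t)))

int main(void)
{
	stateid_t from_close = { .si_generation = 0xffffffffU };

	if (CLOSE_STATEID(&from_close))
		printf("reused CLOSE stateid rejected as nfserr_bad_stateid\n");
	return 0;
}
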
4261     diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
4262     index 9f78b5015f2e..4cd0c2336624 100644
4263     --- a/fs/quota/dquot.c
4264     +++ b/fs/quota/dquot.c
4265     @@ -934,12 +934,13 @@ static int dqinit_needed(struct inode *inode, int type)
4266     }
4267    
4268     /* This routine is guarded by s_umount semaphore */
4269     -static void add_dquot_ref(struct super_block *sb, int type)
4270     +static int add_dquot_ref(struct super_block *sb, int type)
4271     {
4272     struct inode *inode, *old_inode = NULL;
4273     #ifdef CONFIG_QUOTA_DEBUG
4274     int reserved = 0;
4275     #endif
4276     + int err = 0;
4277    
4278     spin_lock(&sb->s_inode_list_lock);
4279     list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
4280     @@ -959,7 +960,11 @@ static void add_dquot_ref(struct super_block *sb, int type)
4281     reserved = 1;
4282     #endif
4283     iput(old_inode);
4284     - __dquot_initialize(inode, type);
4285     + err = __dquot_initialize(inode, type);
4286     + if (err) {
4287     + iput(inode);
4288     + goto out;
4289     + }
4290    
4291     /*
4292     * We hold a reference to 'inode' so it couldn't have been
4293     @@ -974,7 +979,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
4294     }
4295     spin_unlock(&sb->s_inode_list_lock);
4296     iput(old_inode);
4297     -
4298     +out:
4299     #ifdef CONFIG_QUOTA_DEBUG
4300     if (reserved) {
4301     quota_error(sb, "Writes happened before quota was turned on "
4302     @@ -982,6 +987,7 @@ static void add_dquot_ref(struct super_block *sb, int type)
4303     "Please run quotacheck(8)");
4304     }
4305     #endif
4306     + return err;
4307     }
4308    
4309     /*
4310     @@ -2372,10 +2378,11 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
4311     dqopt->flags |= dquot_state_flag(flags, type);
4312     spin_unlock(&dq_state_lock);
4313    
4314     - add_dquot_ref(sb, type);
4315     -
4316     - return 0;
4317     + error = add_dquot_ref(sb, type);
4318     + if (error)
4319     + dquot_disable(sb, type, flags);
4320    
4321     + return error;
4322     out_file_init:
4323     dqopt->files[type] = NULL;
4324     iput(inode);
4325     @@ -2978,7 +2985,8 @@ static int __init dquot_init(void)
4326     pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
4327     " %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
4328    
4329     - register_shrinker(&dqcache_shrinker);
4330     + if (register_shrinker(&dqcache_shrinker))
4331     + panic("Cannot register dquot shrinker");
4332    
4333     return 0;
4334     }
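
add_dquot_ref() above now stops at the first __dquot_initialize() failure, releases the inode reference it holds, and returns the error so vfs_load_quota_inode() can back out via dquot_disable(). A compressed standalone mock of that control flow (the helpers are stand-ins, not the kernel functions):

#include <stdio.h>

/* stand-in for __dquot_initialize(): fails on the third inode */
static int init_one(int id)
{
	return id == 3 ? -1 : 0;
}

/* stand-in for add_dquot_ref(): now returns the first error */
static int add_refs(int n)
{
	int err = 0;

	for (int id = 0; id < n; id++) {
		err = init_one(id);
		if (err)
			break;	/* the kernel also iput()s the inode here */
	}
	return err;
}

int main(void)
{
	int err = add_refs(5);

	if (err)	/* vfs_load_quota_inode() reacts with dquot_disable() */
		fprintf(stderr, "add_refs failed (%d), disabling quota\n", err);
	return err ? 1 : 0;
}
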
4335     diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
4336     index 5464ec517702..4885c7b6e44f 100644
4337     --- a/fs/reiserfs/super.c
4338     +++ b/fs/reiserfs/super.c
4339     @@ -2591,7 +2591,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
4340     return err;
4341     if (inode->i_size < off + len - towrite)
4342     i_size_write(inode, off + len - towrite);
4343     - inode->i_version++;
4344     inode->i_mtime = inode->i_ctime = current_time(inode);
4345     mark_inode_dirty(inode);
4346     return len - towrite;
4347     diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
4348     index a3eeaba156c5..b0cccf8a81a8 100644
4349     --- a/fs/xfs/xfs_aops.c
4350     +++ b/fs/xfs/xfs_aops.c
4351     @@ -399,7 +399,7 @@ xfs_map_blocks(
4352     (ip->i_df.if_flags & XFS_IFEXTENTS));
4353     ASSERT(offset <= mp->m_super->s_maxbytes);
4354    
4355     - if (offset + count > mp->m_super->s_maxbytes)
4356     + if ((xfs_ufsize_t)offset + count > mp->m_super->s_maxbytes)
4357     count = mp->m_super->s_maxbytes - offset;
4358     end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
4359     offset_fsb = XFS_B_TO_FSBT(mp, offset);
4360     @@ -1265,7 +1265,7 @@ xfs_map_trim_size(
4361     if (mapping_size > size)
4362     mapping_size = size;
4363     if (offset < i_size_read(inode) &&
4364     - offset + mapping_size >= i_size_read(inode)) {
4365     + (xfs_ufsize_t)offset + mapping_size >= i_size_read(inode)) {
4366     /* limit mapping to block that spans EOF */
4367     mapping_size = roundup_64(i_size_read(inode) - offset,
4368     i_blocksize(inode));
4369     @@ -1312,7 +1312,7 @@ xfs_get_blocks(
4370     lockmode = xfs_ilock_data_map_shared(ip);
4371    
4372     ASSERT(offset <= mp->m_super->s_maxbytes);
4373     - if (offset + size > mp->m_super->s_maxbytes)
4374     + if ((xfs_ufsize_t)offset + size > mp->m_super->s_maxbytes)
4375     size = mp->m_super->s_maxbytes - offset;
4376     end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + size);
4377     offset_fsb = XFS_B_TO_FSBT(mp, offset);
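
The (xfs_ufsize_t) casts above defend against signed overflow: the offset is valid on its own (the ASSERTs check offset <= s_maxbytes), but a signed "offset + count" can overflow, which is undefined for signed types and in practice wraps negative, so the clamp is skipped. Doing the addition in an unsigned type keeps the comparison correct. A standalone demonstration with illustrative values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int64_t  s_maxbytes = INT64_MAX;	/* illustrative limit */
	int64_t  offset     = s_maxbytes - 10;	/* valid: <= s_maxbytes */
	uint64_t count      = 100;

	/*
	 * Signed "offset + count" would overflow here (undefined
	 * behaviour; in practice it wraps negative and the clamp is
	 * skipped).  The unsigned addition compares correctly.
	 */
	if ((uint64_t)offset + count > (uint64_t)s_maxbytes)
		count = (uint64_t)(s_maxbytes - offset);

	printf("clamped count = %llu\n", (unsigned long long)count);
	return 0;
}
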
4378     diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
4379     index dd136f7275e4..e5fb008d75e8 100644
4380     --- a/fs/xfs/xfs_bmap_item.c
4381     +++ b/fs/xfs/xfs_bmap_item.c
4382     @@ -389,7 +389,8 @@ xfs_bud_init(
4383     int
4384     xfs_bui_recover(
4385     struct xfs_mount *mp,
4386     - struct xfs_bui_log_item *buip)
4387     + struct xfs_bui_log_item *buip,
4388     + struct xfs_defer_ops *dfops)
4389     {
4390     int error = 0;
4391     unsigned int bui_type;
4392     @@ -404,9 +405,7 @@ xfs_bui_recover(
4393     xfs_exntst_t state;
4394     struct xfs_trans *tp;
4395     struct xfs_inode *ip = NULL;
4396     - struct xfs_defer_ops dfops;
4397     struct xfs_bmbt_irec irec;
4398     - xfs_fsblock_t firstfsb;
4399    
4400     ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));
4401    
4402     @@ -464,7 +463,6 @@ xfs_bui_recover(
4403    
4404     if (VFS_I(ip)->i_nlink == 0)
4405     xfs_iflags_set(ip, XFS_IRECOVERY);
4406     - xfs_defer_init(&dfops, &firstfsb);
4407    
4408     /* Process deferred bmap item. */
4409     state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
4410     @@ -479,16 +477,16 @@ xfs_bui_recover(
4411     break;
4412     default:
4413     error = -EFSCORRUPTED;
4414     - goto err_dfops;
4415     + goto err_inode;
4416     }
4417     xfs_trans_ijoin(tp, ip, 0);
4418    
4419     count = bmap->me_len;
4420     - error = xfs_trans_log_finish_bmap_update(tp, budp, &dfops, type,
4421     + error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
4422     ip, whichfork, bmap->me_startoff,
4423     bmap->me_startblock, &count, state);
4424     if (error)
4425     - goto err_dfops;
4426     + goto err_inode;
4427    
4428     if (count > 0) {
4429     ASSERT(type == XFS_BMAP_UNMAP);
4430     @@ -496,16 +494,11 @@ xfs_bui_recover(
4431     irec.br_blockcount = count;
4432     irec.br_startoff = bmap->me_startoff;
4433     irec.br_state = state;
4434     - error = xfs_bmap_unmap_extent(tp->t_mountp, &dfops, ip, &irec);
4435     + error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
4436     if (error)
4437     - goto err_dfops;
4438     + goto err_inode;
4439     }
4440    
4441     - /* Finish transaction, free inodes. */
4442     - error = xfs_defer_finish(&tp, &dfops);
4443     - if (error)
4444     - goto err_dfops;
4445     -
4446     set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
4447     error = xfs_trans_commit(tp);
4448     xfs_iunlock(ip, XFS_ILOCK_EXCL);
4449     @@ -513,8 +506,6 @@ xfs_bui_recover(
4450    
4451     return error;
4452    
4453     -err_dfops:
4454     - xfs_defer_cancel(&dfops);
4455     err_inode:
4456     xfs_trans_cancel(tp);
4457     if (ip) {
4458     diff --git a/fs/xfs/xfs_bmap_item.h b/fs/xfs/xfs_bmap_item.h
4459     index c867daae4a3c..24b354a2c836 100644
4460     --- a/fs/xfs/xfs_bmap_item.h
4461     +++ b/fs/xfs/xfs_bmap_item.h
4462     @@ -93,6 +93,7 @@ struct xfs_bud_log_item *xfs_bud_init(struct xfs_mount *,
4463     struct xfs_bui_log_item *);
4464     void xfs_bui_item_free(struct xfs_bui_log_item *);
4465     void xfs_bui_release(struct xfs_bui_log_item *);
4466     -int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip);
4467     +int xfs_bui_recover(struct xfs_mount *mp, struct xfs_bui_log_item *buip,
4468     + struct xfs_defer_ops *dfops);
4469    
4470     #endif /* __XFS_BMAP_ITEM_H__ */
4471     diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
4472     index 2f97c12ca75e..16f93d7356b7 100644
4473     --- a/fs/xfs/xfs_buf.c
4474     +++ b/fs/xfs/xfs_buf.c
4475     @@ -1813,22 +1813,27 @@ xfs_alloc_buftarg(
4476     btp->bt_daxdev = dax_dev;
4477    
4478     if (xfs_setsize_buftarg_early(btp, bdev))
4479     - goto error;
4480     + goto error_free;
4481    
4482     if (list_lru_init(&btp->bt_lru))
4483     - goto error;
4484     + goto error_free;
4485    
4486     if (percpu_counter_init(&btp->bt_io_count, 0, GFP_KERNEL))
4487     - goto error;
4488     + goto error_lru;
4489    
4490     btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
4491     btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
4492     btp->bt_shrinker.seeks = DEFAULT_SEEKS;
4493     btp->bt_shrinker.flags = SHRINKER_NUMA_AWARE;
4494     - register_shrinker(&btp->bt_shrinker);
4495     + if (register_shrinker(&btp->bt_shrinker))
4496     + goto error_pcpu;
4497     return btp;
4498    
4499     -error:
4500     +error_pcpu:
4501     + percpu_counter_destroy(&btp->bt_io_count);
4502     +error_lru:
4503     + list_lru_destroy(&btp->bt_lru);
4504     +error_free:
4505     kmem_free(btp);
4506     return NULL;
4507     }
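
The relabeled error path above turns a single leaky "error" label into an unwind ladder: each acquired resource gets its own label, and a late failure, now including register_shrinker(), releases everything acquired before it in reverse order. A standalone mock of the pattern (all helpers here are hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* hypothetical helpers standing in for list_lru_init() and friends */
static int  lru_init(void)      { return 0; }
static void lru_destroy(void)   { puts("lru destroyed"); }
static int  pcpu_init(void)     { return 0; }
static void pcpu_destroy(void)  { puts("pcpu counter destroyed"); }
static int  shrinker_init(void) { return -1; /* simulate failure */ }

static void *alloc_target(void)
{
	void *btp = malloc(64);

	if (!btp)
		return NULL;
	if (lru_init())
		goto error_free;
	if (pcpu_init())
		goto error_lru;
	if (shrinker_init())
		goto error_pcpu;
	return btp;

error_pcpu:
	pcpu_destroy();
error_lru:
	lru_destroy();
error_free:
	free(btp);
	return NULL;
}

int main(void)
{
	if (!alloc_target())
		fprintf(stderr, "alloc_target failed, fully unwound\n");
	return 0;
}
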
4508     diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
4509     index cd82429d8df7..5a86495127fd 100644
4510     --- a/fs/xfs/xfs_dquot.c
4511     +++ b/fs/xfs/xfs_dquot.c
4512     @@ -987,14 +987,22 @@ xfs_qm_dqflush_done(
4513     * holding the lock before removing the dquot from the AIL.
4514     */
4515     if ((lip->li_flags & XFS_LI_IN_AIL) &&
4516     - lip->li_lsn == qip->qli_flush_lsn) {
4517     + ((lip->li_lsn == qip->qli_flush_lsn) ||
4518     + (lip->li_flags & XFS_LI_FAILED))) {
4519    
4520     /* xfs_trans_ail_delete() drops the AIL lock. */
4521     spin_lock(&ailp->xa_lock);
4522     - if (lip->li_lsn == qip->qli_flush_lsn)
4523     + if (lip->li_lsn == qip->qli_flush_lsn) {
4524     xfs_trans_ail_delete(ailp, lip, SHUTDOWN_CORRUPT_INCORE);
4525     - else
4526     + } else {
4527     + /*
4528     + * Clear the failed state since we are about to drop the
4529     + * flush lock
4530     + */
4531     + if (lip->li_flags & XFS_LI_FAILED)
4532     + xfs_clear_li_failed(lip);
4533     spin_unlock(&ailp->xa_lock);
4534     + }
4535     }
4536    
4537     /*
4538     diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
4539     index 2c7a1629e064..664dea105e76 100644
4540     --- a/fs/xfs/xfs_dquot_item.c
4541     +++ b/fs/xfs/xfs_dquot_item.c
4542     @@ -137,6 +137,26 @@ xfs_qm_dqunpin_wait(
4543     wait_event(dqp->q_pinwait, (atomic_read(&dqp->q_pincount) == 0));
4544     }
4545    
4546     +/*
4547     + * Callback used to mark a buffer with XFS_LI_FAILED when items in the buffer
4548     + * have been failed during writeback.
4549     + *
4550     + * This informs the AIL that the dquot is already flush locked on the next push,
4551     + * and acquires a hold on the buffer to ensure that it isn't reclaimed before
4552     + * dirty data makes it to disk.
4553     + */
4554     +STATIC void
4555     +xfs_dquot_item_error(
4556     + struct xfs_log_item *lip,
4557     + struct xfs_buf *bp)
4558     +{
4559     + struct xfs_dquot *dqp;
4560     +
4561     + dqp = DQUOT_ITEM(lip)->qli_dquot;
4562     + ASSERT(!completion_done(&dqp->q_flush));
4563     + xfs_set_li_failed(lip, bp);
4564     +}
4565     +
4566     STATIC uint
4567     xfs_qm_dquot_logitem_push(
4568     struct xfs_log_item *lip,
4569     @@ -144,13 +164,28 @@ xfs_qm_dquot_logitem_push(
4570     __acquires(&lip->li_ailp->xa_lock)
4571     {
4572     struct xfs_dquot *dqp = DQUOT_ITEM(lip)->qli_dquot;
4573     - struct xfs_buf *bp = NULL;
4574     + struct xfs_buf *bp = lip->li_buf;
4575     uint rval = XFS_ITEM_SUCCESS;
4576     int error;
4577    
4578     if (atomic_read(&dqp->q_pincount) > 0)
4579     return XFS_ITEM_PINNED;
4580    
4581     + /*
4582     + * The buffer containing this item failed to be written back
4583     + * previously. Resubmit the buffer for IO.
4584     + */
4585     + if (lip->li_flags & XFS_LI_FAILED) {
4586     + if (!xfs_buf_trylock(bp))
4587     + return XFS_ITEM_LOCKED;
4588     +
4589     + if (!xfs_buf_resubmit_failed_buffers(bp, lip, buffer_list))
4590     + rval = XFS_ITEM_FLUSHING;
4591     +
4592     + xfs_buf_unlock(bp);
4593     + return rval;
4594     + }
4595     +
4596     if (!xfs_dqlock_nowait(dqp))
4597     return XFS_ITEM_LOCKED;
4598    
4599     @@ -242,7 +277,8 @@ static const struct xfs_item_ops xfs_dquot_item_ops = {
4600     .iop_unlock = xfs_qm_dquot_logitem_unlock,
4601     .iop_committed = xfs_qm_dquot_logitem_committed,
4602     .iop_push = xfs_qm_dquot_logitem_push,
4603     - .iop_committing = xfs_qm_dquot_logitem_committing
4604     + .iop_committing = xfs_qm_dquot_logitem_committing,
4605     + .iop_error = xfs_dquot_item_error
4606     };
4607    
4608     /*
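
Together, xfs_dquot_item_error() and the new push-path branch implement retry-after-failed-writeback: the error callback tags the item XFS_LI_FAILED (and pins its buffer), and the next AIL push resubmits that buffer instead of trying to flush an already flush-locked dquot. A much-simplified standalone sketch of the state machine (types and names are stand-ins, not the xfs_log_item machinery):

#include <stdio.h>

#define LI_FAILED 0x1

struct buf { int submitted; };
struct log_item { unsigned int flags; struct buf *buf; };

/* iop_error: called when the buffer backing the item fails writeback */
static void item_error(struct log_item *lip)
{
	lip->flags |= LI_FAILED;	/* xfs_set_li_failed() also holds bp */
}

/* iop_push: the AIL retries the item later */
static const char *item_push(struct log_item *lip)
{
	if (lip->flags & LI_FAILED) {
		lip->buf->submitted = 1;	/* re-queue buffer for IO */
		lip->flags &= ~LI_FAILED;	/* cleared once re-queued */
		return "resubmitted failed buffer";
	}
	return "flushed normally";	/* normal flush path elided */
}

int main(void)
{
	struct buf bp = { 0 };
	struct log_item lip = { 0, &bp };

	item_error(&lip);		/* writeback failed */
	printf("push -> %s, resubmitted=%d\n", item_push(&lip), bp.submitted);
	return 0;
}
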
4609     diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
4610     index 63350906961a..cb4833d06467 100644
4611     --- a/fs/xfs/xfs_inode.c
4612     +++ b/fs/xfs/xfs_inode.c
4613     @@ -2421,6 +2421,24 @@ xfs_ifree_cluster(
4614     return 0;
4615     }
4616    
4617     +/*
4618     + * Free any local-format buffers sitting around before we reset to
4619     + * extents format.
4620     + */
4621     +static inline void
4622     +xfs_ifree_local_data(
4623     + struct xfs_inode *ip,
4624     + int whichfork)
4625     +{
4626     + struct xfs_ifork *ifp;
4627     +
4628     + if (XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL)
4629     + return;
4630     +
4631     + ifp = XFS_IFORK_PTR(ip, whichfork);
4632     + xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
4633     +}
4634     +
4635     /*
4636     * This is called to return an inode to the inode free list.
4637     * The inode should already be truncated to 0 length and have
4638     @@ -2458,6 +2476,9 @@ xfs_ifree(
4639     if (error)
4640     return error;
4641    
4642     + xfs_ifree_local_data(ip, XFS_DATA_FORK);
4643     + xfs_ifree_local_data(ip, XFS_ATTR_FORK);
4644     +
4645     VFS_I(ip)->i_mode = 0; /* mark incore inode as free */
4646     ip->i_d.di_flags = 0;
4647     ip->i_d.di_dmevmask = 0;
4648     diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
4649     index d6e049fdd977..eaf29646c28f 100644
4650     --- a/fs/xfs/xfs_log_recover.c
4651     +++ b/fs/xfs/xfs_log_recover.c
4652     @@ -24,6 +24,7 @@
4653     #include "xfs_bit.h"
4654     #include "xfs_sb.h"
4655     #include "xfs_mount.h"
4656     +#include "xfs_defer.h"
4657     #include "xfs_da_format.h"
4658     #include "xfs_da_btree.h"
4659     #include "xfs_inode.h"
4660     @@ -4714,7 +4715,8 @@ STATIC int
4661     xlog_recover_process_cui(
4662     struct xfs_mount *mp,
4663     struct xfs_ail *ailp,
4664     - struct xfs_log_item *lip)
4665     + struct xfs_log_item *lip,
4666     + struct xfs_defer_ops *dfops)
4667     {
4668     struct xfs_cui_log_item *cuip;
4669     int error;
4670     @@ -4727,7 +4729,7 @@ xlog_recover_process_cui(
4671     return 0;
4672    
4673     spin_unlock(&ailp->xa_lock);
4674     - error = xfs_cui_recover(mp, cuip);
4675     + error = xfs_cui_recover(mp, cuip, dfops);
4676     spin_lock(&ailp->xa_lock);
4677    
4678     return error;
4679     @@ -4754,7 +4756,8 @@ STATIC int
4680     xlog_recover_process_bui(
4681     struct xfs_mount *mp,
4682     struct xfs_ail *ailp,
4683     - struct xfs_log_item *lip)
4684     + struct xfs_log_item *lip,
4685     + struct xfs_defer_ops *dfops)
4686     {
4687     struct xfs_bui_log_item *buip;
4688     int error;
4689     @@ -4767,7 +4770,7 @@ xlog_recover_process_bui(
4690     return 0;
4691    
4692     spin_unlock(&ailp->xa_lock);
4693     - error = xfs_bui_recover(mp, buip);
4694     + error = xfs_bui_recover(mp, buip, dfops);
4695     spin_lock(&ailp->xa_lock);
4696    
4697     return error;
4698     @@ -4803,6 +4806,46 @@ static inline bool xlog_item_is_intent(struct xfs_log_item *lip)
4699     }
4700     }
4701    
4702     +/* Take all the collected deferred ops and finish them in order. */
4703     +static int
4704     +xlog_finish_defer_ops(
4705     + struct xfs_mount *mp,
4706     + struct xfs_defer_ops *dfops)
4707     +{
4708     + struct xfs_trans *tp;
4709     + int64_t freeblks;
4710     + uint resblks;
4711     + int error;
4712     +
4713     + /*
4714     + * We're finishing the defer_ops that accumulated as a result of
4715     + * recovering unfinished intent items during log recovery. We
4716     + * reserve an itruncate transaction because it is the largest
4717     + * permanent transaction type. Since we're the only user of the fs
4718     + * right now, take 93% (15/16) of the available free blocks. Use
4719     + * weird math to avoid a 64-bit division.
4720     + */
4721     + freeblks = percpu_counter_sum(&mp->m_fdblocks);
4722     + if (freeblks <= 0)
4723     + return -ENOSPC;
4724     + resblks = min_t(int64_t, UINT_MAX, freeblks);
4725     + resblks = (resblks * 15) >> 4;
4726     + error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, resblks,
4727     + 0, XFS_TRANS_RESERVE, &tp);
4728     + if (error)
4729     + return error;
4730     +
4731     + error = xfs_defer_finish(&tp, dfops);
4732     + if (error)
4733     + goto out_cancel;
4734     +
4735     + return xfs_trans_commit(tp);
4736     +
4737     +out_cancel:
4738     + xfs_trans_cancel(tp);
4739     + return error;
4740     +}
4741     +
4742     /*
4743     * When this is called, all of the log intent items which did not have
4744     * corresponding log done items should be in the AIL. What we do now
4745     @@ -4823,10 +4866,12 @@ STATIC int
4746     xlog_recover_process_intents(
4747     struct xlog *log)
4748     {
4749     - struct xfs_log_item *lip;
4750     - int error = 0;
4751     + struct xfs_defer_ops dfops;
4752     struct xfs_ail_cursor cur;
4753     + struct xfs_log_item *lip;
4754     struct xfs_ail *ailp;
4755     + xfs_fsblock_t firstfsb;
4756     + int error = 0;
4757     #if defined(DEBUG) || defined(XFS_WARN)
4758     xfs_lsn_t last_lsn;
4759     #endif
4760     @@ -4837,6 +4882,7 @@ xlog_recover_process_intents(
4761     #if defined(DEBUG) || defined(XFS_WARN)
4762     last_lsn = xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block);
4763     #endif
4764     + xfs_defer_init(&dfops, &firstfsb);
4765     while (lip != NULL) {
4766     /*
4767     * We're done when we see something other than an intent.
4768     @@ -4857,6 +4903,12 @@ xlog_recover_process_intents(
4769     */
4770     ASSERT(XFS_LSN_CMP(last_lsn, lip->li_lsn) >= 0);
4771    
4772     + /*
4773     + * NOTE: If your intent processing routine can create more
4774     + * deferred ops, you /must/ attach them to the dfops in this
4775     + * routine or else those subsequent intents will get
4776     + * replayed in the wrong order!
4777     + */
4778     switch (lip->li_type) {
4779     case XFS_LI_EFI:
4780     error = xlog_recover_process_efi(log->l_mp, ailp, lip);
4781     @@ -4865,10 +4917,12 @@ xlog_recover_process_intents(
4782     error = xlog_recover_process_rui(log->l_mp, ailp, lip);
4783     break;
4784     case XFS_LI_CUI:
4785     - error = xlog_recover_process_cui(log->l_mp, ailp, lip);
4786     + error = xlog_recover_process_cui(log->l_mp, ailp, lip,
4787     + &dfops);
4788     break;
4789     case XFS_LI_BUI:
4790     - error = xlog_recover_process_bui(log->l_mp, ailp, lip);
4791     + error = xlog_recover_process_bui(log->l_mp, ailp, lip,
4792     + &dfops);
4793     break;
4794     }
4795     if (error)
4796     @@ -4878,6 +4932,11 @@ xlog_recover_process_intents(
4797     out:
4798     xfs_trans_ail_cursor_done(&cur);
4799     spin_unlock(&ailp->xa_lock);
4800     + if (error)
4801     + xfs_defer_cancel(&dfops);
4802     + else
4803     + error = xlog_finish_defer_ops(log->l_mp, &dfops);
4804     +
4805     return error;
4806     }
4807    
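
The reservation arithmetic in xlog_finish_defer_ops() deserves a second look: (resblks * 15) >> 4 is 15/16, i.e. 93.75%, computed with a multiply and a shift so no 64-bit division is emitted. A quick standalone check of the math:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freeblks = 1048576;	/* example: 1M free blocks */
	uint64_t resblks  = (freeblks * 15) >> 4;

	/* 1048576 * 15 / 16 = 983040, i.e. 93.75% */
	printf("%llu of %llu blocks reserved (%.2f%%)\n",
	       (unsigned long long)resblks, (unsigned long long)freeblks,
	       100.0 * resblks / freeblks);
	return 0;
}
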
4808     diff --git a/fs/xfs/xfs_refcount_item.c b/fs/xfs/xfs_refcount_item.c
4809     index 8f2e2fac4255..3a55d6fc271b 100644
4810     --- a/fs/xfs/xfs_refcount_item.c
4811     +++ b/fs/xfs/xfs_refcount_item.c
4812     @@ -393,7 +393,8 @@ xfs_cud_init(
4813     int
4814     xfs_cui_recover(
4815     struct xfs_mount *mp,
4816     - struct xfs_cui_log_item *cuip)
4817     + struct xfs_cui_log_item *cuip,
4818     + struct xfs_defer_ops *dfops)
4819     {
4820     int i;
4821     int error = 0;
4822     @@ -405,11 +406,9 @@ xfs_cui_recover(
4823     struct xfs_trans *tp;
4824     struct xfs_btree_cur *rcur = NULL;
4825     enum xfs_refcount_intent_type type;
4826     - xfs_fsblock_t firstfsb;
4827     xfs_fsblock_t new_fsb;
4828     xfs_extlen_t new_len;
4829     struct xfs_bmbt_irec irec;
4830     - struct xfs_defer_ops dfops;
4831     bool requeue_only = false;
4832    
4833     ASSERT(!test_bit(XFS_CUI_RECOVERED, &cuip->cui_flags));
4834     @@ -465,7 +464,6 @@ xfs_cui_recover(
4835     return error;
4836     cudp = xfs_trans_get_cud(tp, cuip);
4837    
4838     - xfs_defer_init(&dfops, &firstfsb);
4839     for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
4840     refc = &cuip->cui_format.cui_extents[i];
4841     refc_type = refc->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
4842     @@ -485,7 +483,7 @@ xfs_cui_recover(
4843     new_len = refc->pe_len;
4844     } else
4845     error = xfs_trans_log_finish_refcount_update(tp, cudp,
4846     - &dfops, type, refc->pe_startblock, refc->pe_len,
4847     + dfops, type, refc->pe_startblock, refc->pe_len,
4848     &new_fsb, &new_len, &rcur);
4849     if (error)
4850     goto abort_error;
4851     @@ -497,21 +495,21 @@ xfs_cui_recover(
4852     switch (type) {
4853     case XFS_REFCOUNT_INCREASE:
4854     error = xfs_refcount_increase_extent(
4855     - tp->t_mountp, &dfops, &irec);
4856     + tp->t_mountp, dfops, &irec);
4857     break;
4858     case XFS_REFCOUNT_DECREASE:
4859     error = xfs_refcount_decrease_extent(
4860     - tp->t_mountp, &dfops, &irec);
4861     + tp->t_mountp, dfops, &irec);
4862     break;
4863     case XFS_REFCOUNT_ALLOC_COW:
4864     error = xfs_refcount_alloc_cow_extent(
4865     - tp->t_mountp, &dfops,
4866     + tp->t_mountp, dfops,
4867     irec.br_startblock,
4868     irec.br_blockcount);
4869     break;
4870     case XFS_REFCOUNT_FREE_COW:
4871     error = xfs_refcount_free_cow_extent(
4872     - tp->t_mountp, &dfops,
4873     + tp->t_mountp, dfops,
4874     irec.br_startblock,
4875     irec.br_blockcount);
4876     break;
4877     @@ -525,17 +523,12 @@ xfs_cui_recover(
4878     }
4879    
4880     xfs_refcount_finish_one_cleanup(tp, rcur, error);
4881     - error = xfs_defer_finish(&tp, &dfops);
4882     - if (error)
4883     - goto abort_defer;
4884     set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
4885     error = xfs_trans_commit(tp);
4886     return error;
4887    
4888     abort_error:
4889     xfs_refcount_finish_one_cleanup(tp, rcur, error);
4890     -abort_defer:
4891     - xfs_defer_cancel(&dfops);
4892     xfs_trans_cancel(tp);
4893     return error;
4894     }
4895     diff --git a/fs/xfs/xfs_refcount_item.h b/fs/xfs/xfs_refcount_item.h
4896     index 5b74dddfa64b..0e5327349a13 100644
4897     --- a/fs/xfs/xfs_refcount_item.h
4898     +++ b/fs/xfs/xfs_refcount_item.h
4899     @@ -96,6 +96,7 @@ struct xfs_cud_log_item *xfs_cud_init(struct xfs_mount *,
4900     struct xfs_cui_log_item *);
4901     void xfs_cui_item_free(struct xfs_cui_log_item *);
4902     void xfs_cui_release(struct xfs_cui_log_item *);
4903     -int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip);
4904     +int xfs_cui_recover(struct xfs_mount *mp, struct xfs_cui_log_item *cuip,
4905     + struct xfs_defer_ops *dfops);
4906    
4907     #endif /* __XFS_REFCOUNT_ITEM_H__ */
4908     diff --git a/include/crypto/gcm.h b/include/crypto/gcm.h
4909     new file mode 100644
4910     index 000000000000..c50e057ea17e
4911     --- /dev/null
4912     +++ b/include/crypto/gcm.h
4913     @@ -0,0 +1,8 @@
4914     +#ifndef _CRYPTO_GCM_H
4915     +#define _CRYPTO_GCM_H
4916     +
4917     +#define GCM_AES_IV_SIZE 12
4918     +#define GCM_RFC4106_IV_SIZE 8
4919     +#define GCM_RFC4543_IV_SIZE 8
4920     +
4921     +#endif
4922     diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
4923     index 6882538eda32..5a8019befafd 100644
4924     --- a/include/linux/kvm_host.h
4925     +++ b/include/linux/kvm_host.h
4926     @@ -714,6 +714,9 @@ int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
4927     unsigned long len);
4928     void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
4929    
4930     +void kvm_sigset_activate(struct kvm_vcpu *vcpu);
4931     +void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
4932     +
4933     void kvm_vcpu_block(struct kvm_vcpu *vcpu);
4934     void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
4935     void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
4936     diff --git a/include/linux/tty.h b/include/linux/tty.h
4937     index 7ac8ba208b1f..0a6c71e0ad01 100644
4938     --- a/include/linux/tty.h
4939     +++ b/include/linux/tty.h
4940     @@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty);
4941     extern struct tty_struct *tty_kopen(dev_t device);
4942     extern void tty_kclose(struct tty_struct *tty);
4943     extern int tty_dev_name_to_number(const char *name, dev_t *number);
4944     +extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout);
4945     +extern void tty_ldisc_unlock(struct tty_struct *tty);
4946     #else
4947     static inline void tty_kref_put(struct tty_struct *tty)
4948     { }
4949     diff --git a/include/net/mac80211.h b/include/net/mac80211.h
4950     index 885690fa39c8..4f1d2dec43ce 100644
4951     --- a/include/net/mac80211.h
4952     +++ b/include/net/mac80211.h
4953     @@ -4470,18 +4470,24 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
4954     * ieee80211_nullfunc_get - retrieve a nullfunc template
4955     * @hw: pointer obtained from ieee80211_alloc_hw().
4956     * @vif: &struct ieee80211_vif pointer from the add_interface callback.
4957     + * @qos_ok: QoS NDP is acceptable to the caller; this should be set
4958     + * if at all possible
4959     *
4960     * Creates a Nullfunc template which can, for example, be uploaded to
4961     * hardware. The template must be updated after association so that the
4962     * correct BSSID and address are used.
4963     *
4964     + * If @qos_ok is set and the association is to an AP with QoS/WMM, the
4965     + * returned packet will be a QoS NDP.
4966     + *
4967     * Note: Caller (or hardware) is responsible for setting the
4968     * &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
4969     *
4970     * Return: The nullfunc template. %NULL on error.
4971     */
4972     struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
4973     - struct ieee80211_vif *vif);
4974     + struct ieee80211_vif *vif,
4975     + bool qos_ok);
4976    
4977     /**
4978     * ieee80211_probereq_get - retrieve a Probe Request template
4979     diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
4980     index ebe96796027a..a58caf5807ff 100644
4981     --- a/include/trace/events/rxrpc.h
4982     +++ b/include/trace/events/rxrpc.h
4983     @@ -49,6 +49,7 @@ enum rxrpc_conn_trace {
4984     rxrpc_conn_put_client,
4985     rxrpc_conn_put_service,
4986     rxrpc_conn_queued,
4987     + rxrpc_conn_reap_service,
4988     rxrpc_conn_seen,
4989     };
4990    
4991     @@ -206,6 +207,7 @@ enum rxrpc_congest_change {
4992     EM(rxrpc_conn_put_client, "PTc") \
4993     EM(rxrpc_conn_put_service, "PTs") \
4994     EM(rxrpc_conn_queued, "QUE") \
4995     + EM(rxrpc_conn_reap_service, "RPs") \
4996     E_(rxrpc_conn_seen, "SEE")
4997    
4998     #define rxrpc_client_traces \
4999     diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
5000     index 26283fefdf5f..f7015aa12347 100644
5001     --- a/include/uapi/linux/kfd_ioctl.h
5002     +++ b/include/uapi/linux/kfd_ioctl.h
5003     @@ -233,29 +233,29 @@ struct kfd_ioctl_wait_events_args {
5004     };
5005    
5006     struct kfd_ioctl_set_scratch_backing_va_args {
5007     - uint64_t va_addr; /* to KFD */
5008     - uint32_t gpu_id; /* to KFD */
5009     - uint32_t pad;
5010     + __u64 va_addr; /* to KFD */
5011     + __u32 gpu_id; /* to KFD */
5012     + __u32 pad;
5013     };
5014    
5015     struct kfd_ioctl_get_tile_config_args {
5016     /* to KFD: pointer to tile array */
5017     - uint64_t tile_config_ptr;
5018     + __u64 tile_config_ptr;
5019     /* to KFD: pointer to macro tile array */
5020     - uint64_t macro_tile_config_ptr;
5021     + __u64 macro_tile_config_ptr;
5022     /* to KFD: array size allocated by user mode
5023     * from KFD: array size filled by kernel
5024     */
5025     - uint32_t num_tile_configs;
5026     + __u32 num_tile_configs;
5027     /* to KFD: array size allocated by user mode
5028     * from KFD: array size filled by kernel
5029     */
5030     - uint32_t num_macro_tile_configs;
5031     + __u32 num_macro_tile_configs;
5032    
5033     - uint32_t gpu_id; /* to KFD */
5034     - uint32_t gb_addr_config; /* from KFD */
5035     - uint32_t num_banks; /* from KFD */
5036     - uint32_t num_ranks; /* from KFD */
5037     + __u32 gpu_id; /* to KFD */
5038     + __u32 gb_addr_config; /* from KFD */
5039     + __u32 num_banks; /* from KFD */
5040     + __u32 num_ranks; /* from KFD */
5041     /* struct size can be extended later if needed
5042     * without breaking ABI compatibility
5043     */
5044     diff --git a/kernel/events/core.c b/kernel/events/core.c
5045     index 24ebad5567b4..8c20af8738ac 100644
5046     --- a/kernel/events/core.c
5047     +++ b/kernel/events/core.c
5048     @@ -6756,6 +6756,7 @@ static void perf_fill_ns_link_info(struct perf_ns_link_info *ns_link_info,
5049     ns_inode = ns_path.dentry->d_inode;
5050     ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
5051     ns_link_info->ino = ns_inode->i_ino;
5052     + path_put(&ns_path);
5053     }
5054     }
5055    
5056     diff --git a/kernel/futex.c b/kernel/futex.c
5057     index 52b3f4703158..046cd780d057 100644
5058     --- a/kernel/futex.c
5059     +++ b/kernel/futex.c
5060     @@ -2311,9 +2311,6 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
5061     raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
5062    
5063     oldowner = pi_state->owner;
5064     - /* Owner died? */
5065     - if (!pi_state->owner)
5066     - newtid |= FUTEX_OWNER_DIED;
5067    
5068     /*
5069     * We are here because either:
5070     @@ -2374,6 +2371,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
5071     }
5072    
5073     newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
5074     + /* Owner died? */
5075     + if (!pi_state->owner)
5076     + newtid |= FUTEX_OWNER_DIED;
5077    
5078     if (get_futex_value_locked(&uval, uaddr))
5079     goto handle_fault;
5080     diff --git a/lib/test_firmware.c b/lib/test_firmware.c
5081     index 64a4c76cba2b..e7008688769b 100644
5082     --- a/lib/test_firmware.c
5083     +++ b/lib/test_firmware.c
5084     @@ -371,6 +371,7 @@ static ssize_t config_num_requests_store(struct device *dev,
5085     if (test_fw_config->reqs) {
5086     pr_err("Must call release_all_firmware prior to changing config\n");
5087     rc = -EINVAL;
5088     + mutex_unlock(&test_fw_mutex);
5089     goto out;
5090     }
5091     mutex_unlock(&test_fw_mutex);
5092     diff --git a/mm/kmemleak.c b/mm/kmemleak.c
5093     index 7780cd83a495..a1ba553816eb 100644
5094     --- a/mm/kmemleak.c
5095     +++ b/mm/kmemleak.c
5096     @@ -1532,6 +1532,8 @@ static void kmemleak_scan(void)
5097     if (page_count(page) == 0)
5098     continue;
5099     scan_block(page, page + 1, NULL);
5100     + if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
5101     + cond_resched();
5102     }
5103     }
5104     put_online_mems();
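
The new cond_resched() call bounds scan latency: the page scan now yields once every MAX_SCAN_SIZE / sizeof(*page) pfns instead of monopolizing the CPU across a huge memory section. A standalone sketch of the periodic-yield pattern (the constants here are made up, not kmemleak's):

#include <stdio.h>

#define MAX_SCAN_SIZE  4096	/* illustrative stand-ins */
#define PAGE_DESC_SIZE 64

static void cond_resched(void) { /* would yield the CPU in the kernel */ }

int main(void)
{
	unsigned long yields = 0;

	for (unsigned long pfn = 0; pfn < 1000000; pfn++) {
		/* scan_block(page, page + 1, NULL) would run here */
		if (!(pfn % (MAX_SCAN_SIZE / PAGE_DESC_SIZE))) {
			cond_resched();
			yields++;
		}
	}
	printf("yielded %lu times over the scan\n", yields);
	return 0;
}
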
5105     diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
5106     index d8bbd0d2225a..d6d3f316de4c 100644
5107     --- a/net/mac80211/mesh_hwmp.c
5108     +++ b/net/mac80211/mesh_hwmp.c
5109     @@ -797,7 +797,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
5110     struct mesh_path *mpath;
5111     u8 ttl, flags, hopcount;
5112     const u8 *orig_addr;
5113     - u32 orig_sn, metric, metric_txsta, interval;
5114     + u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
5115     bool root_is_gate;
5116    
5117     ttl = rann->rann_ttl;
5118     @@ -808,7 +808,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
5119     interval = le32_to_cpu(rann->rann_interval);
5120     hopcount = rann->rann_hopcount;
5121     hopcount++;
5122     - metric = le32_to_cpu(rann->rann_metric);
5123     + orig_metric = le32_to_cpu(rann->rann_metric);
5124    
5125     /* Ignore our own RANNs */
5126     if (ether_addr_equal(orig_addr, sdata->vif.addr))
5127     @@ -825,7 +825,10 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
5128     return;
5129     }
5130    
5131     - metric_txsta = airtime_link_metric_get(local, sta);
5132     + last_hop_metric = airtime_link_metric_get(local, sta);
5133     + new_metric = orig_metric + last_hop_metric;
5134     + if (new_metric < orig_metric)
5135     + new_metric = MAX_METRIC;
5136    
5137     mpath = mesh_path_lookup(sdata, orig_addr);
5138     if (!mpath) {
5139     @@ -838,7 +841,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
5140     }
5141    
5142     if (!(SN_LT(mpath->sn, orig_sn)) &&
5143     - !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
5144     + !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
5145     rcu_read_unlock();
5146     return;
5147     }
5148     @@ -856,7 +859,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
5149     }
5150    
5151     mpath->sn = orig_sn;
5152     - mpath->rann_metric = metric + metric_txsta;
5153     + mpath->rann_metric = new_metric;
5154     mpath->is_root = true;
5155     /* Recording RANNs sender address to send individually
5156     * addressed PREQs destined for root mesh STA */
5157     @@ -876,7 +879,7 @@ static void hwmp_rann_frame_process(struct ieee80211_sub_if_data *sdata,
5158     mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
5159     orig_sn, 0, NULL, 0, broadcast_addr,
5160     hopcount, ttl, interval,
5161     - metric + metric_txsta, 0, sdata);
5162     + new_metric, 0, sdata);
5163     }
5164    
5165     rcu_read_unlock();
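
The renamed metrics make the fix visible: new_metric is a saturating u32 sum of the originator's metric and the last-hop metric, pinned to MAX_METRIC on wraparound so a path whose metric overflows cannot masquerade as a cheap one. A standalone sketch of the saturating addition (the MAX_METRIC value below is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_METRIC 0xffffffffU

static uint32_t metric_add(uint32_t orig, uint32_t last_hop)
{
	uint32_t sum = orig + last_hop;

	return sum < orig ? MAX_METRIC : sum;	/* wrapped -> saturate */
}

int main(void)
{
	printf("%u\n", metric_add(100, 200));			/* 300 */
	printf("%u\n", metric_add(0xfffffff0U, 0x100U));	/* saturated */
	return 0;
}
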
5166     diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
5167     index 3b8e2709d8de..9115cc52ce83 100644
5168     --- a/net/mac80211/mlme.c
5169     +++ b/net/mac80211/mlme.c
5170     @@ -908,7 +908,7 @@ void ieee80211_send_nullfunc(struct ieee80211_local *local,
5171     struct ieee80211_hdr_3addr *nullfunc;
5172     struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
5173    
5174     - skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
5175     + skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
5176     if (!skb)
5177     return;
5178    
5179     diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
5180     index 94826680cf2b..73429841f115 100644
5181     --- a/net/mac80211/tx.c
5182     +++ b/net/mac80211/tx.c
5183     @@ -4404,13 +4404,15 @@ struct sk_buff *ieee80211_pspoll_get(struct ieee80211_hw *hw,
5184     EXPORT_SYMBOL(ieee80211_pspoll_get);
5185    
5186     struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
5187     - struct ieee80211_vif *vif)
5188     + struct ieee80211_vif *vif,
5189     + bool qos_ok)
5190     {
5191     struct ieee80211_hdr_3addr *nullfunc;
5192     struct ieee80211_sub_if_data *sdata;
5193     struct ieee80211_if_managed *ifmgd;
5194     struct ieee80211_local *local;
5195     struct sk_buff *skb;
5196     + bool qos = false;
5197    
5198     if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
5199     return NULL;
5200     @@ -4419,7 +4421,17 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
5201     ifmgd = &sdata->u.mgd;
5202     local = sdata->local;
5203    
5204     - skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
5205     + if (qos_ok) {
5206     + struct sta_info *sta;
5207     +
5208     + rcu_read_lock();
5209     + sta = sta_info_get(sdata, ifmgd->bssid);
5210     + qos = sta && sta->sta.wme;
5211     + rcu_read_unlock();
5212     + }
5213     +
5214     + skb = dev_alloc_skb(local->hw.extra_tx_headroom +
5215     + sizeof(*nullfunc) + 2);
5216     if (!skb)
5217     return NULL;
5218    
5219     @@ -4429,6 +4441,19 @@ struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
5220     nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
5221     IEEE80211_STYPE_NULLFUNC |
5222     IEEE80211_FCTL_TODS);
5223     + if (qos) {
5224     + __le16 qos = cpu_to_le16(7);
5225     +
5226     + BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
5227     + IEEE80211_STYPE_NULLFUNC) !=
5228     + IEEE80211_STYPE_QOS_NULLFUNC);
5229     + nullfunc->frame_control |=
5230     + cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
5231     + skb->priority = 7;
5232     + skb_set_queue_mapping(skb, IEEE80211_AC_VO);
5233     + skb_put_data(skb, &qos, sizeof(qos));
5234     + }
5235     +
5236     memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
5237     memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
5238     memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
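
When qos is true, the template above grows by two bytes and the subtype is upgraded: NULLFUNC becomes QOS_NULLFUNC (the BUILD_BUG_ON verifies that OR-ing the subtype bits is safe), and a QoS control field with TID 7 (voice) is appended after the 3-address header. A standalone byte-level sketch; the constants are copied from the 802.11 definitions, but treat the exact layout as illustrative rather than mac80211 code:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FTYPE_DATA         0x0008	/* IEEE80211_FTYPE_DATA */
#define STYPE_NULLFUNC     0x0040	/* IEEE80211_STYPE_NULLFUNC */
#define STYPE_QOS_NULLFUNC 0x00C0	/* IEEE80211_STYPE_QOS_NULLFUNC */
#define FCTL_TODS          0x0100	/* IEEE80211_FCTL_TODS */

int main(void)
{
	uint8_t frame[26];	/* 24-byte 3-addr header + 2-byte QoS ctl */
	uint16_t fc  = FTYPE_DATA | STYPE_NULLFUNC | FCTL_TODS;
	uint16_t qos = 7;	/* TID 7, matching skb->priority = 7 */

	/* NULLFUNC | QOS_NULLFUNC == QOS_NULLFUNC, so OR-ing is safe */
	fc |= STYPE_QOS_NULLFUNC;

	memset(frame, 0, sizeof(frame));
	memcpy(frame, &fc, 2);		/* kernel uses cpu_to_le16() */
	memcpy(frame + 24, &qos, 2);	/* appended QoS control field */

	printf("frame_control=0x%04x, frame length=%zu\n", fc, sizeof(frame));
	return 0;
}
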
5239     diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
5240     index e8eb427ce6d1..0d9f6afa266c 100644
5241     --- a/net/openvswitch/flow_netlink.c
5242     +++ b/net/openvswitch/flow_netlink.c
5243     @@ -1903,14 +1903,11 @@ int ovs_nla_put_mask(const struct sw_flow *flow, struct sk_buff *skb)
5244    
5245     #define MAX_ACTIONS_BUFSIZE (32 * 1024)
5246    
5247     -static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
5248     +static struct sw_flow_actions *nla_alloc_flow_actions(int size)
5249     {
5250     struct sw_flow_actions *sfa;
5251    
5252     - if (size > MAX_ACTIONS_BUFSIZE) {
5253     - OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
5254     - return ERR_PTR(-EINVAL);
5255     - }
5256     + WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
5257    
5258     sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
5259     if (!sfa)
5260     @@ -1983,12 +1980,15 @@ static struct nlattr *reserve_sfa_size(struct sw_flow_actions **sfa,
5261     new_acts_size = ksize(*sfa) * 2;
5262    
5263     if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
5264     - if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
5265     + if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
5266     + OVS_NLERR(log, "Flow action size exceeds max %u",
5267     + MAX_ACTIONS_BUFSIZE);
5268     return ERR_PTR(-EMSGSIZE);
5269     + }
5270     new_acts_size = MAX_ACTIONS_BUFSIZE;
5271     }
5272    
5273     - acts = nla_alloc_flow_actions(new_acts_size, log);
5274     + acts = nla_alloc_flow_actions(new_acts_size);
5275     if (IS_ERR(acts))
5276     return (void *)acts;
5277    
5278     @@ -2660,7 +2660,7 @@ int ovs_nla_copy_actions(struct net *net, const struct nlattr *attr,
5279     {
5280     int err;
5281    
5282     - *sfa = nla_alloc_flow_actions(nla_len(attr), log);
5283     + *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
5284     if (IS_ERR(*sfa))
5285     return PTR_ERR(*sfa);
5286    
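
The reshuffled size handling above moves the hard failure from allocation time to growth time: the initial buffer is silently clamped to MAX_ACTIONS_BUFSIZE (with a WARN_ON_ONCE guarding the invariant), and only a reserve_sfa_size() request that cannot fit under the cap returns -EMSGSIZE. A standalone mock of the growth policy, simplified to track a single current size rather than the kernel's offsets:

#include <stdio.h>

#define MAX_ACTIONS_BUFSIZE (32 * 1024)
#define EMSGSIZE 90

/* stand-in for reserve_sfa_size()'s growth policy */
static int grow(int cur_size, int req_size)
{
	int new_size = cur_size * 2;

	if (new_size > MAX_ACTIONS_BUFSIZE) {
		if (MAX_ACTIONS_BUFSIZE - cur_size < req_size)
			return -EMSGSIZE;	/* only now is it an error */
		new_size = MAX_ACTIONS_BUFSIZE;
	}
	return new_size;
}

int main(void)
{
	int size = 24 * 1024;

	size = grow(size, 512);		/* doubles, then clamps to 32K */
	printf("grown to %d\n", size);
	printf("next growth -> %d\n", grow(size, 512));	/* -EMSGSIZE */
	return 0;
}
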
5287     diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
5288     index 4b0a8288c98a..7c1cb08874d5 100644
5289     --- a/net/rxrpc/af_rxrpc.c
5290     +++ b/net/rxrpc/af_rxrpc.c
5291     @@ -823,6 +823,19 @@ static int rxrpc_release_sock(struct sock *sk)
5292     sock_orphan(sk);
5293     sk->sk_shutdown = SHUTDOWN_MASK;
5294    
5295     + /* We want to kill off all connections from a service socket
5296     + * as fast as possible because we can't share these; client
5297     + * sockets, on the other hand, can share an endpoint.
5298     + */
5299     + switch (sk->sk_state) {
5300     + case RXRPC_SERVER_BOUND:
5301     + case RXRPC_SERVER_BOUND2:
5302     + case RXRPC_SERVER_LISTENING:
5303     + case RXRPC_SERVER_LISTEN_DISABLED:
5304     + rx->local->service_closed = true;
5305     + break;
5306     + }
5307     +
5308     spin_lock_bh(&sk->sk_receive_queue.lock);
5309     sk->sk_state = RXRPC_CLOSE;
5310     spin_unlock_bh(&sk->sk_receive_queue.lock);
5311     diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
5312     index ea5600b747cc..e6c2c4f56fb1 100644
5313     --- a/net/rxrpc/ar-internal.h
5314     +++ b/net/rxrpc/ar-internal.h
5315     @@ -84,6 +84,7 @@ struct rxrpc_net {
5316     unsigned int nr_client_conns;
5317     unsigned int nr_active_client_conns;
5318     bool kill_all_client_conns;
5319     + bool live;
5320     spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
5321     spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
5322     struct list_head waiting_client_conns;
5323     @@ -265,6 +266,7 @@ struct rxrpc_local {
5324     rwlock_t services_lock; /* lock for services list */
5325     int debug_id; /* debug ID for printks */
5326     bool dead;
5327     + bool service_closed; /* Service socket closed */
5328     struct sockaddr_rxrpc srx; /* local address */
5329     };
5330    
5331     @@ -671,7 +673,7 @@ extern unsigned int rxrpc_max_call_lifetime;
5332     extern struct kmem_cache *rxrpc_call_jar;
5333    
5334     struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
5335     -struct rxrpc_call *rxrpc_alloc_call(gfp_t);
5336     +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
5337     struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
5338     struct rxrpc_conn_parameters *,
5339     struct sockaddr_rxrpc *,
5340     @@ -824,6 +826,7 @@ void rxrpc_process_connection(struct work_struct *);
5341     * conn_object.c
5342     */
5343     extern unsigned int rxrpc_connection_expiry;
5344     +extern unsigned int rxrpc_closed_conn_expiry;
5345    
5346     struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
5347     struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
5348     diff --git a/net/rxrpc/call_accept.c b/net/rxrpc/call_accept.c
5349     index cbd1701e813a..3028298ca561 100644
5350     --- a/net/rxrpc/call_accept.c
5351     +++ b/net/rxrpc/call_accept.c
5352     @@ -94,7 +94,7 @@ static int rxrpc_service_prealloc_one(struct rxrpc_sock *rx,
5353     /* Now it gets complicated, because calls get registered with the
5354     * socket here, particularly if a user ID is preassigned by the user.
5355     */
5356     - call = rxrpc_alloc_call(gfp);
5357     + call = rxrpc_alloc_call(rx, gfp);
5358     if (!call)
5359     return -ENOMEM;
5360     call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
5361     diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
5362     index fcdd6555a820..8a5a42e8ec23 100644
5363     --- a/net/rxrpc/call_object.c
5364     +++ b/net/rxrpc/call_object.c
5365     @@ -55,6 +55,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)
5366     rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
5367     }
5368    
5369     +static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
5370     +
5371     /*
5372     * find an extant server call
5373     * - called in process context with IRQs enabled
5374     @@ -95,7 +97,7 @@ struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
5375     /*
5376     * allocate a new call
5377     */
5378     -struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
5379     +struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
5380     {
5381     struct rxrpc_call *call;
5382    
5383     @@ -114,6 +116,14 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
5384     goto nomem_2;
5385    
5386     mutex_init(&call->user_mutex);
5387     +
5388     + /* Prevent lockdep reporting a deadlock false positive between the afs
5389     + * filesystem and sys_sendmsg() via the mmap sem.
5390     + */
5391     + if (rx->sk.sk_kern_sock)
5392     + lockdep_set_class(&call->user_mutex,
5393     + &rxrpc_call_user_mutex_lock_class_key);
5394     +
5395     setup_timer(&call->timer, rxrpc_call_timer_expired,
5396     (unsigned long)call);
5397     INIT_WORK(&call->processor, &rxrpc_process_call);
5398     @@ -150,7 +160,8 @@ struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
5399     /*
5400     * Allocate a new client call.
5401     */
5402     -static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
5403     +static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
5404     + struct sockaddr_rxrpc *srx,
5405     gfp_t gfp)
5406     {
5407     struct rxrpc_call *call;
5408     @@ -158,7 +169,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
5409    
5410     _enter("");
5411    
5412     - call = rxrpc_alloc_call(gfp);
5413     + call = rxrpc_alloc_call(rx, gfp);
5414     if (!call)
5415     return ERR_PTR(-ENOMEM);
5416     call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
5417     @@ -209,7 +220,7 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
5418    
5419     _enter("%p,%lx", rx, user_call_ID);
5420    
5421     - call = rxrpc_alloc_client_call(srx, gfp);
5422     + call = rxrpc_alloc_client_call(rx, srx, gfp);
5423     if (IS_ERR(call)) {
5424     release_sock(&rx->sk);
5425     _leave(" = %ld", PTR_ERR(call));
5426     diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
5427     index 5f9624bd311c..78a154173d90 100644
5428     --- a/net/rxrpc/conn_client.c
5429     +++ b/net/rxrpc/conn_client.c
5430     @@ -1061,6 +1061,8 @@ void rxrpc_discard_expired_client_conns(struct work_struct *work)
5431     expiry = rxrpc_conn_idle_client_expiry;
5432     if (nr_conns > rxrpc_reap_client_connections)
5433     expiry = rxrpc_conn_idle_client_fast_expiry;
5434     + if (conn->params.local->service_closed)
5435     + expiry = rxrpc_closed_conn_expiry * HZ;
5436    
5437     conn_expires_at = conn->idle_timestamp + expiry;
5438    
5439     diff --git a/net/rxrpc/conn_object.c b/net/rxrpc/conn_object.c
5440     index fe575798592f..a48c817b792b 100644
5441     --- a/net/rxrpc/conn_object.c
5442     +++ b/net/rxrpc/conn_object.c
5443     @@ -20,7 +20,8 @@
5444     /*
5445     * Time till a connection expires after last use (in seconds).
5446     */
5447     -unsigned int rxrpc_connection_expiry = 10 * 60;
5448     +unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
5449     +unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
5450    
5451     static void rxrpc_destroy_connection(struct rcu_head *);
5452    
5453     @@ -312,7 +313,7 @@ void rxrpc_put_service_conn(struct rxrpc_connection *conn)
5454     n = atomic_dec_return(&conn->usage);
5455     trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
5456     ASSERTCMP(n, >=, 0);
5457     - if (n == 0) {
5458     + if (n == 1) {
5459     rxnet = conn->params.local->rxnet;
5460     rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
5461     }
5462     @@ -353,15 +354,14 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
5463     struct rxrpc_net *rxnet =
5464     container_of(to_delayed_work(work),
5465     struct rxrpc_net, service_conn_reaper);
5466     - unsigned long reap_older_than, earliest, idle_timestamp, now;
5467     + unsigned long expire_at, earliest, idle_timestamp, now;
5468    
5469     LIST_HEAD(graveyard);
5470    
5471     _enter("");
5472    
5473     now = jiffies;
5474     - reap_older_than = now - rxrpc_connection_expiry * HZ;
5475     - earliest = ULONG_MAX;
5476     + earliest = now + MAX_JIFFY_OFFSET;
5477    
5478     write_lock(&rxnet->conn_lock);
5479     list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
5480     @@ -371,15 +371,21 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
5481     if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
5482     continue;
5483    
5484     - idle_timestamp = READ_ONCE(conn->idle_timestamp);
5485     - _debug("reap CONN %d { u=%d,t=%ld }",
5486     - conn->debug_id, atomic_read(&conn->usage),
5487     - (long)reap_older_than - (long)idle_timestamp);
5488     -
5489     - if (time_after(idle_timestamp, reap_older_than)) {
5490     - if (time_before(idle_timestamp, earliest))
5491     - earliest = idle_timestamp;
5492     - continue;
5493     + if (rxnet->live) {
5494     + idle_timestamp = READ_ONCE(conn->idle_timestamp);
5495     + expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
5496     + if (conn->params.local->service_closed)
5497     + expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
5498     +
5499     + _debug("reap CONN %d { u=%d,t=%ld }",
5500     + conn->debug_id, atomic_read(&conn->usage),
5501     + (long)expire_at - (long)now);
5502     +
5503     + if (time_before(now, expire_at)) {
5504     + if (time_before(expire_at, earliest))
5505     + earliest = expire_at;
5506     + continue;
5507     + }
5508     }
5509    
5510     /* The usage count sits at 1 whilst the object is unused on the
5511     @@ -387,6 +393,7 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
5512     */
5513     if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
5514     continue;
5515     + trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
5516    
5517     if (rxrpc_conn_is_client(conn))
5518     BUG();
5519     @@ -397,10 +404,10 @@ void rxrpc_service_connection_reaper(struct work_struct *work)
5520     }
5521     write_unlock(&rxnet->conn_lock);
5522    
5523     - if (earliest != ULONG_MAX) {
5524     - _debug("reschedule reaper %ld", (long) earliest - now);
5525     + if (earliest != now + MAX_JIFFY_OFFSET) {
5526     + _debug("reschedule reaper %ld", (long)earliest - (long)now);
5527     ASSERT(time_after(earliest, now));
5528     - rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
5529     + rxrpc_queue_delayed_work(&rxnet->service_conn_reaper,
5530     earliest - now);
5531     }
5532    
5533     @@ -429,7 +436,6 @@ void rxrpc_destroy_all_connections(struct rxrpc_net *rxnet)
5534    
5535     rxrpc_destroy_all_client_connections(rxnet);
5536    
5537     - rxrpc_connection_expiry = 0;
5538     cancel_delayed_work(&rxnet->client_conn_reaper);
5539     rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
5540     flush_workqueue(rxrpc_workqueue);
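
The rewritten reaper loop computes a per-connection expire_at from its idle timestamp, using the new 10-second rxrpc_closed_conn_expiry for connections on a closed service socket, keeps the earliest future expiry, and re-arms itself for earliest - now; it also now requeues the service reaper rather than the client one. A standalone sketch of that scheduling logic, with plain integers standing in for jiffies (1L << 30 plays the role of MAX_JIFFY_OFFSET, and time_before()'s wrap handling is elided):

#include <stdio.h>

#define CONN_EXPIRY   600	/* rxrpc_connection_expiry, in seconds */
#define CLOSED_EXPIRY  10	/* rxrpc_closed_conn_expiry */

struct conn { long idle_timestamp; int service_closed; };

int main(void)
{
	struct conn conns[] = { { 100, 0 }, { 650, 0 }, { 690, 1 } };
	long now = 705, earliest = now + (1L << 30), expire_at;
	int reaped = 0;

	for (int i = 0; i < 3; i++) {
		expire_at = conns[i].idle_timestamp +
			    (conns[i].service_closed ? CLOSED_EXPIRY
						     : CONN_EXPIRY);
		if (now < expire_at) {		/* time_before(now, ...) */
			if (expire_at < earliest)
				earliest = expire_at;
			continue;
		}
		reaped++;			/* would move to graveyard */
	}
	printf("reaped=%d, reaper re-armed in %ld\n", reaped, earliest - now);
	return 0;
}
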
5541     diff --git a/net/rxrpc/net_ns.c b/net/rxrpc/net_ns.c
5542     index 7edceb8522f5..684c51d600c7 100644
5543     --- a/net/rxrpc/net_ns.c
5544     +++ b/net/rxrpc/net_ns.c
5545     @@ -22,6 +22,7 @@ static __net_init int rxrpc_init_net(struct net *net)
5546     struct rxrpc_net *rxnet = rxrpc_net(net);
5547     int ret;
5548    
5549     + rxnet->live = true;
5550     get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
5551     rxnet->epoch |= RXRPC_RANDOM_EPOCH;
5552    
5553     @@ -60,6 +61,7 @@ static __net_init int rxrpc_init_net(struct net *net)
5554     return 0;
5555    
5556     err_proc:
5557     + rxnet->live = false;
5558     return ret;
5559     }
5560    
5561     @@ -70,6 +72,7 @@ static __net_exit void rxrpc_exit_net(struct net *net)
5562     {
5563     struct rxrpc_net *rxnet = rxrpc_net(net);
5564    
5565     + rxnet->live = false;
5566     rxrpc_destroy_all_calls(rxnet);
5567     rxrpc_destroy_all_connections(rxnet);
5568     rxrpc_destroy_all_locals(rxnet);
5569     diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
5570     index 9ea6f972767e..d2f51d6a253c 100644
5571     --- a/net/rxrpc/sendmsg.c
5572     +++ b/net/rxrpc/sendmsg.c
5573     @@ -563,8 +563,8 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
5574     /* The socket is now unlocked. */
5575     if (IS_ERR(call))
5576     return PTR_ERR(call);
5577     - rxrpc_put_call(call, rxrpc_call_put);
5578     - return 0;
5579     + ret = 0;
5580     + goto out_put_unlock;
5581     }
5582    
5583     call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
5584     @@ -633,6 +633,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
5585     ret = rxrpc_send_data(rx, call, msg, len, NULL);
5586     }
5587    
5588     +out_put_unlock:
5589     mutex_unlock(&call->user_mutex);
5590     error_put:
5591     rxrpc_put_call(call, rxrpc_call_put);
5592     diff --git a/net/sctp/stream.c b/net/sctp/stream.c
5593     index 724adf2786a2..9ea6057ed28b 100644
5594     --- a/net/sctp/stream.c
5595     +++ b/net/sctp/stream.c
5596     @@ -224,6 +224,9 @@ int sctp_send_reset_assoc(struct sctp_association *asoc)
5597     if (asoc->strreset_outstanding)
5598     return -EINPROGRESS;
5599    
5600     + if (!sctp_outq_is_empty(&asoc->outqueue))
5601     + return -EAGAIN;
5602     +
5603     chunk = sctp_make_strreset_tsnreq(asoc);
5604     if (!chunk)
5605     return -ENOMEM;
5606     @@ -538,12 +541,18 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
5607     i = asoc->strreset_inseq - request_seq - 1;
5608     result = asoc->strreset_result[i];
5609     if (result == SCTP_STRRESET_PERFORMED) {
5610     - next_tsn = asoc->next_tsn;
5611     + next_tsn = asoc->ctsn_ack_point + 1;
5612     init_tsn =
5613     sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
5614     }
5615     goto err;
5616     }
5617     +
5618     + if (!sctp_outq_is_empty(&asoc->outqueue)) {
5619     + result = SCTP_STRRESET_IN_PROGRESS;
5620     + goto err;
5621     + }
5622     +
5623     asoc->strreset_inseq++;
5624    
5625     if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
5626     @@ -554,9 +563,10 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
5627     goto out;
5628     }
5629    
5630     - /* G3: The same processing as though a SACK chunk with no gap report
5631     - * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
5632     - * received MUST be performed.
5633     + /* G4: The same processing as though a FWD-TSN chunk (as defined in
5634     + * [RFC3758]) with all streams affected and a new cumulative TSN
5635     + * ACK of the Receiver's Next TSN minus 1 were received MUST be
5636     + * performed.
5637     */
5638     max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
5639     sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
5640     @@ -571,10 +581,9 @@ struct sctp_chunk *sctp_process_strreset_tsnreq(
5641     sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
5642     init_tsn, GFP_ATOMIC);
5643    
5644     - /* G4: The same processing as though a FWD-TSN chunk (as defined in
5645     - * [RFC3758]) with all streams affected and a new cumulative TSN
5646     - * ACK of the Receiver's Next TSN minus 1 were received MUST be
5647     - * performed.
5648     + /* G3: The same processing as though a SACK chunk with no gap report
5649     + * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
5650     + * received MUST be performed.
5651     */
5652     sctp_outq_free(&asoc->outqueue);
5653    
5654     @@ -835,6 +844,7 @@ struct sctp_chunk *sctp_process_strreset_resp(
5655     if (result == SCTP_STRRESET_PERFORMED) {
5656     __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
5657     &asoc->peer.tsn_map);
5658     + LIST_HEAD(temp);
5659    
5660     sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
5661     sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
5662     @@ -843,7 +853,13 @@ struct sctp_chunk *sctp_process_strreset_resp(
5663     SCTP_TSN_MAP_INITIAL,
5664     stsn, GFP_ATOMIC);
5665    
5666     + /* Clean up sacked and abandoned queues only. As the
5667     + * out_chunk_list may not be empty, splice it to temp,
5668     + * then get it back after sctp_outq_free is done.
5669     + */
5670     + list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
5671     sctp_outq_free(&asoc->outqueue);
5672     + list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
5673    
5674     asoc->next_tsn = rtsn;
5675     asoc->ctsn_ack_point = asoc->next_tsn - 1;
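Three related corrections are visible above: an association reset is now refused (-EAGAIN, or SCTP_STRRESET_IN_PROGRESS on the receive side) while the outqueue still holds data, the G3/G4 comments are attached to the steps they actually describe, and sctp_process_strreset_resp() preserves not-yet-sent chunks across sctp_outq_free() by splicing out_chunk_list onto a temporary head and back, exactly as the in-patch comment says. A toy userspace sketch of that splice-around-free move (singly linked list, hypothetical names):

#include <stdlib.h>

struct chunk {
	struct chunk *next;
};

struct outq {
	struct chunk *out_list;		/* not yet transmitted */
	struct chunk *sacked;		/* stand-in for the other queues */
};

static void free_list(struct chunk **head)
{
	struct chunk *c;

	while ((c = *head)) {
		*head = c->next;
		free(c);
	}
}

static void outq_free(struct outq *q)
{
	free_list(&q->out_list);
	free_list(&q->sacked);
}

/* Reset the queue but keep the unsent chunks: park them on a temporary
 * head, free everything else, then splice them back -- the same move
 * the patch makes with list_splice_init().
 */
static void outq_reset_keep_unsent(struct outq *q)
{
	struct chunk *temp = q->out_list;	/* splice out */

	q->out_list = NULL;
	outq_free(q);				/* frees only the other queues */
	q->out_list = temp;			/* splice back */
}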
5676     diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
5677     index 4dad5da388d6..8cb40f8ffa5b 100644
5678     --- a/net/sunrpc/xprtsock.c
5679     +++ b/net/sunrpc/xprtsock.c
5680     @@ -2437,6 +2437,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
5681     case -ECONNREFUSED:
5682     case -ECONNRESET:
5683     case -ENETUNREACH:
5684     + case -EHOSTUNREACH:
5685     case -EADDRINUSE:
5686     case -ENOBUFS:
5687     /*
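The single added case above makes -EHOSTUNREACH from a connect attempt take the same path as the other transient errors, so the transport backs off and retries instead of failing the connection outright. An illustrative classifier for that policy (the kernel's real handling lives in xs_tcp_setup_socket(); this is only a sketch of the distinction being drawn):

#include <errno.h>
#include <stdbool.h>

static bool connect_error_is_transient(int err)
{
	switch (err) {
	case ECONNREFUSED:
	case ECONNRESET:
	case ENETUNREACH:
	case EHOSTUNREACH:	/* newly treated as transient by this fix */
	case EADDRINUSE:
	case ENOBUFS:
		return true;	/* back off and retry the connect */
	default:
		return false;	/* hard failure: report it */
	}
}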
5688     diff --git a/scripts/faddr2line b/scripts/faddr2line
5689     index 1f5ce959f596..39e07d8574dd 100755
5690     --- a/scripts/faddr2line
5691     +++ b/scripts/faddr2line
5692     @@ -44,9 +44,16 @@
5693     set -o errexit
5694     set -o nounset
5695    
5696     +READELF="${CROSS_COMPILE}readelf"
5697     +ADDR2LINE="${CROSS_COMPILE}addr2line"
5698     +SIZE="${CROSS_COMPILE}size"
5699     +NM="${CROSS_COMPILE}nm"
5700     +
5701     command -v awk >/dev/null 2>&1 || die "awk isn't installed"
5702     -command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
5703     -command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
5704     +command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
5705     +command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
5706     +command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
5707     +command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
5708    
5709     usage() {
5710     echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
5711     @@ -69,10 +76,10 @@ die() {
5712     find_dir_prefix() {
5713     local objfile=$1
5714    
5715     - local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
5716     + local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
5717     [[ -z $start_kernel_addr ]] && return
5718    
5719     - local file_line=$(addr2line -e $objfile $start_kernel_addr)
5720     + local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
5721     [[ -z $file_line ]] && return
5722    
5723     local prefix=${file_line%init/main.c:*}
5724     @@ -104,7 +111,7 @@ __faddr2line() {
5725    
5726     # Go through each of the object's symbols which match the func name.
5727     # In rare cases there might be duplicates.
5728     - file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
5729     + file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
5730     while read symbol; do
5731     local fields=($symbol)
5732     local sym_base=0x${fields[0]}
5733     @@ -156,10 +163,10 @@ __faddr2line() {
5734    
5735     # pass real address to addr2line
5736     echo "$func+$offset/$sym_size:"
5737     - addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
5738     + ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
5739     DONE=1
5740    
5741     - done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
5742     + done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
5743     }
5744    
5745     [[ $# -lt 2 ]] && usage
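With the change above, faddr2line resolves its binutils through an optional ${CROSS_COMPILE} prefix, so decoding an oops from a foreign-architecture vmlinux picks up the matching readelf, addr2line, size and nm. As a hypothetical example (the symbol and offset are made up), running CROSS_COMPILE=aarch64-linux-gnu- ./scripts/faddr2line vmlinux foo_func+0x1c/0x30 would now invoke aarch64-linux-gnu-addr2line and its siblings; with CROSS_COMPILE unset the variables expand to the bare tool names and behaviour is unchanged.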
5746     diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
5747     index 95209a5f8595..8daf16e1d421 100644
5748     --- a/security/integrity/ima/ima_policy.c
5749     +++ b/security/integrity/ima/ima_policy.c
5750     @@ -743,7 +743,7 @@ static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
5751     case Opt_fsuuid:
5752     ima_log_string(ab, "fsuuid", args[0].from);
5753    
5754     - if (uuid_is_null(&entry->fsuuid)) {
5755     + if (!uuid_is_null(&entry->fsuuid)) {
5756     result = -EINVAL;
5757     break;
5758     }
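The sense of the test above was inverted: entry->fsuuid starts out as the null UUID, so erroring while it is still null rejected every first use of fsuuid=, whereas the corrected check rejects only a value that has already been set (e.g. the option given twice in one rule). A small sketch of that set-once guard; the uuid type and function names here are stand-ins, not the kernel's:

#include <stdbool.h>
#include <string.h>

struct uuid { unsigned char b[16]; };	/* "null" means all zero */

static bool uuid_is_null(const struct uuid *u)
{
	static const struct uuid null_uuid;	/* zero-initialized */

	return memcmp(u, &null_uuid, sizeof(*u)) == 0;
}

/* Accept the value only while the field is still unset: a non-null
 * field means the option already appeared, which is the case the
 * corrected check turns into -EINVAL.
 */
static int set_fsuuid_once(struct uuid *field, const struct uuid *value)
{
	if (!uuid_is_null(field))
		return -1;	/* duplicate option */
	*field = *value;
	return 0;
}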
5759     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
5760     index 145e92d6ca94..b2d039537d5e 100644
5761     --- a/sound/pci/hda/patch_realtek.c
5762     +++ b/sound/pci/hda/patch_realtek.c
5763     @@ -3131,11 +3131,13 @@ static void alc256_shutup(struct hda_codec *codec)
5764     if (hp_pin_sense)
5765     msleep(85);
5766    
5767     + /* 3k pull low control for Headset jack. */
5768     + /* NOTE: call this before clearing the pin, otherwise codec stalls */
5769     + alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
5770     +
5771     snd_hda_codec_write(codec, hp_pin, 0,
5772     AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
5773    
5774     - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); /* 3k pull low control for Headset jack. */
5775     -
5776     if (hp_pin_sense)
5777     msleep(100);
5778    
5779     diff --git a/tools/gpio/gpio-event-mon.c b/tools/gpio/gpio-event-mon.c
5780     index 1c14c2595158..4b36323ea64b 100644
5781     --- a/tools/gpio/gpio-event-mon.c
5782     +++ b/tools/gpio/gpio-event-mon.c
5783     @@ -23,6 +23,7 @@
5784     #include <getopt.h>
5785     #include <inttypes.h>
5786     #include <sys/ioctl.h>
5787     +#include <sys/types.h>
5788     #include <linux/gpio.h>
5789    
5790     int monitor_device(const char *device_name,
5791     diff --git a/tools/power/cpupower/bench/system.c b/tools/power/cpupower/bench/system.c
5792     index c25a74ae51ba..2bb3eef7d5c1 100644
5793     --- a/tools/power/cpupower/bench/system.c
5794     +++ b/tools/power/cpupower/bench/system.c
5795     @@ -61,7 +61,7 @@ int set_cpufreq_governor(char *governor, unsigned int cpu)
5796    
5797     dprintf("set %s as cpufreq governor\n", governor);
5798    
5799     - if (cpupower_is_cpu_online(cpu) != 0) {
5800     + if (cpupower_is_cpu_online(cpu) != 1) {
5801     perror("cpufreq_cpu_exists");
5802     fprintf(stderr, "error: cpu %u does not exist\n", cpu);
5803     return -1;
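The comparison above only makes sense given the return convention of cpupower_is_cpu_online(); judging from this fix it reports 1 for an online CPU, 0 for an offline one, and a negative value on error, so testing != 0 took the error path precisely for the CPUs that are usable. A hypothetical re-implementation of such a tri-state probe, reading sysfs directly:

#include <stdio.h>

/* Assumed convention, inferred from the fix: 1 = online, 0 = offline,
 * negative on failure.  A stand-in for cpupower_is_cpu_online().
 */
static int is_cpu_online(unsigned int cpu)
{
	char path[64];
	FILE *f;
	int val = -1;

	if (cpu == 0)
		return 1;	/* cpu0 usually has no "online" file */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%u/online", cpu);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;		/* 1 or 0 as read from sysfs */
}

int main(void)
{
	/* Only == 1 means usable; != 0 would also accept errors and,
	 * worse, reject online CPUs -- which was the bug.
	 */
	if (is_cpu_online(1) != 1)
		fprintf(stderr, "cpu 1 is offline or unknown\n");
	return 0;
}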
5804     diff --git a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5805     index 1b5da0066ebf..5b3205f16217 100644
5806     --- a/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5807     +++ b/tools/power/cpupower/utils/idle_monitor/cpuidle_sysfs.c
5808     @@ -130,15 +130,18 @@ static struct cpuidle_monitor *cpuidle_register(void)
5809     {
5810     int num;
5811     char *tmp;
5812     + int this_cpu;
5813     +
5814     + this_cpu = sched_getcpu();
5815    
5816     /* Assume idle state count is the same for all CPUs */
5817     - cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
5818     + cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
5819    
5820     if (cpuidle_sysfs_monitor.hw_states_num <= 0)
5821     return NULL;
5822    
5823     for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
5824     - tmp = cpuidle_state_name(0, num);
5825     + tmp = cpuidle_state_name(this_cpu, num);
5826     if (tmp == NULL)
5827     continue;
5828    
5829     @@ -146,7 +149,7 @@ static struct cpuidle_monitor *cpuidle_register(void)
5830     strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
5831     free(tmp);
5832    
5833     - tmp = cpuidle_state_desc(0, num);
5834     + tmp = cpuidle_state_desc(this_cpu, num);
5835     if (tmp == NULL)
5836     continue;
5837     strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
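Hard-coding CPU 0 above breaks on systems where CPU 0 can be offlined; querying the CPU the monitor is actually running on guarantees that an online CPU supplies the state count, names and descriptions. A minimal standalone use of sched_getcpu(2):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	/* sched_getcpu() reports the CPU this thread currently runs on;
	 * unlike a hard-coded 0, that CPU is online by construction.
	 */
	int cpu = sched_getcpu();

	if (cpu < 0) {
		perror("sched_getcpu");
		return 1;
	}
	printf("running on cpu %d\n", cpu);
	return 0;
}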
5838     diff --git a/tools/usb/usbip/src/usbip_bind.c b/tools/usb/usbip/src/usbip_bind.c
5839     index fa46141ae68b..e121cfb1746a 100644
5840     --- a/tools/usb/usbip/src/usbip_bind.c
5841     +++ b/tools/usb/usbip/src/usbip_bind.c
5842     @@ -144,6 +144,7 @@ static int bind_device(char *busid)
5843     int rc;
5844     struct udev *udev;
5845     struct udev_device *dev;
5846     + const char *devpath;
5847    
5848     /* Check whether the device with this bus ID exists. */
5849     udev = udev_new();
5850     @@ -152,8 +153,16 @@ static int bind_device(char *busid)
5851     err("device with the specified bus ID does not exist");
5852     return -1;
5853     }
5854     + devpath = udev_device_get_devpath(dev);
5855     udev_unref(udev);
5856    
5857     + /* If the device is already attached to vhci_hcd - bail out */
5858     + if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
5859     + err("bind loop detected: device: %s is attached to %s\n",
5860     + devpath, USBIP_VHCI_DRV_NAME);
5861     + return -1;
5862     + }
5863     +
5864     rc = unbind_other(busid);
5865     if (rc == UNBIND_ST_FAILED) {
5866     err("could not unbind driver from device on busid %s", busid);
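The added check refuses to bind a device whose devpath already runs through the vhci_hcd root, i.e. a device that is itself a usbip import: exporting it again would forward traffic back into the attaching host. A sketch of the same substring test; the devpath literal is hypothetical, and USBIP_VHCI_DRV_NAME is assumed here to expand to "vhci_hcd":

#include <stdio.h>
#include <string.h>

#define VHCI_DRV_NAME "vhci_hcd"	/* assumed macro value */

static int is_vhci_attached(const char *devpath)
{
	/* A devpath such as ".../platform/vhci_hcd.0/usb3/3-1" means the
	 * device is already an usbip import; binding it again would
	 * create a loop.
	 */
	return strstr(devpath, VHCI_DRV_NAME) != NULL;
}

int main(void)
{
	const char *devpath = "/devices/platform/vhci_hcd.0/usb3/3-1";

	if (is_vhci_attached(devpath))
		fprintf(stderr, "bind loop detected: %s\n", devpath);
	return 0;
}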
5867     diff --git a/tools/usb/usbip/src/usbip_list.c b/tools/usb/usbip/src/usbip_list.c
5868     index f1b38e866dd7..d65a9f444174 100644
5869     --- a/tools/usb/usbip/src/usbip_list.c
5870     +++ b/tools/usb/usbip/src/usbip_list.c
5871     @@ -187,6 +187,7 @@ static int list_devices(bool parsable)
5872     const char *busid;
5873     char product_name[128];
5874     int ret = -1;
5875     + const char *devpath;
5876    
5877     /* Create libudev context. */
5878     udev = udev_new();
5879     @@ -209,6 +210,14 @@ static int list_devices(bool parsable)
5880     path = udev_list_entry_get_name(dev_list_entry);
5881     dev = udev_device_new_from_syspath(udev, path);
5882    
5883     + /* Ignore devices attached to vhci_hcd */
5884     + devpath = udev_device_get_devpath(dev);
5885     + if (strstr(devpath, USBIP_VHCI_DRV_NAME)) {
5886     + dbg("Skip the device %s already attached to %s\n",
5887     + devpath, USBIP_VHCI_DRV_NAME);
5888     + continue;
5889     + }
5890     +
5891     /* Get device information. */
5892     idVendor = udev_device_get_sysattr_value(dev, "idVendor");
5893     idProduct = udev_device_get_sysattr_value(dev, "idProduct");
5894     diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
5895     index 95cba0799828..9a07ee94a230 100644
5896     --- a/virt/kvm/arm/arm.c
5897     +++ b/virt/kvm/arm/arm.c
5898     @@ -612,7 +612,6 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
5899     int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
5900     {
5901     int ret;
5902     - sigset_t sigsaved;
5903    
5904     if (unlikely(!kvm_vcpu_initialized(vcpu)))
5905     return -ENOEXEC;
5906     @@ -630,8 +629,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
5907     if (run->immediate_exit)
5908     return -EINTR;
5909    
5910     - if (vcpu->sigset_active)
5911     - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
5912     + kvm_sigset_activate(vcpu);
5913    
5914     ret = 1;
5915     run->exit_reason = KVM_EXIT_UNKNOWN;
5916     @@ -753,8 +751,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
5917     kvm_pmu_update_run(vcpu);
5918     }
5919    
5920     - if (vcpu->sigset_active)
5921     - sigprocmask(SIG_SETMASK, &sigsaved, NULL);
5922     + kvm_sigset_deactivate(vcpu);
5923     +
5924     return ret;
5925     }
5926    
5927     diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
5928     index 2447d7c017e7..8401774f5aeb 100644
5929     --- a/virt/kvm/kvm_main.c
5930     +++ b/virt/kvm/kvm_main.c
5931     @@ -2073,6 +2073,29 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn)
5932     }
5933     EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
5934    
5935     +void kvm_sigset_activate(struct kvm_vcpu *vcpu)
5936     +{
5937     + if (!vcpu->sigset_active)
5938     + return;
5939     +
5940     + /*
5941     + * This does a lockless modification of ->real_blocked, which is fine
5942     + * because, only current can change ->real_blocked and all readers of
5943     + * ->real_blocked don't care as long ->real_blocked is always a subset
5944     + * of ->blocked.
5945     + */
5946     + sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
5947     +}
5948     +
5949     +void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
5950     +{
5951     + if (!vcpu->sigset_active)
5952     + return;
5953     +
5954     + sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
5955     + sigemptyset(&current->real_blocked);
5956     +}
5957     +
5958     static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
5959     {
5960     unsigned int old, val, grow;
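kvm_sigset_activate() and kvm_sigset_deactivate() above centralize the sigprocmask() pairs that the mips and arm hunks earlier in this patch delete; the saved mask now lives in current->real_blocked rather than in an on-stack sigset_t, with the lockless access justified by the comment in the hunk. The userspace shape of the same save/restore idiom, for orientation only:

#include <signal.h>
#include <stdio.h>

/* Analogy for activate/deactivate: install a temporary mask, remember
 * the old one, restore it afterwards.  In the kernel the saved mask is
 * parked in current->real_blocked instead of on the caller's stack.
 */
int main(void)
{
	sigset_t temp, saved;

	sigemptyset(&temp);
	sigaddset(&temp, SIGUSR1);	/* hold SIGUSR1 off for a while */

	if (sigprocmask(SIG_SETMASK, &temp, &saved) < 0) {
		perror("sigprocmask");
		return 1;
	}

	/* ... critical section with SIGUSR1 blocked ... */

	sigprocmask(SIG_SETMASK, &saved, NULL);	/* restore the old mask */
	return 0;
}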