Magellan Linux

Annotation of /trunk/kernel-alx-legacy/patches-4.9/0165-4.9.66-all-fixes.patch



Revision 3608
Fri Aug 14 07:34:29 2020 UTC by niro
File size: 157895 byte(s)
-added kernel-alx-legacy pkg
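
A minimal usage sketch (assumed workflow, not part of this page): this cumulative patch takes a 4.9.65 source tree to 4.9.66, as the first Makefile hunk below shows (SUBLEVEL 65 -> 66). With the kernel sources unpacked in a directory such as linux-4.9.65 (directory name assumed for illustration), it would typically be applied as:

    # dry run first to confirm the patch applies cleanly to the tree
    cd linux-4.9.65
    patch -p1 --dry-run < 0165-4.9.66-all-fixes.patch
    # apply for real; afterwards the Makefile reports SUBLEVEL = 66
    patch -p1 < 0165-4.9.66-all-fixes.patch
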
1 niro 3608 diff --git a/Makefile b/Makefile
2     index 87a641515e9c..8e62f9e2a08c 100644
3     --- a/Makefile
4     +++ b/Makefile
5     @@ -1,6 +1,6 @@
6     VERSION = 4
7     PATCHLEVEL = 9
8     -SUBLEVEL = 65
9     +SUBLEVEL = 66
10     EXTRAVERSION =
11     NAME = Roaring Lionus
12    
13     diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
14     index 9fe8e241335c..e1f6f0daa847 100644
15     --- a/arch/arm/mm/dump.c
16     +++ b/arch/arm/mm/dump.c
17     @@ -126,8 +126,8 @@ static const struct prot_bits section_bits[] = {
18     .val = PMD_SECT_USER,
19     .set = "USR",
20     }, {
21     - .mask = L_PMD_SECT_RDONLY,
22     - .val = L_PMD_SECT_RDONLY,
23     + .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
24     + .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
25     .set = "ro",
26     .clear = "RW",
27     #elif __LINUX_ARM_ARCH__ >= 6
28     diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
29     index 370581aeb871..4c587ad8bfe3 100644
30     --- a/arch/arm/mm/init.c
31     +++ b/arch/arm/mm/init.c
32     @@ -619,8 +619,8 @@ static struct section_perm ro_perms[] = {
33     .start = (unsigned long)_stext,
34     .end = (unsigned long)__init_begin,
35     #ifdef CONFIG_ARM_LPAE
36     - .mask = ~L_PMD_SECT_RDONLY,
37     - .prot = L_PMD_SECT_RDONLY,
38     + .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
39     + .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
40     #else
41     .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
42     .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
43     diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
44     index 61e214015b38..7acd3c5c7643 100644
45     --- a/arch/arm64/include/asm/pgtable.h
46     +++ b/arch/arm64/include/asm/pgtable.h
47     @@ -91,6 +91,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
48     ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
49     #define pte_valid_young(pte) \
50     ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
51     +#define pte_valid_user(pte) \
52     + ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))
53    
54     /*
55     * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
56     @@ -100,6 +102,18 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
57     #define pte_accessible(mm, pte) \
58     (mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))
59    
60     +/*
61     + * p??_access_permitted() is true for valid user mappings (subject to the
62     + * write permission check) other than user execute-only which do not have the
63     + * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
64     + */
65     +#define pte_access_permitted(pte, write) \
66     + (pte_valid_user(pte) && (!(write) || pte_write(pte)))
67     +#define pmd_access_permitted(pmd, write) \
68     + (pte_access_permitted(pmd_pte(pmd), (write)))
69     +#define pud_access_permitted(pud, write) \
70     + (pte_access_permitted(pud_pte(pud), (write)))
71     +
72     static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
73     {
74     pte_val(pte) &= ~pgprot_val(prot);
75     diff --git a/arch/mips/bcm47xx/leds.c b/arch/mips/bcm47xx/leds.c
76     index d20ae63eb3c2..46abe9e4e0e0 100644
77     --- a/arch/mips/bcm47xx/leds.c
78     +++ b/arch/mips/bcm47xx/leds.c
79     @@ -330,7 +330,7 @@ bcm47xx_leds_linksys_wrt54g3gv2[] __initconst = {
80     /* Verified on: WRT54GS V1.0 */
81     static const struct gpio_led
82     bcm47xx_leds_linksys_wrt54g_type_0101[] __initconst = {
83     - BCM47XX_GPIO_LED(0, "green", "wlan", 0, LEDS_GPIO_DEFSTATE_OFF),
84     + BCM47XX_GPIO_LED(0, "green", "wlan", 1, LEDS_GPIO_DEFSTATE_OFF),
85     BCM47XX_GPIO_LED(1, "green", "power", 0, LEDS_GPIO_DEFSTATE_ON),
86     BCM47XX_GPIO_LED(7, "green", "dmz", 1, LEDS_GPIO_DEFSTATE_OFF),
87     };
88     diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
89     index d61bc2aebf69..7d90a8710425 100644
90     --- a/arch/mips/boot/dts/brcm/Makefile
91     +++ b/arch/mips/boot/dts/brcm/Makefile
92     @@ -22,7 +22,6 @@ dtb-$(CONFIG_DT_NONE) += \
93     bcm63268-comtrend-vr-3032u.dtb \
94     bcm93384wvg.dtb \
95     bcm93384wvg_viper.dtb \
96     - bcm96358nb4ser.dtb \
97     bcm96368mvwg.dtb \
98     bcm9ejtagprb.dtb \
99     bcm97125cbmb.dtb \
100     diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
101     index 83054f79f72a..8333ce90b172 100644
102     --- a/arch/mips/include/asm/asmmacro.h
103     +++ b/arch/mips/include/asm/asmmacro.h
104     @@ -19,6 +19,9 @@
105     #include <asm/asmmacro-64.h>
106     #endif
107    
108     +/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
109     +#undef fp
110     +
111     /*
112     * Helper macros for generating raw instruction encodings.
113     */
114     @@ -105,6 +108,7 @@
115     .macro fpu_save_16odd thread
116     .set push
117     .set mips64r2
118     + .set fp=64
119     SET_HARDFLOAT
120     sdc1 $f1, THREAD_FPR1(\thread)
121     sdc1 $f3, THREAD_FPR3(\thread)
122     @@ -163,6 +167,7 @@
123     .macro fpu_restore_16odd thread
124     .set push
125     .set mips64r2
126     + .set fp=64
127     SET_HARDFLOAT
128     ldc1 $f1, THREAD_FPR1(\thread)
129     ldc1 $f3, THREAD_FPR3(\thread)
130     @@ -234,9 +239,6 @@
131     .endm
132    
133     #ifdef TOOLCHAIN_SUPPORTS_MSA
134     -/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
135     -#undef fp
136     -
137     .macro _cfcmsa rd, cs
138     .set push
139     .set mips32r2
140     diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
141     index 3de026034c35..11890e6e4093 100644
142     --- a/arch/mips/kernel/ptrace.c
143     +++ b/arch/mips/kernel/ptrace.c
144     @@ -647,6 +647,19 @@ static const struct user_regset_view user_mips64_view = {
145     .n = ARRAY_SIZE(mips64_regsets),
146     };
147    
148     +#ifdef CONFIG_MIPS32_N32
149     +
150     +static const struct user_regset_view user_mipsn32_view = {
151     + .name = "mipsn32",
152     + .e_flags = EF_MIPS_ABI2,
153     + .e_machine = ELF_ARCH,
154     + .ei_osabi = ELF_OSABI,
155     + .regsets = mips64_regsets,
156     + .n = ARRAY_SIZE(mips64_regsets),
157     +};
158     +
159     +#endif /* CONFIG_MIPS32_N32 */
160     +
161     #endif /* CONFIG_64BIT */
162    
163     const struct user_regset_view *task_user_regset_view(struct task_struct *task)
164     @@ -657,6 +670,10 @@ const struct user_regset_view *task_user_regset_view(struct task_struct *task)
165     #ifdef CONFIG_MIPS32_O32
166     if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
167     return &user_mips_view;
168     +#endif
169     +#ifdef CONFIG_MIPS32_N32
170     + if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
171     + return &user_mipsn32_view;
172     #endif
173     return &user_mips64_view;
174     #endif
175     diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
176     index 628c5132b3d8..a7962f79c4fe 100644
177     --- a/arch/mips/pci/pci-mt7620.c
178     +++ b/arch/mips/pci/pci-mt7620.c
179     @@ -121,7 +121,7 @@ static int wait_pciephy_busy(void)
180     else
181     break;
182     if (retry++ > WAITRETRY_MAX) {
183     - printk(KERN_WARN "PCIE-PHY retry failed.\n");
184     + pr_warn("PCIE-PHY retry failed.\n");
185     return -1;
186     }
187     }
188     diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
189     index 6f892c1f3ad7..0696142048d5 100644
190     --- a/arch/mips/ralink/mt7620.c
191     +++ b/arch/mips/ralink/mt7620.c
192     @@ -141,8 +141,8 @@ static struct rt2880_pmx_func i2c_grp_mt7628[] = {
193     FUNC("i2c", 0, 4, 2),
194     };
195    
196     -static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
197     -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
198     +static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) };
199     +static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) };
200     static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) };
201     static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
202    
203     diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
204     index 41e60a9c7db2..e775f80ae28c 100644
205     --- a/arch/parisc/kernel/syscall.S
206     +++ b/arch/parisc/kernel/syscall.S
207     @@ -690,15 +690,15 @@ cas_action:
208     /* ELF32 Process entry path */
209     lws_compare_and_swap_2:
210     #ifdef CONFIG_64BIT
211     - /* Clip the input registers */
212     + /* Clip the input registers. We don't need to clip %r23 as we
213     + only use it for word operations */
214     depdi 0, 31, 32, %r26
215     depdi 0, 31, 32, %r25
216     depdi 0, 31, 32, %r24
217     - depdi 0, 31, 32, %r23
218     #endif
219    
220     /* Check the validity of the size pointer */
221     - subi,>>= 4, %r23, %r0
222     + subi,>>= 3, %r23, %r0
223     b,n lws_exit_nosys
224    
225     /* Jump to the functions which will load the old and new values into
226     diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
227     index bbe77aed198d..3600c0d99ae9 100644
228     --- a/arch/powerpc/kernel/signal.c
229     +++ b/arch/powerpc/kernel/signal.c
230     @@ -102,7 +102,7 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
231     static void do_signal(struct task_struct *tsk)
232     {
233     sigset_t *oldset = sigmask_to_save();
234     - struct ksignal ksig;
235     + struct ksignal ksig = { .sig = 0 };
236     int ret;
237     int is32 = is_32bit_task();
238    
239     diff --git a/arch/s390/include/asm/asm-prototypes.h b/arch/s390/include/asm/asm-prototypes.h
240     new file mode 100644
241     index 000000000000..2c3413b0ca52
242     --- /dev/null
243     +++ b/arch/s390/include/asm/asm-prototypes.h
244     @@ -0,0 +1,8 @@
245     +#ifndef _ASM_S390_PROTOTYPES_H
246     +
247     +#include <linux/kvm_host.h>
248     +#include <linux/ftrace.h>
249     +#include <asm/fpu/api.h>
250     +#include <asm-generic/asm-prototypes.h>
251     +
252     +#endif /* _ASM_S390_PROTOTYPES_H */
253     diff --git a/arch/s390/include/asm/switch_to.h b/arch/s390/include/asm/switch_to.h
254     index 12d45f0cfdd9..dde6b52359c5 100644
255     --- a/arch/s390/include/asm/switch_to.h
256     +++ b/arch/s390/include/asm/switch_to.h
257     @@ -34,8 +34,8 @@ static inline void restore_access_regs(unsigned int *acrs)
258     save_access_regs(&prev->thread.acrs[0]); \
259     save_ri_cb(prev->thread.ri_cb); \
260     } \
261     + update_cr_regs(next); \
262     if (next->mm) { \
263     - update_cr_regs(next); \
264     set_cpu_flag(CIF_FPU); \
265     restore_access_regs(&next->thread.acrs[0]); \
266     restore_ri_cb(next->thread.ri_cb, prev->thread.ri_cb); \
267     diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
268     index c74c59236f44..aaf9dab3c193 100644
269     --- a/arch/s390/kernel/dis.c
270     +++ b/arch/s390/kernel/dis.c
271     @@ -1548,6 +1548,7 @@ static struct s390_insn opcode_e7[] = {
272     { "vfsq", 0xce, INSTR_VRR_VV000MM },
273     { "vfs", 0xe2, INSTR_VRR_VVV00MM },
274     { "vftci", 0x4a, INSTR_VRI_VVIMM },
275     + { "", 0, INSTR_INVALID }
276     };
277    
278     static struct s390_insn opcode_eb[] = {
279     @@ -1953,7 +1954,7 @@ void show_code(struct pt_regs *regs)
280     {
281     char *mode = user_mode(regs) ? "User" : "Krnl";
282     unsigned char code[64];
283     - char buffer[64], *ptr;
284     + char buffer[128], *ptr;
285     mm_segment_t old_fs;
286     unsigned long addr;
287     int start, end, opsize, hops, i;
288     @@ -2016,7 +2017,7 @@ void show_code(struct pt_regs *regs)
289     start += opsize;
290     pr_cont("%s", buffer);
291     ptr = buffer;
292     - ptr += sprintf(ptr, "\n ");
293     + ptr += sprintf(ptr, "\n\t ");
294     hops++;
295     }
296     pr_cont("\n");
297     diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
298     index 0c196861bc38..29d87444a655 100644
299     --- a/arch/s390/kernel/early.c
300     +++ b/arch/s390/kernel/early.c
301     @@ -345,8 +345,10 @@ static __init void detect_machine_facilities(void)
302     S390_lowcore.machine_flags |= MACHINE_FLAG_IDTE;
303     if (test_facility(40))
304     S390_lowcore.machine_flags |= MACHINE_FLAG_LPP;
305     - if (test_facility(50) && test_facility(73))
306     + if (test_facility(50) && test_facility(73)) {
307     S390_lowcore.machine_flags |= MACHINE_FLAG_TE;
308     + __ctl_set_bit(0, 55);
309     + }
310     if (test_facility(51))
311     S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
312     if (test_facility(129)) {
313     diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
314     index bba4fa74b321..172fe1121d99 100644
315     --- a/arch/s390/kernel/process.c
316     +++ b/arch/s390/kernel/process.c
317     @@ -120,6 +120,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
318     memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
319     memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
320     clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
321     + p->thread.per_flags = 0;
322     /* Initialize per thread user and system timer values */
323     ti = task_thread_info(p);
324     ti->user_timer = 0;
325     diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
326     index fffa0e5462af..70cdb03d4acd 100644
327     --- a/arch/s390/kernel/runtime_instr.c
328     +++ b/arch/s390/kernel/runtime_instr.c
329     @@ -47,11 +47,13 @@ void exit_thread_runtime_instr(void)
330     {
331     struct task_struct *task = current;
332    
333     + preempt_disable();
334     if (!task->thread.ri_cb)
335     return;
336     disable_runtime_instr();
337     kfree(task->thread.ri_cb);
338     task->thread.ri_cb = NULL;
339     + preempt_enable();
340     }
341    
342     SYSCALL_DEFINE1(s390_runtime_instr, int, command)
343     @@ -62,9 +64,7 @@ SYSCALL_DEFINE1(s390_runtime_instr, int, command)
344     return -EOPNOTSUPP;
345    
346     if (command == S390_RUNTIME_INSTR_STOP) {
347     - preempt_disable();
348     exit_thread_runtime_instr();
349     - preempt_enable();
350     return 0;
351     }
352    
353     diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
354     index e7b0e7ff4c58..be9df513141e 100644
355     --- a/arch/x86/entry/entry_64.S
356     +++ b/arch/x86/entry/entry_64.S
357     @@ -54,15 +54,19 @@ ENTRY(native_usergs_sysret64)
358     ENDPROC(native_usergs_sysret64)
359     #endif /* CONFIG_PARAVIRT */
360    
361     -.macro TRACE_IRQS_IRETQ
362     +.macro TRACE_IRQS_FLAGS flags:req
363     #ifdef CONFIG_TRACE_IRQFLAGS
364     - bt $9, EFLAGS(%rsp) /* interrupts off? */
365     + bt $9, \flags /* interrupts off? */
366     jnc 1f
367     TRACE_IRQS_ON
368     1:
369     #endif
370     .endm
371    
372     +.macro TRACE_IRQS_IRETQ
373     + TRACE_IRQS_FLAGS EFLAGS(%rsp)
374     +.endm
375     +
376     /*
377     * When dynamic function tracer is enabled it will add a breakpoint
378     * to all locations that it is about to modify, sync CPUs, update
379     @@ -868,11 +872,13 @@ idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
380     ENTRY(native_load_gs_index)
381     pushfq
382     DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
383     + TRACE_IRQS_OFF
384     SWAPGS
385     .Lgs_change:
386     movl %edi, %gs
387     2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
388     SWAPGS
389     + TRACE_IRQS_FLAGS (%rsp)
390     popfq
391     ret
392     END(native_load_gs_index)
393     diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
394     index 8ca1eca5038d..4fbf0c94f2d1 100644
395     --- a/arch/x86/kvm/svm.c
396     +++ b/arch/x86/kvm/svm.c
397     @@ -3583,6 +3583,13 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
398     u32 ecx = msr->index;
399     u64 data = msr->data;
400     switch (ecx) {
401     + case MSR_IA32_CR_PAT:
402     + if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
403     + return 1;
404     + vcpu->arch.pat = data;
405     + svm->vmcb->save.g_pat = data;
406     + mark_dirty(svm->vmcb, VMCB_NPT);
407     + break;
408     case MSR_IA32_TSC:
409     kvm_write_tsc(vcpu, msr);
410     break;
411     diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
412     index a8ae57acb6f6..0f0b27d96f27 100644
413     --- a/arch/x86/kvm/vmx.c
414     +++ b/arch/x86/kvm/vmx.c
415     @@ -10715,6 +10715,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
416     vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
417     vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
418     vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
419     + vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
420     + vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
421    
422     /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
423     if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
424     diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
425     index 767be7c76034..1754e094bc28 100644
426     --- a/arch/x86/lib/x86-opcode-map.txt
427     +++ b/arch/x86/lib/x86-opcode-map.txt
428     @@ -896,7 +896,7 @@ EndTable
429    
430     GrpTable: Grp3_1
431     0: TEST Eb,Ib
432     -1:
433     +1: TEST Eb,Ib
434     2: NOT Eb
435     3: NEG Eb
436     4: MUL AL,Eb
437     diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
438     index 1dd796025472..8b5ff88aa4f8 100644
439     --- a/arch/x86/mm/fault.c
440     +++ b/arch/x86/mm/fault.c
441     @@ -1393,7 +1393,17 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
442     * make sure we exit gracefully rather than endlessly redo
443     * the fault. Since we never set FAULT_FLAG_RETRY_NOWAIT, if
444     * we get VM_FAULT_RETRY back, the mmap_sem has been unlocked.
445     + *
446     + * Note that handle_userfault() may also release and reacquire mmap_sem
447     + * (and not return with VM_FAULT_RETRY), when returning to userland to
448     + * repeat the page fault later with a VM_FAULT_NOPAGE retval
449     + * (potentially after handling any pending signal during the return to
450     + * userland). The return to userland is identified whenever
451     + * FAULT_FLAG_USER|FAULT_FLAG_KILLABLE are both set in flags.
452     + * Thus we have to be careful about not touching vma after handling the
453     + * fault, so we read the pkey beforehand.
454     */
455     + pkey = vma_pkey(vma);
456     fault = handle_mm_fault(vma, address, flags);
457     major |= fault & VM_FAULT_MAJOR;
458    
459     @@ -1420,7 +1430,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
460     return;
461     }
462    
463     - pkey = vma_pkey(vma);
464     up_read(&mm->mmap_sem);
465     if (unlikely(fault & VM_FAULT_ERROR)) {
466     mm_fault_error(regs, error_code, address, &pkey, fault);
467     diff --git a/block/blk-core.c b/block/blk-core.c
468     index 95379fc83805..b1c76aa73492 100644
469     --- a/block/blk-core.c
470     +++ b/block/blk-core.c
471     @@ -282,6 +282,7 @@ EXPORT_SYMBOL(blk_stop_queue);
472     void blk_sync_queue(struct request_queue *q)
473     {
474     del_timer_sync(&q->timeout);
475     + cancel_work_sync(&q->timeout_work);
476    
477     if (q->mq_ops) {
478     struct blk_mq_hw_ctx *hctx;
479     @@ -720,6 +721,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
480     setup_timer(&q->backing_dev_info.laptop_mode_wb_timer,
481     laptop_mode_timer_fn, (unsigned long) q);
482     setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
483     + INIT_WORK(&q->timeout_work, NULL);
484     INIT_LIST_HEAD(&q->queue_head);
485     INIT_LIST_HEAD(&q->timeout_list);
486     INIT_LIST_HEAD(&q->icq_list);
487     diff --git a/block/blk-timeout.c b/block/blk-timeout.c
488     index a30441a200c0..220661a50f58 100644
489     --- a/block/blk-timeout.c
490     +++ b/block/blk-timeout.c
491     @@ -135,8 +135,6 @@ void blk_timeout_work(struct work_struct *work)
492     struct request *rq, *tmp;
493     int next_set = 0;
494    
495     - if (blk_queue_enter(q, true))
496     - return;
497     spin_lock_irqsave(q->queue_lock, flags);
498    
499     list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
500     @@ -146,7 +144,6 @@ void blk_timeout_work(struct work_struct *work)
501     mod_timer(&q->timeout, round_jiffies_up(next));
502    
503     spin_unlock_irqrestore(q->queue_lock, flags);
504     - blk_queue_exit(q);
505     }
506    
507     /**
508     diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
509     index 51874695a730..c3bcb7f5986e 100644
510     --- a/drivers/acpi/ec.c
511     +++ b/drivers/acpi/ec.c
512     @@ -482,8 +482,11 @@ static inline void __acpi_ec_enable_event(struct acpi_ec *ec)
513     {
514     if (!test_and_set_bit(EC_FLAGS_QUERY_ENABLED, &ec->flags))
515     ec_log_drv("event unblocked");
516     - if (!test_bit(EC_FLAGS_QUERY_PENDING, &ec->flags))
517     - advance_transaction(ec);
518     + /*
519     + * Unconditionally invoke this once after enabling the event
520     + * handling mechanism to detect the pending events.
521     + */
522     + advance_transaction(ec);
523     }
524    
525     static inline void __acpi_ec_disable_event(struct acpi_ec *ec)
526     @@ -1458,11 +1461,10 @@ static int ec_install_handlers(struct acpi_ec *ec, bool handle_events)
527     if (test_bit(EC_FLAGS_STARTED, &ec->flags) &&
528     ec->reference_count >= 1)
529     acpi_ec_enable_gpe(ec, true);
530     -
531     - /* EC is fully operational, allow queries */
532     - acpi_ec_enable_event(ec);
533     }
534     }
535     + /* EC is fully operational, allow queries */
536     + acpi_ec_enable_event(ec);
537    
538     return 0;
539     }
540     diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
541     index 0e1ec37070d1..6475a1343483 100644
542     --- a/drivers/ata/libata-eh.c
543     +++ b/drivers/ata/libata-eh.c
544     @@ -2329,8 +2329,8 @@ static void ata_eh_link_autopsy(struct ata_link *link)
545     if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
546     eflags |= ATA_EFLAG_DUBIOUS_XFER;
547     ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
548     + trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
549     }
550     - trace_ata_eh_link_autopsy(dev, ehc->i.action, all_err_mask);
551     DPRINTK("EXIT\n");
552     }
553    
554     diff --git a/drivers/base/power/opp/of.c b/drivers/base/power/opp/of.c
555     index b52c617947ad..69379443e5eb 100644
556     --- a/drivers/base/power/opp/of.c
557     +++ b/drivers/base/power/opp/of.c
558     @@ -348,6 +348,7 @@ static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
559     if (ret) {
560     dev_err(dev, "%s: Failed to add OPP, %d\n", __func__,
561     ret);
562     + of_node_put(np);
563     goto free_table;
564     }
565     }
566     diff --git a/drivers/clk/qcom/gcc-ipq4019.c b/drivers/clk/qcom/gcc-ipq4019.c
567     index b593065de8db..8ab6ce4d976f 100644
568     --- a/drivers/clk/qcom/gcc-ipq4019.c
569     +++ b/drivers/clk/qcom/gcc-ipq4019.c
570     @@ -525,10 +525,20 @@ static struct clk_rcg2 sdcc1_apps_clk_src = {
571     };
572    
573     static const struct freq_tbl ftbl_gcc_apps_clk[] = {
574     - F(48000000, P_XO, 1, 0, 0),
575     + F(48000000, P_XO, 1, 0, 0),
576     F(200000000, P_FEPLL200, 1, 0, 0),
577     + F(384000000, P_DDRPLLAPSS, 1, 0, 0),
578     + F(413000000, P_DDRPLLAPSS, 1, 0, 0),
579     + F(448000000, P_DDRPLLAPSS, 1, 0, 0),
580     + F(488000000, P_DDRPLLAPSS, 1, 0, 0),
581     F(500000000, P_FEPLL500, 1, 0, 0),
582     - F(626000000, P_DDRPLLAPSS, 1, 0, 0),
583     + F(512000000, P_DDRPLLAPSS, 1, 0, 0),
584     + F(537000000, P_DDRPLLAPSS, 1, 0, 0),
585     + F(565000000, P_DDRPLLAPSS, 1, 0, 0),
586     + F(597000000, P_DDRPLLAPSS, 1, 0, 0),
587     + F(632000000, P_DDRPLLAPSS, 1, 0, 0),
588     + F(672000000, P_DDRPLLAPSS, 1, 0, 0),
589     + F(716000000, P_DDRPLLAPSS, 1, 0, 0),
590     { }
591     };
592    
593     diff --git a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
594     index 0cca3601d99e..df97e25aec76 100644
595     --- a/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
596     +++ b/drivers/clk/sunxi-ng/ccu-sun6i-a31.c
597     @@ -468,8 +468,8 @@ static SUNXI_CCU_MUX_WITH_GATE(daudio0_clk, "daudio0", daudio_parents,
598     static SUNXI_CCU_MUX_WITH_GATE(daudio1_clk, "daudio1", daudio_parents,
599     0x0b4, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
600    
601     -static SUNXI_CCU_M_WITH_GATE(spdif_clk, "spdif", "pll-audio",
602     - 0x0c0, 0, 4, BIT(31), CLK_SET_RATE_PARENT);
603     +static SUNXI_CCU_MUX_WITH_GATE(spdif_clk, "spdif", daudio_parents,
604     + 0x0c0, 16, 2, BIT(31), CLK_SET_RATE_PARENT);
605    
606     static SUNXI_CCU_GATE(usb_phy0_clk, "usb-phy0", "osc24M",
607     0x0cc, BIT(8), 0);
608     diff --git a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
609     index 9bd1f78a0547..e1dc4e5b34e1 100644
610     --- a/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
611     +++ b/drivers/clk/sunxi-ng/ccu-sun8i-a33.c
612     @@ -752,6 +752,13 @@ static const struct sunxi_ccu_desc sun8i_a33_ccu_desc = {
613     .num_resets = ARRAY_SIZE(sun8i_a33_ccu_resets),
614     };
615    
616     +static struct ccu_mux_nb sun8i_a33_cpu_nb = {
617     + .common = &cpux_clk.common,
618     + .cm = &cpux_clk.mux,
619     + .delay_us = 1, /* > 8 clock cycles at 24 MHz */
620     + .bypass_index = 1, /* index of 24 MHz oscillator */
621     +};
622     +
623     static void __init sun8i_a33_ccu_setup(struct device_node *node)
624     {
625     void __iomem *reg;
626     @@ -775,6 +782,9 @@ static void __init sun8i_a33_ccu_setup(struct device_node *node)
627     writel(val, reg + SUN8I_A33_PLL_MIPI_REG);
628    
629     sunxi_ccu_probe(node, reg, &sun8i_a33_ccu_desc);
630     +
631     + ccu_mux_notifier_register(pll_cpux_clk.common.hw.clk,
632     + &sun8i_a33_cpu_nb);
633     }
634     CLK_OF_DECLARE(sun8i_a33_ccu, "allwinner,sun8i-a33-ccu",
635     sun8i_a33_ccu_setup);
636     diff --git a/drivers/clk/ti/clk-dra7-atl.c b/drivers/clk/ti/clk-dra7-atl.c
637     index c77333230bdf..7d060ffe8975 100644
638     --- a/drivers/clk/ti/clk-dra7-atl.c
639     +++ b/drivers/clk/ti/clk-dra7-atl.c
640     @@ -265,8 +265,7 @@ static int of_dra7_atl_clk_probe(struct platform_device *pdev)
641    
642     /* Get configuration for the ATL instances */
643     snprintf(prop, sizeof(prop), "atl%u", i);
644     - of_node_get(node);
645     - cfg_node = of_find_node_by_name(node, prop);
646     + cfg_node = of_get_child_by_name(node, prop);
647     if (cfg_node) {
648     ret = of_property_read_u32(cfg_node, "bws",
649     &cdesc->bws);
650     diff --git a/drivers/crypto/marvell/cesa.h b/drivers/crypto/marvell/cesa.h
651     index e423d33decd4..36291840a12c 100644
652     --- a/drivers/crypto/marvell/cesa.h
653     +++ b/drivers/crypto/marvell/cesa.h
654     @@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
655     #define CESA_TDMA_SRC_IN_SRAM BIT(30)
656     #define CESA_TDMA_END_OF_REQ BIT(29)
657     #define CESA_TDMA_BREAK_CHAIN BIT(28)
658     -#define CESA_TDMA_TYPE_MSK GENMASK(27, 0)
659     +#define CESA_TDMA_SET_STATE BIT(27)
660     +#define CESA_TDMA_TYPE_MSK GENMASK(26, 0)
661     #define CESA_TDMA_DUMMY 0
662     #define CESA_TDMA_DATA 1
663     #define CESA_TDMA_OP 2
664     diff --git a/drivers/crypto/marvell/hash.c b/drivers/crypto/marvell/hash.c
665     index 77712b375b84..662cf4ddb04b 100644
666     --- a/drivers/crypto/marvell/hash.c
667     +++ b/drivers/crypto/marvell/hash.c
668     @@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
669     sreq->offset = 0;
670     }
671    
672     +static void mv_cesa_ahash_dma_step(struct ahash_request *req)
673     +{
674     + struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
675     + struct mv_cesa_req *base = &creq->base;
676     +
677     + /* We must explicitly set the digest state. */
678     + if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
679     + struct mv_cesa_engine *engine = base->engine;
680     + int i;
681     +
682     + /* Set the hash state in the IVDIG regs. */
683     + for (i = 0; i < ARRAY_SIZE(creq->state); i++)
684     + writel_relaxed(creq->state[i], engine->regs +
685     + CESA_IVDIG(i));
686     + }
687     +
688     + mv_cesa_dma_step(base);
689     +}
690     +
691     static void mv_cesa_ahash_step(struct crypto_async_request *req)
692     {
693     struct ahash_request *ahashreq = ahash_request_cast(req);
694     struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);
695    
696     if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
697     - mv_cesa_dma_step(&creq->base);
698     + mv_cesa_ahash_dma_step(ahashreq);
699     else
700     mv_cesa_ahash_std_step(ahashreq);
701     }
702     @@ -562,11 +581,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
703     struct mv_cesa_ahash_dma_iter iter;
704     struct mv_cesa_op_ctx *op = NULL;
705     unsigned int frag_len;
706     + bool set_state = false;
707     int ret;
708    
709     basereq->chain.first = NULL;
710     basereq->chain.last = NULL;
711    
712     + if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
713     + set_state = true;
714     +
715     if (creq->src_nents) {
716     ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
717     DMA_TO_DEVICE);
718     @@ -650,6 +673,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
719     basereq->chain.last->flags |= (CESA_TDMA_END_OF_REQ |
720     CESA_TDMA_BREAK_CHAIN);
721    
722     + if (set_state) {
723     + /*
724     + * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
725     + * let the step logic know that the IVDIG registers should be
726     + * explicitly set before launching a TDMA chain.
727     + */
728     + basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
729     + }
730     +
731     return 0;
732    
733     err_free_tdma:
734     diff --git a/drivers/crypto/marvell/tdma.c b/drivers/crypto/marvell/tdma.c
735     index 9fd7a5fbaa1b..0cda6e3f2b4b 100644
736     --- a/drivers/crypto/marvell/tdma.c
737     +++ b/drivers/crypto/marvell/tdma.c
738     @@ -112,7 +112,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
739     last->next = dreq->chain.first;
740     engine->chain.last = dreq->chain.last;
741    
742     - if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
743     + /*
744     + * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
745     + * the last element of the current chain, or if the request
746     + * being queued needs the IV regs to be set before lauching
747     + * the request.
748     + */
749     + if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
750     + !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
751     last->next_dma = dreq->chain.first->cur_dma;
752     }
753     }
754     diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c
755     index 245d759d5ffc..6059d81e701a 100644
756     --- a/drivers/dma/zx296702_dma.c
757     +++ b/drivers/dma/zx296702_dma.c
758     @@ -813,6 +813,7 @@ static int zx_dma_probe(struct platform_device *op)
759     INIT_LIST_HEAD(&d->slave.channels);
760     dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
761     dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
762     + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
763     dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
764     d->slave.dev = &op->dev;
765     d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
766     diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c
767     index 1ef85b0c2b1f..d27e9361e236 100644
768     --- a/drivers/gpio/gpio-mockup.c
769     +++ b/drivers/gpio/gpio-mockup.c
770     @@ -126,7 +126,7 @@ static int mockup_gpio_probe(struct platform_device *pdev)
771     int i;
772     int base;
773     int ngpio;
774     - char chip_name[sizeof(GPIO_NAME) + 3];
775     + char *chip_name;
776    
777     if (gpio_mockup_params_nr < 2)
778     return -EINVAL;
779     @@ -146,8 +146,12 @@ static int mockup_gpio_probe(struct platform_device *pdev)
780     ngpio = gpio_mockup_ranges[i * 2 + 1] - base;
781    
782     if (ngpio >= 0) {
783     - sprintf(chip_name, "%s-%c", GPIO_NAME,
784     - pins_name_start + i);
785     + chip_name = devm_kasprintf(dev, GFP_KERNEL,
786     + "%s-%c", GPIO_NAME,
787     + pins_name_start + i);
788     + if (!chip_name)
789     + return -ENOMEM;
790     +
791     ret = mockup_gpio_add(dev, &cntr[i],
792     chip_name, base, ngpio);
793     } else {
794     diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
795     index ffd673615772..26412d2f8c98 100644
796     --- a/drivers/gpu/drm/armada/Makefile
797     +++ b/drivers/gpu/drm/armada/Makefile
798     @@ -4,3 +4,5 @@ armada-y += armada_510.o
799     armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
800    
801     obj-$(CONFIG_DRM_ARMADA) := armada.o
802     +
803     +CFLAGS_armada_trace.o := -I$(src)
804     diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
805     index ee07bb4a57b7..11f54df0c19b 100644
806     --- a/drivers/gpu/drm/drm_mm.c
807     +++ b/drivers/gpu/drm/drm_mm.c
808     @@ -348,14 +348,12 @@ static void drm_mm_insert_helper_range(struct drm_mm_node *hole_node,
809    
810     BUG_ON(!hole_node->hole_follows || node->allocated);
811    
812     - if (adj_start < start)
813     - adj_start = start;
814     - if (adj_end > end)
815     - adj_end = end;
816     -
817     if (mm->color_adjust)
818     mm->color_adjust(hole_node, color, &adj_start, &adj_end);
819    
820     + adj_start = max(adj_start, start);
821     + adj_end = min(adj_end, end);
822     +
823     if (flags & DRM_MM_CREATE_TOP)
824     adj_start = adj_end - size;
825    
826     @@ -566,17 +564,15 @@ static struct drm_mm_node *drm_mm_search_free_in_range_generic(const struct drm_
827     flags & DRM_MM_SEARCH_BELOW) {
828     u64 hole_size = adj_end - adj_start;
829    
830     - if (adj_start < start)
831     - adj_start = start;
832     - if (adj_end > end)
833     - adj_end = end;
834     -
835     if (mm->color_adjust) {
836     mm->color_adjust(entry, color, &adj_start, &adj_end);
837     if (adj_end <= adj_start)
838     continue;
839     }
840    
841     + adj_start = max(adj_start, start);
842     + adj_end = min(adj_end, end);
843     +
844     if (!check_free_hole(adj_start, adj_end, size, alignment))
845     continue;
846    
847     diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
848     index 3ce9ba30d827..a19ec06f9e42 100644
849     --- a/drivers/gpu/drm/i915/intel_drv.h
850     +++ b/drivers/gpu/drm/i915/intel_drv.h
851     @@ -457,6 +457,7 @@ struct intel_crtc_scaler_state {
852    
853     struct intel_pipe_wm {
854     struct intel_wm_level wm[5];
855     + struct intel_wm_level raw_wm[5];
856     uint32_t linetime;
857     bool fbc_wm_enabled;
858     bool pipe_enabled;
859     diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
860     index 277a8026460b..49de4760cc16 100644
861     --- a/drivers/gpu/drm/i915/intel_pm.c
862     +++ b/drivers/gpu/drm/i915/intel_pm.c
863     @@ -27,7 +27,6 @@
864    
865     #include <linux/cpufreq.h>
866     #include <drm/drm_plane_helper.h>
867     -#include <drm/drm_atomic_helper.h>
868     #include "i915_drv.h"
869     #include "intel_drv.h"
870     #include "../../../platform/x86/intel_ips.h"
871     @@ -2018,9 +2017,9 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
872     const struct intel_crtc *intel_crtc,
873     int level,
874     struct intel_crtc_state *cstate,
875     - const struct intel_plane_state *pristate,
876     - const struct intel_plane_state *sprstate,
877     - const struct intel_plane_state *curstate,
878     + struct intel_plane_state *pristate,
879     + struct intel_plane_state *sprstate,
880     + struct intel_plane_state *curstate,
881     struct intel_wm_level *result)
882     {
883     uint16_t pri_latency = dev_priv->wm.pri_latency[level];
884     @@ -2342,24 +2341,28 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
885     struct intel_pipe_wm *pipe_wm;
886     struct drm_device *dev = state->dev;
887     const struct drm_i915_private *dev_priv = to_i915(dev);
888     - struct drm_plane *plane;
889     - const struct drm_plane_state *plane_state;
890     - const struct intel_plane_state *pristate = NULL;
891     - const struct intel_plane_state *sprstate = NULL;
892     - const struct intel_plane_state *curstate = NULL;
893     + struct intel_plane *intel_plane;
894     + struct intel_plane_state *pristate = NULL;
895     + struct intel_plane_state *sprstate = NULL;
896     + struct intel_plane_state *curstate = NULL;
897     int level, max_level = ilk_wm_max_level(dev), usable_level;
898     struct ilk_wm_maximums max;
899    
900     pipe_wm = &cstate->wm.ilk.optimal;
901    
902     - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &cstate->base) {
903     - const struct intel_plane_state *ps = to_intel_plane_state(plane_state);
904     + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) {
905     + struct intel_plane_state *ps;
906    
907     - if (plane->type == DRM_PLANE_TYPE_PRIMARY)
908     + ps = intel_atomic_get_existing_plane_state(state,
909     + intel_plane);
910     + if (!ps)
911     + continue;
912     +
913     + if (intel_plane->base.type == DRM_PLANE_TYPE_PRIMARY)
914     pristate = ps;
915     - else if (plane->type == DRM_PLANE_TYPE_OVERLAY)
916     + else if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY)
917     sprstate = ps;
918     - else if (plane->type == DRM_PLANE_TYPE_CURSOR)
919     + else if (intel_plane->base.type == DRM_PLANE_TYPE_CURSOR)
920     curstate = ps;
921     }
922    
923     @@ -2381,9 +2384,11 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
924     if (pipe_wm->sprites_scaled)
925     usable_level = 0;
926    
927     - memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
928     ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate,
929     - pristate, sprstate, curstate, &pipe_wm->wm[0]);
930     + pristate, sprstate, curstate, &pipe_wm->raw_wm[0]);
931     +
932     + memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
933     + pipe_wm->wm[0] = pipe_wm->raw_wm[0];
934    
935     if (IS_HASWELL(dev) || IS_BROADWELL(dev))
936     pipe_wm->linetime = hsw_compute_linetime_wm(cstate);
937     @@ -2393,8 +2398,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
938    
939     ilk_compute_wm_reg_maximums(dev, 1, &max);
940    
941     - for (level = 1; level <= usable_level; level++) {
942     - struct intel_wm_level *wm = &pipe_wm->wm[level];
943     + for (level = 1; level <= max_level; level++) {
944     + struct intel_wm_level *wm = &pipe_wm->raw_wm[level];
945    
946     ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate,
947     pristate, sprstate, curstate, wm);
948     @@ -2404,10 +2409,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *cstate)
949     * register maximums since such watermarks are
950     * always invalid.
951     */
952     - if (!ilk_validate_wm_level(level, &max, wm)) {
953     - memset(wm, 0, sizeof(*wm));
954     - break;
955     - }
956     + if (level > usable_level)
957     + continue;
958     +
959     + if (ilk_validate_wm_level(level, &max, wm))
960     + pipe_wm->wm[level] = *wm;
961     + else
962     + usable_level = level;
963     }
964    
965     return 0;
966     diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
967     index cf83f6507ec8..48dfc163233e 100644
968     --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
969     +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
970     @@ -321,7 +321,8 @@ static void mtk_drm_unbind(struct device *dev)
971     {
972     struct mtk_drm_private *private = dev_get_drvdata(dev);
973    
974     - drm_put_dev(private->drm);
975     + drm_dev_unregister(private->drm);
976     + drm_dev_unref(private->drm);
977     private->drm = NULL;
978     }
979    
980     diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
981     index 6e6c59a661b6..223944a3ba18 100644
982     --- a/drivers/gpu/drm/sun4i/sun4i_backend.c
983     +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
984     @@ -172,7 +172,7 @@ int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
985     ret = sun4i_backend_drm_format_to_layer(plane, fb->pixel_format, &val);
986     if (ret) {
987     DRM_DEBUG_DRIVER("Invalid format\n");
988     - return val;
989     + return ret;
990     }
991    
992     regmap_update_bits(backend->regs, SUN4I_BACKEND_ATTCTL_REG1(layer),
993     diff --git a/drivers/iio/light/cm3232.c b/drivers/iio/light/cm3232.c
994     index fe89b6823217..263e97235ea0 100644
995     --- a/drivers/iio/light/cm3232.c
996     +++ b/drivers/iio/light/cm3232.c
997     @@ -119,7 +119,7 @@ static int cm3232_reg_init(struct cm3232_chip *chip)
998     if (ret < 0)
999     dev_err(&chip->client->dev, "Error writing reg_cmd\n");
1000    
1001     - return 0;
1002     + return ret;
1003     }
1004    
1005     /**
1006     diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
1007     index 1eee8f7e75ca..84f91858b5e6 100644
1008     --- a/drivers/infiniband/ulp/srp/ib_srp.c
1009     +++ b/drivers/infiniband/ulp/srp/ib_srp.c
1010     @@ -648,12 +648,19 @@ static void srp_path_rec_completion(int status,
1011     static int srp_lookup_path(struct srp_rdma_ch *ch)
1012     {
1013     struct srp_target_port *target = ch->target;
1014     - int ret;
1015     + int ret = -ENODEV;
1016    
1017     ch->path.numb_path = 1;
1018    
1019     init_completion(&ch->done);
1020    
1021     + /*
1022     + * Avoid that the SCSI host can be removed by srp_remove_target()
1023     + * before srp_path_rec_completion() is called.
1024     + */
1025     + if (!scsi_host_get(target->scsi_host))
1026     + goto out;
1027     +
1028     ch->path_query_id = ib_sa_path_rec_get(&srp_sa_client,
1029     target->srp_host->srp_dev->dev,
1030     target->srp_host->port,
1031     @@ -667,18 +674,24 @@ static int srp_lookup_path(struct srp_rdma_ch *ch)
1032     GFP_KERNEL,
1033     srp_path_rec_completion,
1034     ch, &ch->path_query);
1035     - if (ch->path_query_id < 0)
1036     - return ch->path_query_id;
1037     + ret = ch->path_query_id;
1038     + if (ret < 0)
1039     + goto put;
1040    
1041     ret = wait_for_completion_interruptible(&ch->done);
1042     if (ret < 0)
1043     - return ret;
1044     + goto put;
1045    
1046     - if (ch->status < 0)
1047     + ret = ch->status;
1048     + if (ret < 0)
1049     shost_printk(KERN_WARNING, target->scsi_host,
1050     PFX "Path record query failed\n");
1051    
1052     - return ch->status;
1053     +put:
1054     + scsi_host_put(target->scsi_host);
1055     +
1056     +out:
1057     + return ret;
1058     }
1059    
1060     static int srp_send_req(struct srp_rdma_ch *ch, bool multich)
1061     diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
1062     index 0b1f69ed2e92..b9748970df4a 100644
1063     --- a/drivers/infiniband/ulp/srpt/ib_srpt.c
1064     +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
1065     @@ -2750,7 +2750,7 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
1066     {
1067     const char *p;
1068     unsigned len, count, leading_zero_bytes;
1069     - int ret, rc;
1070     + int ret;
1071    
1072     p = name;
1073     if (strncasecmp(p, "0x", 2) == 0)
1074     @@ -2762,10 +2762,9 @@ static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
1075     count = min(len / 2, 16U);
1076     leading_zero_bytes = 16 - count;
1077     memset(i_port_id, 0, leading_zero_bytes);
1078     - rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
1079     - if (rc < 0)
1080     - pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
1081     - ret = 0;
1082     + ret = hex2bin(i_port_id + leading_zero_bytes, p, count);
1083     + if (ret < 0)
1084     + pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", ret);
1085     out:
1086     return ret;
1087     }
1088     diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
1089     index 24d388d74011..a37576a1798d 100644
1090     --- a/drivers/irqchip/irq-gic-v3.c
1091     +++ b/drivers/irqchip/irq-gic-v3.c
1092     @@ -1022,18 +1022,18 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1093     int nr_parts;
1094     struct partition_affinity *parts;
1095    
1096     - parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
1097     + parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
1098     if (!parts_node)
1099     return;
1100    
1101     nr_parts = of_get_child_count(parts_node);
1102    
1103     if (!nr_parts)
1104     - return;
1105     + goto out_put_node;
1106    
1107     parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
1108     if (WARN_ON(!parts))
1109     - return;
1110     + goto out_put_node;
1111    
1112     for_each_child_of_node(parts_node, child_part) {
1113     struct partition_affinity *part;
1114     @@ -1100,6 +1100,9 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
1115    
1116     gic_data.ppi_descs[i] = desc;
1117     }
1118     +
1119     +out_put_node:
1120     + of_node_put(parts_node);
1121     }
1122    
1123     static void __init gic_of_setup_kvm_info(struct device_node *node)
1124     diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
1125     index ca4abe1ccd8d..3fba31cea66e 100644
1126     --- a/drivers/md/bcache/alloc.c
1127     +++ b/drivers/md/bcache/alloc.c
1128     @@ -404,7 +404,8 @@ long bch_bucket_alloc(struct cache *ca, unsigned reserve, bool wait)
1129    
1130     finish_wait(&ca->set->bucket_wait, &w);
1131     out:
1132     - wake_up_process(ca->alloc_thread);
1133     + if (ca->alloc_thread)
1134     + wake_up_process(ca->alloc_thread);
1135    
1136     trace_bcache_alloc(ca, reserve);
1137    
1138     diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
1139     index 8bf9667ff46b..7643f72adb1c 100644
1140     --- a/drivers/md/dm-bufio.c
1141     +++ b/drivers/md/dm-bufio.c
1142     @@ -937,7 +937,8 @@ static void __get_memory_limit(struct dm_bufio_client *c,
1143     buffers = c->minimum_buffers;
1144    
1145     *limit_buffers = buffers;
1146     - *threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
1147     + *threshold_buffers = mult_frac(buffers,
1148     + DM_BUFIO_WRITEBACK_PERCENT, 100);
1149     }
1150    
1151     /*
1152     @@ -1856,19 +1857,15 @@ static int __init dm_bufio_init(void)
1153     memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1154     memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1155    
1156     - mem = (__u64)((totalram_pages - totalhigh_pages) *
1157     - DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;
1158     + mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
1159     + DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
1160    
1161     if (mem > ULONG_MAX)
1162     mem = ULONG_MAX;
1163    
1164     #ifdef CONFIG_MMU
1165     - /*
1166     - * Get the size of vmalloc space the same way as VMALLOC_TOTAL
1167     - * in fs/proc/internal.h
1168     - */
1169     - if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
1170     - mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
1171     + if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1172     + mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
1173     #endif
1174    
1175     dm_bufio_default_cache_size = mem;
1176     diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
1177     index 40ceba1fe8be..1609d4971104 100644
1178     --- a/drivers/md/dm-core.h
1179     +++ b/drivers/md/dm-core.h
1180     @@ -29,7 +29,6 @@ struct dm_kobject_holder {
1181     * DM targets must _not_ deference a mapped_device to directly access its members!
1182     */
1183     struct mapped_device {
1184     - struct srcu_struct io_barrier;
1185     struct mutex suspend_lock;
1186    
1187     /*
1188     @@ -127,6 +126,8 @@ struct mapped_device {
1189     struct blk_mq_tag_set *tag_set;
1190     bool use_blk_mq:1;
1191     bool init_tio_pdu:1;
1192     +
1193     + struct srcu_struct io_barrier;
1194     };
1195    
1196     void dm_init_md_queue(struct mapped_device *md);
1197     diff --git a/drivers/md/dm.c b/drivers/md/dm.c
1198     index e66f4040d84b..c5522551122f 100644
1199     --- a/drivers/md/dm.c
1200     +++ b/drivers/md/dm.c
1201     @@ -21,6 +21,7 @@
1202     #include <linux/delay.h>
1203     #include <linux/wait.h>
1204     #include <linux/pr.h>
1205     +#include <linux/vmalloc.h>
1206    
1207     #define DM_MSG_PREFIX "core"
1208    
1209     @@ -1511,7 +1512,7 @@ static struct mapped_device *alloc_dev(int minor)
1210     struct mapped_device *md;
1211     void *old_md;
1212    
1213     - md = kzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
1214     + md = vzalloc_node(sizeof(*md), numa_node_id);
1215     if (!md) {
1216     DMWARN("unable to allocate device, out of memory.");
1217     return NULL;
1218     @@ -1605,7 +1606,7 @@ static struct mapped_device *alloc_dev(int minor)
1219     bad_minor:
1220     module_put(THIS_MODULE);
1221     bad_module_get:
1222     - kfree(md);
1223     + kvfree(md);
1224     return NULL;
1225     }
1226    
1227     @@ -1624,7 +1625,7 @@ static void free_dev(struct mapped_device *md)
1228     free_minor(minor);
1229    
1230     module_put(THIS_MODULE);
1231     - kfree(md);
1232     + kvfree(md);
1233     }
1234    
1235     static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
1236     @@ -2514,11 +2515,15 @@ struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
1237    
1238     md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
1239    
1240     - if (test_bit(DMF_FREEING, &md->flags) ||
1241     - dm_deleting_md(md))
1242     - return NULL;
1243     -
1244     + spin_lock(&_minor_lock);
1245     + if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
1246     + md = NULL;
1247     + goto out;
1248     + }
1249     dm_get(md);
1250     +out:
1251     + spin_unlock(&_minor_lock);
1252     +
1253     return md;
1254     }
1255    
1256     diff --git a/drivers/media/rc/ir-lirc-codec.c b/drivers/media/rc/ir-lirc-codec.c
1257     index b49f80cb49c9..d9a5710532f4 100644
1258     --- a/drivers/media/rc/ir-lirc-codec.c
1259     +++ b/drivers/media/rc/ir-lirc-codec.c
1260     @@ -286,11 +286,14 @@ static long ir_lirc_ioctl(struct file *filep, unsigned int cmd,
1261     if (!dev->max_timeout)
1262     return -ENOSYS;
1263    
1264     + /* Check for multiply overflow */
1265     + if (val > U32_MAX / 1000)
1266     + return -EINVAL;
1267     +
1268     tmp = val * 1000;
1269    
1270     - if (tmp < dev->min_timeout ||
1271     - tmp > dev->max_timeout)
1272     - return -EINVAL;
1273     + if (tmp < dev->min_timeout || tmp > dev->max_timeout)
1274     + return -EINVAL;
1275    
1276     if (dev->s_timeout)
1277     ret = dev->s_timeout(dev, tmp);
1278     diff --git a/drivers/media/usb/as102/as102_fw.c b/drivers/media/usb/as102/as102_fw.c
1279     index 5a28ce3a1d49..38dbc128340d 100644
1280     --- a/drivers/media/usb/as102/as102_fw.c
1281     +++ b/drivers/media/usb/as102/as102_fw.c
1282     @@ -101,18 +101,23 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
1283     unsigned char *cmd,
1284     const struct firmware *firmware) {
1285    
1286     - struct as10x_fw_pkt_t fw_pkt;
1287     + struct as10x_fw_pkt_t *fw_pkt;
1288     int total_read_bytes = 0, errno = 0;
1289     unsigned char addr_has_changed = 0;
1290    
1291     + fw_pkt = kmalloc(sizeof(*fw_pkt), GFP_KERNEL);
1292     + if (!fw_pkt)
1293     + return -ENOMEM;
1294     +
1295     +
1296     for (total_read_bytes = 0; total_read_bytes < firmware->size; ) {
1297     int read_bytes = 0, data_len = 0;
1298    
1299     /* parse intel hex line */
1300     read_bytes = parse_hex_line(
1301     (u8 *) (firmware->data + total_read_bytes),
1302     - fw_pkt.raw.address,
1303     - fw_pkt.raw.data,
1304     + fw_pkt->raw.address,
1305     + fw_pkt->raw.data,
1306     &data_len,
1307     &addr_has_changed);
1308    
1309     @@ -122,28 +127,28 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
1310     /* detect the end of file */
1311     total_read_bytes += read_bytes;
1312     if (total_read_bytes == firmware->size) {
1313     - fw_pkt.u.request[0] = 0x00;
1314     - fw_pkt.u.request[1] = 0x03;
1315     + fw_pkt->u.request[0] = 0x00;
1316     + fw_pkt->u.request[1] = 0x03;
1317    
1318     /* send EOF command */
1319     errno = bus_adap->ops->upload_fw_pkt(bus_adap,
1320     (uint8_t *)
1321     - &fw_pkt, 2, 0);
1322     + fw_pkt, 2, 0);
1323     if (errno < 0)
1324     goto error;
1325     } else {
1326     if (!addr_has_changed) {
1327     /* prepare command to send */
1328     - fw_pkt.u.request[0] = 0x00;
1329     - fw_pkt.u.request[1] = 0x01;
1330     + fw_pkt->u.request[0] = 0x00;
1331     + fw_pkt->u.request[1] = 0x01;
1332    
1333     - data_len += sizeof(fw_pkt.u.request);
1334     - data_len += sizeof(fw_pkt.raw.address);
1335     + data_len += sizeof(fw_pkt->u.request);
1336     + data_len += sizeof(fw_pkt->raw.address);
1337    
1338     /* send cmd to device */
1339     errno = bus_adap->ops->upload_fw_pkt(bus_adap,
1340     (uint8_t *)
1341     - &fw_pkt,
1342     + fw_pkt,
1343     data_len,
1344     0);
1345     if (errno < 0)
1346     @@ -152,6 +157,7 @@ static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap,
1347     }
1348     }
1349     error:
1350     + kfree(fw_pkt);
1351     return (errno == 0) ? total_read_bytes : errno;
1352     }
1353    
1354     diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
1355     index be9e3335dcb7..921cf1edb3b1 100644
1356     --- a/drivers/media/usb/cx231xx/cx231xx-cards.c
1357     +++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
1358     @@ -1622,7 +1622,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
1359     nr = dev->devno;
1360    
1361     assoc_desc = udev->actconfig->intf_assoc[0];
1362     - if (assoc_desc->bFirstInterface != ifnum) {
1363     + if (!assoc_desc || assoc_desc->bFirstInterface != ifnum) {
1364     dev_err(d, "Not found matching IAD interface\n");
1365     retval = -ENODEV;
1366     goto err_if;
1367     diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
1368     index adc2147fcff7..bd6884223a0d 100644
1369     --- a/drivers/media/v4l2-core/v4l2-ctrls.c
1370     +++ b/drivers/media/v4l2-core/v4l2-ctrls.c
1371     @@ -1219,6 +1219,16 @@ void v4l2_ctrl_fill(u32 id, const char **name, enum v4l2_ctrl_type *type,
1372     }
1373     EXPORT_SYMBOL(v4l2_ctrl_fill);
1374    
1375     +static u32 user_flags(const struct v4l2_ctrl *ctrl)
1376     +{
1377     + u32 flags = ctrl->flags;
1378     +
1379     + if (ctrl->is_ptr)
1380     + flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
1381     +
1382     + return flags;
1383     +}
1384     +
1385     static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 changes)
1386     {
1387     memset(ev->reserved, 0, sizeof(ev->reserved));
1388     @@ -1226,7 +1236,7 @@ static void fill_event(struct v4l2_event *ev, struct v4l2_ctrl *ctrl, u32 change
1389     ev->id = ctrl->id;
1390     ev->u.ctrl.changes = changes;
1391     ev->u.ctrl.type = ctrl->type;
1392     - ev->u.ctrl.flags = ctrl->flags;
1393     + ev->u.ctrl.flags = user_flags(ctrl);
1394     if (ctrl->is_ptr)
1395     ev->u.ctrl.value64 = 0;
1396     else
1397     @@ -2550,10 +2560,8 @@ int v4l2_query_ext_ctrl(struct v4l2_ctrl_handler *hdl, struct v4l2_query_ext_ctr
1398     else
1399     qc->id = ctrl->id;
1400     strlcpy(qc->name, ctrl->name, sizeof(qc->name));
1401     - qc->flags = ctrl->flags;
1402     + qc->flags = user_flags(ctrl);
1403     qc->type = ctrl->type;
1404     - if (ctrl->is_ptr)
1405     - qc->flags |= V4L2_CTRL_FLAG_HAS_PAYLOAD;
1406     qc->elem_size = ctrl->elem_size;
1407     qc->elems = ctrl->elems;
1408     qc->nr_of_dims = ctrl->nr_of_dims;
1409     diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c
1410     index dbf256217b3e..ada2d88fd4c7 100644
1411     --- a/drivers/mtd/nand/mtk_ecc.c
1412     +++ b/drivers/mtd/nand/mtk_ecc.c
1413     @@ -116,6 +116,11 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
1414     op = ECC_DECODE;
1415     dec = readw(ecc->regs + ECC_DECDONE);
1416     if (dec & ecc->sectors) {
1417     + /*
1418     + * Clear decode IRQ status once again to ensure that
1419     + * there will be no extra IRQ.
1420     + */
1421     + readw(ecc->regs + ECC_DECIRQ_STA);
1422     ecc->sectors = 0;
1423     complete(&ecc->done);
1424     } else {
1425     @@ -131,8 +136,6 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id)
1426     }
1427     }
1428    
1429     - writel(0, ecc->regs + ECC_IRQ_REG(op));
1430     -
1431     return IRQ_HANDLED;
1432     }
1433    
1434     @@ -342,6 +345,12 @@ void mtk_ecc_disable(struct mtk_ecc *ecc)
1435    
1436     /* disable it */
1437     mtk_ecc_wait_idle(ecc, op);
1438     + if (op == ECC_DECODE)
1439     + /*
1440     + * Clear decode IRQ status in case there is a timeout to wait
1441     + * decode IRQ.
1442     + */
1443     + readw(ecc->regs + ECC_DECIRQ_STA);
1444     writew(0, ecc->regs + ECC_IRQ_REG(op));
1445     writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op));
1446    
1447     diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
1448     index 31a6ee307d80..a77cfd74a92e 100644
1449     --- a/drivers/mtd/nand/nand_base.c
1450     +++ b/drivers/mtd/nand/nand_base.c
1451     @@ -2935,15 +2935,18 @@ static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
1452     size_t *retlen, const uint8_t *buf)
1453     {
1454     struct nand_chip *chip = mtd_to_nand(mtd);
1455     + int chipnr = (int)(to >> chip->chip_shift);
1456     struct mtd_oob_ops ops;
1457     int ret;
1458    
1459     - /* Wait for the device to get ready */
1460     - panic_nand_wait(mtd, chip, 400);
1461     -
1462     /* Grab the device */
1463     panic_nand_get_device(chip, mtd, FL_WRITING);
1464    
1465     + chip->select_chip(mtd, chipnr);
1466     +
1467     + /* Wait for the device to get ready */
1468     + panic_nand_wait(mtd, chip, 400);
1469     +
1470     memset(&ops, 0, sizeof(ops));
1471     ops.len = len;
1472     ops.datbuf = (uint8_t *)buf;
1473     diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
1474     index c178cb0dd219..f3a516b3f108 100644
1475     --- a/drivers/mtd/nand/omap2.c
1476     +++ b/drivers/mtd/nand/omap2.c
1477     @@ -1133,129 +1133,172 @@ static u8 bch8_polynomial[] = {0xef, 0x51, 0x2e, 0x09, 0xed, 0x93, 0x9a, 0xc2,
1478     0x97, 0x79, 0xe5, 0x24, 0xb5};
1479    
1480     /**
1481     - * omap_calculate_ecc_bch - Generate bytes of ECC bytes
1482     + * _omap_calculate_ecc_bch - Generate ECC bytes for one sector
1483     * @mtd: MTD device structure
1484     * @dat: The pointer to data on which ecc is computed
1485     * @ecc_code: The ecc_code buffer
1486     + * @i: The sector number (for a multi sector page)
1487     *
1488     - * Support calculating of BCH4/8 ecc vectors for the page
1489     + * Support calculating of BCH4/8/16 ECC vectors for one sector
1490     + * within a page. Sector number is in @i.
1491     */
1492     -static int __maybe_unused omap_calculate_ecc_bch(struct mtd_info *mtd,
1493     - const u_char *dat, u_char *ecc_calc)
1494     +static int _omap_calculate_ecc_bch(struct mtd_info *mtd,
1495     + const u_char *dat, u_char *ecc_calc, int i)
1496     {
1497     struct omap_nand_info *info = mtd_to_omap(mtd);
1498     int eccbytes = info->nand.ecc.bytes;
1499     struct gpmc_nand_regs *gpmc_regs = &info->reg;
1500     u8 *ecc_code;
1501     - unsigned long nsectors, bch_val1, bch_val2, bch_val3, bch_val4;
1502     + unsigned long bch_val1, bch_val2, bch_val3, bch_val4;
1503     u32 val;
1504     - int i, j;
1505     + int j;
1506     +
1507     + ecc_code = ecc_calc;
1508     + switch (info->ecc_opt) {
1509     + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1510     + case OMAP_ECC_BCH8_CODE_HW:
1511     + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1512     + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1513     + bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1514     + bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1515     + *ecc_code++ = (bch_val4 & 0xFF);
1516     + *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1517     + *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1518     + *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1519     + *ecc_code++ = (bch_val3 & 0xFF);
1520     + *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1521     + *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1522     + *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1523     + *ecc_code++ = (bch_val2 & 0xFF);
1524     + *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1525     + *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1526     + *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1527     + *ecc_code++ = (bch_val1 & 0xFF);
1528     + break;
1529     + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1530     + case OMAP_ECC_BCH4_CODE_HW:
1531     + bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1532     + bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1533     + *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1534     + *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1535     + *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1536     + ((bch_val1 >> 28) & 0xF);
1537     + *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1538     + *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1539     + *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1540     + *ecc_code++ = ((bch_val1 & 0xF) << 4);
1541     + break;
1542     + case OMAP_ECC_BCH16_CODE_HW:
1543     + val = readl(gpmc_regs->gpmc_bch_result6[i]);
1544     + ecc_code[0] = ((val >> 8) & 0xFF);
1545     + ecc_code[1] = ((val >> 0) & 0xFF);
1546     + val = readl(gpmc_regs->gpmc_bch_result5[i]);
1547     + ecc_code[2] = ((val >> 24) & 0xFF);
1548     + ecc_code[3] = ((val >> 16) & 0xFF);
1549     + ecc_code[4] = ((val >> 8) & 0xFF);
1550     + ecc_code[5] = ((val >> 0) & 0xFF);
1551     + val = readl(gpmc_regs->gpmc_bch_result4[i]);
1552     + ecc_code[6] = ((val >> 24) & 0xFF);
1553     + ecc_code[7] = ((val >> 16) & 0xFF);
1554     + ecc_code[8] = ((val >> 8) & 0xFF);
1555     + ecc_code[9] = ((val >> 0) & 0xFF);
1556     + val = readl(gpmc_regs->gpmc_bch_result3[i]);
1557     + ecc_code[10] = ((val >> 24) & 0xFF);
1558     + ecc_code[11] = ((val >> 16) & 0xFF);
1559     + ecc_code[12] = ((val >> 8) & 0xFF);
1560     + ecc_code[13] = ((val >> 0) & 0xFF);
1561     + val = readl(gpmc_regs->gpmc_bch_result2[i]);
1562     + ecc_code[14] = ((val >> 24) & 0xFF);
1563     + ecc_code[15] = ((val >> 16) & 0xFF);
1564     + ecc_code[16] = ((val >> 8) & 0xFF);
1565     + ecc_code[17] = ((val >> 0) & 0xFF);
1566     + val = readl(gpmc_regs->gpmc_bch_result1[i]);
1567     + ecc_code[18] = ((val >> 24) & 0xFF);
1568     + ecc_code[19] = ((val >> 16) & 0xFF);
1569     + ecc_code[20] = ((val >> 8) & 0xFF);
1570     + ecc_code[21] = ((val >> 0) & 0xFF);
1571     + val = readl(gpmc_regs->gpmc_bch_result0[i]);
1572     + ecc_code[22] = ((val >> 24) & 0xFF);
1573     + ecc_code[23] = ((val >> 16) & 0xFF);
1574     + ecc_code[24] = ((val >> 8) & 0xFF);
1575     + ecc_code[25] = ((val >> 0) & 0xFF);
1576     + break;
1577     + default:
1578     + return -EINVAL;
1579     + }
1580     +
1581     + /* ECC scheme specific syndrome customizations */
1582     + switch (info->ecc_opt) {
1583     + case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1584     + /* Add constant polynomial to remainder, so that
1585     + * ECC of blank pages results in 0x0 on reading back
1586     + */
1587     + for (j = 0; j < eccbytes; j++)
1588     + ecc_calc[j] ^= bch4_polynomial[j];
1589     + break;
1590     + case OMAP_ECC_BCH4_CODE_HW:
1591     + /* Set 8th ECC byte as 0x0 for ROM compatibility */
1592     + ecc_calc[eccbytes - 1] = 0x0;
1593     + break;
1594     + case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1595     + /* Add constant polynomial to remainder, so that
1596     + * ECC of blank pages results in 0x0 on reading back
1597     + */
1598     + for (j = 0; j < eccbytes; j++)
1599     + ecc_calc[j] ^= bch8_polynomial[j];
1600     + break;
1601     + case OMAP_ECC_BCH8_CODE_HW:
1602     + /* Set 14th ECC byte as 0x0 for ROM compatibility */
1603     + ecc_calc[eccbytes - 1] = 0x0;
1604     + break;
1605     + case OMAP_ECC_BCH16_CODE_HW:
1606     + break;
1607     + default:
1608     + return -EINVAL;
1609     + }
1610     +
1611     + return 0;
1612     +}
1613     +
1614     +/**
1615     + * omap_calculate_ecc_bch_sw - ECC generator for sector for SW based correction
1616     + * @mtd: MTD device structure
1617     + * @dat: The pointer to data on which ecc is computed
1618     + * @ecc_code: The ecc_code buffer
1619     + *
1620     + * Support calculating of BCH4/8/16 ECC vectors for one sector. This is used
1621     + * when SW based correction is required as ECC is required for one sector
1622     + * at a time.
1623     + */
1624     +static int omap_calculate_ecc_bch_sw(struct mtd_info *mtd,
1625     + const u_char *dat, u_char *ecc_calc)
1626     +{
1627     + return _omap_calculate_ecc_bch(mtd, dat, ecc_calc, 0);
1628     +}
1629     +
1630     +/**
1631     + * omap_calculate_ecc_bch_multi - Generate ECC for multiple sectors
1632     + * @mtd: MTD device structure
1633     + * @dat: The pointer to data on which ecc is computed
1634     + * @ecc_code: The ecc_code buffer
1635     + *
1636     + * Support calculating of BCH4/8/16 ecc vectors for the entire page in one go.
1637     + */
1638     +static int omap_calculate_ecc_bch_multi(struct mtd_info *mtd,
1639     + const u_char *dat, u_char *ecc_calc)
1640     +{
1641     + struct omap_nand_info *info = mtd_to_omap(mtd);
1642     + int eccbytes = info->nand.ecc.bytes;
1643     + unsigned long nsectors;
1644     + int i, ret;
1645    
1646     nsectors = ((readl(info->reg.gpmc_ecc_config) >> 4) & 0x7) + 1;
1647     for (i = 0; i < nsectors; i++) {
1648     - ecc_code = ecc_calc;
1649     - switch (info->ecc_opt) {
1650     - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1651     - case OMAP_ECC_BCH8_CODE_HW:
1652     - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1653     - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1654     - bch_val3 = readl(gpmc_regs->gpmc_bch_result2[i]);
1655     - bch_val4 = readl(gpmc_regs->gpmc_bch_result3[i]);
1656     - *ecc_code++ = (bch_val4 & 0xFF);
1657     - *ecc_code++ = ((bch_val3 >> 24) & 0xFF);
1658     - *ecc_code++ = ((bch_val3 >> 16) & 0xFF);
1659     - *ecc_code++ = ((bch_val3 >> 8) & 0xFF);
1660     - *ecc_code++ = (bch_val3 & 0xFF);
1661     - *ecc_code++ = ((bch_val2 >> 24) & 0xFF);
1662     - *ecc_code++ = ((bch_val2 >> 16) & 0xFF);
1663     - *ecc_code++ = ((bch_val2 >> 8) & 0xFF);
1664     - *ecc_code++ = (bch_val2 & 0xFF);
1665     - *ecc_code++ = ((bch_val1 >> 24) & 0xFF);
1666     - *ecc_code++ = ((bch_val1 >> 16) & 0xFF);
1667     - *ecc_code++ = ((bch_val1 >> 8) & 0xFF);
1668     - *ecc_code++ = (bch_val1 & 0xFF);
1669     - break;
1670     - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1671     - case OMAP_ECC_BCH4_CODE_HW:
1672     - bch_val1 = readl(gpmc_regs->gpmc_bch_result0[i]);
1673     - bch_val2 = readl(gpmc_regs->gpmc_bch_result1[i]);
1674     - *ecc_code++ = ((bch_val2 >> 12) & 0xFF);
1675     - *ecc_code++ = ((bch_val2 >> 4) & 0xFF);
1676     - *ecc_code++ = ((bch_val2 & 0xF) << 4) |
1677     - ((bch_val1 >> 28) & 0xF);
1678     - *ecc_code++ = ((bch_val1 >> 20) & 0xFF);
1679     - *ecc_code++ = ((bch_val1 >> 12) & 0xFF);
1680     - *ecc_code++ = ((bch_val1 >> 4) & 0xFF);
1681     - *ecc_code++ = ((bch_val1 & 0xF) << 4);
1682     - break;
1683     - case OMAP_ECC_BCH16_CODE_HW:
1684     - val = readl(gpmc_regs->gpmc_bch_result6[i]);
1685     - ecc_code[0] = ((val >> 8) & 0xFF);
1686     - ecc_code[1] = ((val >> 0) & 0xFF);
1687     - val = readl(gpmc_regs->gpmc_bch_result5[i]);
1688     - ecc_code[2] = ((val >> 24) & 0xFF);
1689     - ecc_code[3] = ((val >> 16) & 0xFF);
1690     - ecc_code[4] = ((val >> 8) & 0xFF);
1691     - ecc_code[5] = ((val >> 0) & 0xFF);
1692     - val = readl(gpmc_regs->gpmc_bch_result4[i]);
1693     - ecc_code[6] = ((val >> 24) & 0xFF);
1694     - ecc_code[7] = ((val >> 16) & 0xFF);
1695     - ecc_code[8] = ((val >> 8) & 0xFF);
1696     - ecc_code[9] = ((val >> 0) & 0xFF);
1697     - val = readl(gpmc_regs->gpmc_bch_result3[i]);
1698     - ecc_code[10] = ((val >> 24) & 0xFF);
1699     - ecc_code[11] = ((val >> 16) & 0xFF);
1700     - ecc_code[12] = ((val >> 8) & 0xFF);
1701     - ecc_code[13] = ((val >> 0) & 0xFF);
1702     - val = readl(gpmc_regs->gpmc_bch_result2[i]);
1703     - ecc_code[14] = ((val >> 24) & 0xFF);
1704     - ecc_code[15] = ((val >> 16) & 0xFF);
1705     - ecc_code[16] = ((val >> 8) & 0xFF);
1706     - ecc_code[17] = ((val >> 0) & 0xFF);
1707     - val = readl(gpmc_regs->gpmc_bch_result1[i]);
1708     - ecc_code[18] = ((val >> 24) & 0xFF);
1709     - ecc_code[19] = ((val >> 16) & 0xFF);
1710     - ecc_code[20] = ((val >> 8) & 0xFF);
1711     - ecc_code[21] = ((val >> 0) & 0xFF);
1712     - val = readl(gpmc_regs->gpmc_bch_result0[i]);
1713     - ecc_code[22] = ((val >> 24) & 0xFF);
1714     - ecc_code[23] = ((val >> 16) & 0xFF);
1715     - ecc_code[24] = ((val >> 8) & 0xFF);
1716     - ecc_code[25] = ((val >> 0) & 0xFF);
1717     - break;
1718     - default:
1719     - return -EINVAL;
1720     - }
1721     -
1722     - /* ECC scheme specific syndrome customizations */
1723     - switch (info->ecc_opt) {
1724     - case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
1725     - /* Add constant polynomial to remainder, so that
1726     - * ECC of blank pages results in 0x0 on reading back */
1727     - for (j = 0; j < eccbytes; j++)
1728     - ecc_calc[j] ^= bch4_polynomial[j];
1729     - break;
1730     - case OMAP_ECC_BCH4_CODE_HW:
1731     - /* Set 8th ECC byte as 0x0 for ROM compatibility */
1732     - ecc_calc[eccbytes - 1] = 0x0;
1733     - break;
1734     - case OMAP_ECC_BCH8_CODE_HW_DETECTION_SW:
1735     - /* Add constant polynomial to remainder, so that
1736     - * ECC of blank pages results in 0x0 on reading back */
1737     - for (j = 0; j < eccbytes; j++)
1738     - ecc_calc[j] ^= bch8_polynomial[j];
1739     - break;
1740     - case OMAP_ECC_BCH8_CODE_HW:
1741     - /* Set 14th ECC byte as 0x0 for ROM compatibility */
1742     - ecc_calc[eccbytes - 1] = 0x0;
1743     - break;
1744     - case OMAP_ECC_BCH16_CODE_HW:
1745     - break;
1746     - default:
1747     - return -EINVAL;
1748     - }
1749     + ret = _omap_calculate_ecc_bch(mtd, dat, ecc_calc, i);
1750     + if (ret)
1751     + return ret;
1752    
1753     - ecc_calc += eccbytes;
1754     + ecc_calc += eccbytes;
1755     }
1756    
1757     return 0;
1758     @@ -1496,7 +1539,7 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1759     chip->write_buf(mtd, buf, mtd->writesize);
1760    
1761     /* Update ecc vector from GPMC result registers */
1762     - chip->ecc.calculate(mtd, buf, &ecc_calc[0]);
1763     + omap_calculate_ecc_bch_multi(mtd, buf, &ecc_calc[0]);
1764    
1765     ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1766     chip->ecc.total);
1767     @@ -1508,6 +1551,72 @@ static int omap_write_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1768     return 0;
1769     }
1770    
1771     +/**
1772     + * omap_write_subpage_bch - BCH hardware ECC based subpage write
1773     + * @mtd: mtd info structure
1774     + * @chip: nand chip info structure
1775     + * @offset: column address of subpage within the page
1776     + * @data_len: data length
1777     + * @buf: data buffer
1778     + * @oob_required: must write chip->oob_poi to OOB
1779     + * @page: page number to write
1780     + *
1781     + * OMAP optimized subpage write method.
1782     + */
1783     +static int omap_write_subpage_bch(struct mtd_info *mtd,
1784     + struct nand_chip *chip, u32 offset,
1785     + u32 data_len, const u8 *buf,
1786     + int oob_required, int page)
1787     +{
1788     + u8 *ecc_calc = chip->buffers->ecccalc;
1789     + int ecc_size = chip->ecc.size;
1790     + int ecc_bytes = chip->ecc.bytes;
1791     + int ecc_steps = chip->ecc.steps;
1792     + u32 start_step = offset / ecc_size;
1793     + u32 end_step = (offset + data_len - 1) / ecc_size;
1794     + int step, ret = 0;
1795     +
1796     + /*
1797     + * Write entire page at one go as it would be optimal
1798     + * as ECC is calculated by hardware.
1799     + * ECC is calculated for all subpages but we choose
1800     + * only what we want.
1801     + */
1802     +
1803     + /* Enable GPMC ECC engine */
1804     + chip->ecc.hwctl(mtd, NAND_ECC_WRITE);
1805     +
1806     + /* Write data */
1807     + chip->write_buf(mtd, buf, mtd->writesize);
1808     +
1809     + for (step = 0; step < ecc_steps; step++) {
1810     + /* mask ECC of un-touched subpages by padding 0xFF */
1811     + if (step < start_step || step > end_step)
1812     + memset(ecc_calc, 0xff, ecc_bytes);
1813     + else
1814     + ret = _omap_calculate_ecc_bch(mtd, buf, ecc_calc, step);
1815     +
1816     + if (ret)
1817     + return ret;
1818     +
1819     + buf += ecc_size;
1820     + ecc_calc += ecc_bytes;
1821     + }
1822     +
1823     + /* copy calculated ECC for whole page to chip->buffer->oob */
1824     + /* this include masked-value(0xFF) for unwritten subpages */
1825     + ecc_calc = chip->buffers->ecccalc;
1826     + ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
1827     + chip->ecc.total);
1828     + if (ret)
1829     + return ret;
1830     +
1831     + /* write OOB buffer to NAND device */
1832     + chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1833     +
1834     + return 0;
1835     +}
1836     +
1837     /**
1838     * omap_read_page_bch - BCH ecc based page read function for entire page
1839     * @mtd: mtd info structure
1840     @@ -1544,7 +1653,7 @@ static int omap_read_page_bch(struct mtd_info *mtd, struct nand_chip *chip,
1841     chip->ecc.total);
1842    
1843     /* Calculate ecc bytes */
1844     - chip->ecc.calculate(mtd, buf, ecc_calc);
1845     + omap_calculate_ecc_bch_multi(mtd, buf, ecc_calc);
1846    
1847     ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
1848     chip->ecc.total);
1849     @@ -2044,7 +2153,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1850     nand_chip->ecc.strength = 4;
1851     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1852     nand_chip->ecc.correct = nand_bch_correct_data;
1853     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1854     + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
1855     mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1856     /* Reserve one byte for the OMAP marker */
1857     oobbytes_per_step = nand_chip->ecc.bytes + 1;
1858     @@ -2066,9 +2175,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1859     nand_chip->ecc.strength = 4;
1860     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1861     nand_chip->ecc.correct = omap_elm_correct_data;
1862     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1863     nand_chip->ecc.read_page = omap_read_page_bch;
1864     nand_chip->ecc.write_page = omap_write_page_bch;
1865     + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
1866     mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1867     oobbytes_per_step = nand_chip->ecc.bytes;
1868    
1869     @@ -2087,7 +2196,7 @@ static int omap_nand_probe(struct platform_device *pdev)
1870     nand_chip->ecc.strength = 8;
1871     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1872     nand_chip->ecc.correct = nand_bch_correct_data;
1873     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1874     + nand_chip->ecc.calculate = omap_calculate_ecc_bch_sw;
1875     mtd_set_ooblayout(mtd, &omap_sw_ooblayout_ops);
1876     /* Reserve one byte for the OMAP marker */
1877     oobbytes_per_step = nand_chip->ecc.bytes + 1;
1878     @@ -2109,9 +2218,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1879     nand_chip->ecc.strength = 8;
1880     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1881     nand_chip->ecc.correct = omap_elm_correct_data;
1882     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1883     nand_chip->ecc.read_page = omap_read_page_bch;
1884     nand_chip->ecc.write_page = omap_write_page_bch;
1885     + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
1886     mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1887     oobbytes_per_step = nand_chip->ecc.bytes;
1888    
1889     @@ -2131,9 +2240,9 @@ static int omap_nand_probe(struct platform_device *pdev)
1890     nand_chip->ecc.strength = 16;
1891     nand_chip->ecc.hwctl = omap_enable_hwecc_bch;
1892     nand_chip->ecc.correct = omap_elm_correct_data;
1893     - nand_chip->ecc.calculate = omap_calculate_ecc_bch;
1894     nand_chip->ecc.read_page = omap_read_page_bch;
1895     nand_chip->ecc.write_page = omap_write_page_bch;
1896     + nand_chip->ecc.write_subpage = omap_write_subpage_bch;
1897     mtd_set_ooblayout(mtd, &omap_ooblayout_ops);
1898     oobbytes_per_step = nand_chip->ecc.bytes;
1899    
1900     diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
1901     index 8f8418d2ac4a..a0012c3cb4f6 100644
1902     --- a/drivers/net/ethernet/3com/typhoon.c
1903     +++ b/drivers/net/ethernet/3com/typhoon.c
1904     @@ -2366,9 +2366,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1905     * 4) Get the hardware address.
1906     * 5) Put the card to sleep.
1907     */
1908     - if (typhoon_reset(ioaddr, WaitSleep) < 0) {
1909     + err = typhoon_reset(ioaddr, WaitSleep);
1910     + if (err < 0) {
1911     err_msg = "could not reset 3XP";
1912     - err = -EIO;
1913     goto error_out_dma;
1914     }
1915    
1916     @@ -2382,24 +2382,25 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1917     typhoon_init_interface(tp);
1918     typhoon_init_rings(tp);
1919    
1920     - if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
1921     + err = typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST);
1922     + if (err < 0) {
1923     err_msg = "cannot boot 3XP sleep image";
1924     - err = -EIO;
1925     goto error_out_reset;
1926     }
1927    
1928     INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
1929     - if(typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp) < 0) {
1930     + err = typhoon_issue_command(tp, 1, &xp_cmd, 1, xp_resp);
1931     + if (err < 0) {
1932     err_msg = "cannot read MAC address";
1933     - err = -EIO;
1934     goto error_out_reset;
1935     }
1936    
1937     *(__be16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp[0].parm1));
1938     *(__be32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp[0].parm2));
1939    
1940     - if(!is_valid_ether_addr(dev->dev_addr)) {
1941     + if (!is_valid_ether_addr(dev->dev_addr)) {
1942     err_msg = "Could not obtain valid ethernet address, aborting";
1943     + err = -EIO;
1944     goto error_out_reset;
1945     }
1946    
1947     @@ -2407,7 +2408,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1948     * later when we print out the version reported.
1949     */
1950     INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1951     - if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1952     + err = typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp);
1953     + if (err < 0) {
1954     err_msg = "Could not get Sleep Image version";
1955     goto error_out_reset;
1956     }
1957     @@ -2424,9 +2426,9 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1958     if(xp_resp[0].numDesc != 0)
1959     tp->capabilities |= TYPHOON_WAKEUP_NEEDS_RESET;
1960    
1961     - if(typhoon_sleep(tp, PCI_D3hot, 0) < 0) {
1962     + err = typhoon_sleep(tp, PCI_D3hot, 0);
1963     + if (err < 0) {
1964     err_msg = "cannot put adapter to sleep";
1965     - err = -EIO;
1966     goto error_out_reset;
1967     }
1968    
1969     @@ -2449,7 +2451,8 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1970     dev->features = dev->hw_features |
1971     NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_RXCSUM;
1972    
1973     - if(register_netdev(dev) < 0) {
1974     + err = register_netdev(dev);
1975     + if (err < 0) {
1976     err_msg = "unable to register netdev";
1977     goto error_out_reset;
1978     }
1979     diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1980     index 333df540b375..5d2cf56aed0e 100644
1981     --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1982     +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
1983     @@ -3800,6 +3800,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
1984     return rc;
1985     }
1986    
1987     +static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
1988     +{
1989     + int rc;
1990     +
1991     + if (BNXT_PF(bp)) {
1992     + struct hwrm_func_cfg_input req = {0};
1993     +
1994     + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
1995     + req.fid = cpu_to_le16(0xffff);
1996     + req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
1997     + req.async_event_cr = cpu_to_le16(idx);
1998     + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1999     + } else {
2000     + struct hwrm_func_vf_cfg_input req = {0};
2001     +
2002     + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
2003     + req.enables =
2004     + cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
2005     + req.async_event_cr = cpu_to_le16(idx);
2006     + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
2007     + }
2008     + return rc;
2009     +}
2010     +
2011     static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
2012     {
2013     int i, rc = 0;
2014     @@ -3816,6 +3840,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
2015     goto err_out;
2016     BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
2017     bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
2018     +
2019     + if (!i) {
2020     + rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
2021     + if (rc)
2022     + netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
2023     + }
2024     }
2025    
2026     for (i = 0; i < bp->tx_nr_rings; i++) {
2027     diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
2028     index 0641c0098738..afb7ebe20b24 100644
2029     --- a/drivers/net/ethernet/intel/e1000e/defines.h
2030     +++ b/drivers/net/ethernet/intel/e1000e/defines.h
2031     @@ -398,6 +398,7 @@
2032     #define E1000_ICR_LSC 0x00000004 /* Link Status Change */
2033     #define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
2034     #define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
2035     +#define E1000_ICR_RXO 0x00000040 /* Receiver Overrun */
2036     #define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
2037     #define E1000_ICR_ECCER 0x00400000 /* Uncorrectable ECC Error */
2038     /* If this bit asserted, the driver should claim the interrupt */
2039     diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
2040     index b322011ec282..f457c5703d0c 100644
2041     --- a/drivers/net/ethernet/intel/e1000e/mac.c
2042     +++ b/drivers/net/ethernet/intel/e1000e/mac.c
2043     @@ -410,6 +410,9 @@ void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
2044     * Checks to see of the link status of the hardware has changed. If a
2045     * change in link status has been detected, then we read the PHY registers
2046     * to get the current speed/duplex if link exists.
2047     + *
2048     + * Returns a negative error code (-E1000_ERR_*) or 0 (link down) or 1 (link
2049     + * up).
2050     **/
2051     s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2052     {
2053     @@ -423,7 +426,7 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2054     * Change or Rx Sequence Error interrupt.
2055     */
2056     if (!mac->get_link_status)
2057     - return 0;
2058     + return 1;
2059    
2060     /* First we want to see if the MII Status Register reports
2061     * link. If so, then we want to get the current speed/duplex
2062     @@ -461,10 +464,12 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
2063     * different link partner.
2064     */
2065     ret_val = e1000e_config_fc_after_link_up(hw);
2066     - if (ret_val)
2067     + if (ret_val) {
2068     e_dbg("Error configuring flow control\n");
2069     + return ret_val;
2070     + }
2071    
2072     - return ret_val;
2073     + return 1;
2074     }
2075    
2076     /**
2077     diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2078     index 7017281ba2dc..0feddf3393f9 100644
2079     --- a/drivers/net/ethernet/intel/e1000e/netdev.c
2080     +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2081     @@ -1905,14 +1905,30 @@ static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
2082     struct net_device *netdev = data;
2083     struct e1000_adapter *adapter = netdev_priv(netdev);
2084     struct e1000_hw *hw = &adapter->hw;
2085     + u32 icr;
2086     + bool enable = true;
2087     +
2088     + icr = er32(ICR);
2089     + if (icr & E1000_ICR_RXO) {
2090     + ew32(ICR, E1000_ICR_RXO);
2091     + enable = false;
2092     + /* napi poll will re-enable Other, make sure it runs */
2093     + if (napi_schedule_prep(&adapter->napi)) {
2094     + adapter->total_rx_bytes = 0;
2095     + adapter->total_rx_packets = 0;
2096     + __napi_schedule(&adapter->napi);
2097     + }
2098     + }
2099     + if (icr & E1000_ICR_LSC) {
2100     + ew32(ICR, E1000_ICR_LSC);
2101     + hw->mac.get_link_status = true;
2102     + /* guard against interrupt when we're going down */
2103     + if (!test_bit(__E1000_DOWN, &adapter->state))
2104     + mod_timer(&adapter->watchdog_timer, jiffies + 1);
2105     + }
2106    
2107     - hw->mac.get_link_status = true;
2108     -
2109     - /* guard against interrupt when we're going down */
2110     - if (!test_bit(__E1000_DOWN, &adapter->state)) {
2111     - mod_timer(&adapter->watchdog_timer, jiffies + 1);
2112     + if (enable && !test_bit(__E1000_DOWN, &adapter->state))
2113     ew32(IMS, E1000_IMS_OTHER);
2114     - }
2115    
2116     return IRQ_HANDLED;
2117     }
2118     @@ -2683,7 +2699,8 @@ static int e1000e_poll(struct napi_struct *napi, int weight)
2119     napi_complete_done(napi, work_done);
2120     if (!test_bit(__E1000_DOWN, &adapter->state)) {
2121     if (adapter->msix_entries)
2122     - ew32(IMS, adapter->rx_ring->ims_val);
2123     + ew32(IMS, adapter->rx_ring->ims_val |
2124     + E1000_IMS_OTHER);
2125     else
2126     e1000_irq_enable(adapter);
2127     }
2128     @@ -4178,7 +4195,7 @@ static void e1000e_trigger_lsc(struct e1000_adapter *adapter)
2129     struct e1000_hw *hw = &adapter->hw;
2130    
2131     if (adapter->msix_entries)
2132     - ew32(ICS, E1000_ICS_OTHER);
2133     + ew32(ICS, E1000_ICS_LSC | E1000_ICS_OTHER);
2134     else
2135     ew32(ICS, E1000_ICS_LSC);
2136     }
2137     @@ -5056,7 +5073,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
2138     case e1000_media_type_copper:
2139     if (hw->mac.get_link_status) {
2140     ret_val = hw->mac.ops.check_for_link(hw);
2141     - link_active = !hw->mac.get_link_status;
2142     + link_active = ret_val > 0;
2143     } else {
2144     link_active = true;
2145     }
2146     @@ -5074,7 +5091,7 @@ static bool e1000e_has_link(struct e1000_adapter *adapter)
2147     break;
2148     }
2149    
2150     - if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2151     + if ((ret_val == -E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
2152     (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
2153     /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
2154     e_info("Gigabit has been disabled, downgrading speed\n");
2155     diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
2156     index d78d47b41a71..86ff0969efb6 100644
2157     --- a/drivers/net/ethernet/intel/e1000e/phy.c
2158     +++ b/drivers/net/ethernet/intel/e1000e/phy.c
2159     @@ -1744,6 +1744,7 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
2160     s32 ret_val = 0;
2161     u16 i, phy_status;
2162    
2163     + *success = false;
2164     for (i = 0; i < iterations; i++) {
2165     /* Some PHYs require the MII_BMSR register to be read
2166     * twice due to the link bit being sticky. No harm doing
2167     @@ -1763,16 +1764,16 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
2168     ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
2169     if (ret_val)
2170     break;
2171     - if (phy_status & BMSR_LSTATUS)
2172     + if (phy_status & BMSR_LSTATUS) {
2173     + *success = true;
2174     break;
2175     + }
2176     if (usec_interval >= 1000)
2177     msleep(usec_interval / 1000);
2178     else
2179     udelay(usec_interval);
2180     }
2181    
2182     - *success = (i < iterations);
2183     -
2184     return ret_val;
2185     }
2186    
2187     diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2188     index 5de937852436..2aae6f88dca0 100644
2189     --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2190     +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c
2191     @@ -1225,7 +1225,7 @@ static bool fm10k_clean_tx_irq(struct fm10k_q_vector *q_vector,
2192     break;
2193    
2194     /* prevent any other reads prior to eop_desc */
2195     - read_barrier_depends();
2196     + smp_rmb();
2197    
2198     /* if DD is not set pending work has not been completed */
2199     if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
2200     diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
2201     index 31c97e3937a4..2caafebb0295 100644
2202     --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
2203     +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
2204     @@ -3604,7 +3604,7 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
2205     break;
2206    
2207     /* prevent any other reads prior to eop_desc */
2208     - read_barrier_depends();
2209     + smp_rmb();
2210    
2211     /* if the descriptor isn't done, no work yet to do */
2212     if (!(eop_desc->cmd_type_offset_bsz &
2213     diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
2214     index 6287bf63c43c..c5430394fac9 100644
2215     --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
2216     +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
2217     @@ -679,7 +679,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
2218     break;
2219    
2220     /* prevent any other reads prior to eop_desc */
2221     - read_barrier_depends();
2222     + smp_rmb();
2223    
2224     /* we have caught up to head, no work left to do */
2225     if (tx_head == tx_desc)
2226     diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
2227     index 75f2a2cdd738..c03800d1000a 100644
2228     --- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
2229     +++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
2230     @@ -184,7 +184,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
2231     break;
2232    
2233     /* prevent any other reads prior to eop_desc */
2234     - read_barrier_depends();
2235     + smp_rmb();
2236    
2237     /* we have caught up to head, no work left to do */
2238     if (tx_head == tx_desc)
2239     diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
2240     index c6c2562d9df3..16839600fb78 100644
2241     --- a/drivers/net/ethernet/intel/igb/igb_main.c
2242     +++ b/drivers/net/ethernet/intel/igb/igb_main.c
2243     @@ -6660,7 +6660,7 @@ static bool igb_clean_tx_irq(struct igb_q_vector *q_vector, int napi_budget)
2244     break;
2245    
2246     /* prevent any other reads prior to eop_desc */
2247     - read_barrier_depends();
2248     + smp_rmb();
2249    
2250     /* if DD is not set pending work has not been completed */
2251     if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
2252     diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
2253     index 7dff7f6239cd..5428e39fa4e5 100644
2254     --- a/drivers/net/ethernet/intel/igbvf/netdev.c
2255     +++ b/drivers/net/ethernet/intel/igbvf/netdev.c
2256     @@ -810,7 +810,7 @@ static bool igbvf_clean_tx_irq(struct igbvf_ring *tx_ring)
2257     break;
2258    
2259     /* prevent any other reads prior to eop_desc */
2260     - read_barrier_depends();
2261     + smp_rmb();
2262    
2263     /* if DD is not set pending work has not been completed */
2264     if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
2265     diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2266     index 334eb96ecda3..a5428b6abdac 100644
2267     --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2268     +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
2269     @@ -1171,7 +1171,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
2270     break;
2271    
2272     /* prevent any other reads prior to eop_desc */
2273     - read_barrier_depends();
2274     + smp_rmb();
2275    
2276     /* if DD is not set pending work has not been completed */
2277     if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2278     diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2279     index cbf70fe4028a..1499ce2bf9f6 100644
2280     --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2281     +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
2282     @@ -325,7 +325,7 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
2283     break;
2284    
2285     /* prevent any other reads prior to eop_desc */
2286     - read_barrier_depends();
2287     + smp_rmb();
2288    
2289     /* if DD is not set pending work has not been completed */
2290     if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
2291     diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c
2292     index 70ecd82d674d..098c814e22c8 100644
2293     --- a/drivers/net/wireless/admtek/adm8211.c
2294     +++ b/drivers/net/wireless/admtek/adm8211.c
2295     @@ -413,6 +413,13 @@ static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
2296     skb_tail_pointer(newskb),
2297     RX_PKT_SIZE,
2298     PCI_DMA_FROMDEVICE);
2299     + if (pci_dma_mapping_error(priv->pdev,
2300     + priv->rx_buffers[entry].mapping)) {
2301     + priv->rx_buffers[entry].skb = NULL;
2302     + dev_kfree_skb(newskb);
2303     + skb = NULL;
2304     + /* TODO: update rx dropped stats */
2305     + }
2306     } else {
2307     skb = NULL;
2308     /* TODO: update rx dropped stats */
2309     @@ -1450,6 +1457,12 @@ static int adm8211_init_rings(struct ieee80211_hw *dev)
2310     skb_tail_pointer(rx_info->skb),
2311     RX_PKT_SIZE,
2312     PCI_DMA_FROMDEVICE);
2313     + if (pci_dma_mapping_error(priv->pdev, rx_info->mapping)) {
2314     + dev_kfree_skb(rx_info->skb);
2315     + rx_info->skb = NULL;
2316     + break;
2317     + }
2318     +
2319     desc->buffer1 = cpu_to_le32(rx_info->mapping);
2320     desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
2321     }
2322     @@ -1613,7 +1626,7 @@ static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int
2323     }
2324    
2325     /* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
2326     -static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2327     +static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2328     u16 plcp_signal,
2329     size_t hdrlen)
2330     {
2331     @@ -1625,6 +1638,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2332    
2333     mapping = pci_map_single(priv->pdev, skb->data, skb->len,
2334     PCI_DMA_TODEVICE);
2335     + if (pci_dma_mapping_error(priv->pdev, mapping))
2336     + return -ENOMEM;
2337    
2338     spin_lock_irqsave(&priv->lock, flags);
2339    
2340     @@ -1657,6 +1672,8 @@ static void adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
2341    
2342     /* Trigger transmit poll */
2343     ADM8211_CSR_WRITE(TDR, 0);
2344     +
2345     + return 0;
2346     }
2347    
2348     /* Put adm8211_tx_hdr on skb and transmit */
2349     @@ -1710,7 +1727,10 @@ static void adm8211_tx(struct ieee80211_hw *dev,
2350    
2351     txhdr->retry_limit = info->control.rates[0].count;
2352    
2353     - adm8211_tx_raw(dev, skb, plcp_signal, hdrlen);
2354     + if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
2355     + /* Drop packet */
2356     + ieee80211_free_txskb(dev, skb);
2357     + }
2358     }
2359    
2360     static int adm8211_alloc_rings(struct ieee80211_hw *dev)
2361     @@ -1843,7 +1863,8 @@ static int adm8211_probe(struct pci_dev *pdev,
2362     priv->rx_ring_size = rx_ring_size;
2363     priv->tx_ring_size = tx_ring_size;
2364    
2365     - if (adm8211_alloc_rings(dev)) {
2366     + err = adm8211_alloc_rings(dev);
2367     + if (err) {
2368     printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
2369     pci_name(pdev));
2370     goto err_iounmap;
2371     diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
2372     index 366d3dcb8e9d..7b3017f55e3d 100644
2373     --- a/drivers/net/wireless/ath/ath10k/core.c
2374     +++ b/drivers/net/wireless/ath/ath10k/core.c
2375     @@ -691,8 +691,11 @@ static int ath10k_core_get_board_id_from_otp(struct ath10k *ar)
2376     "boot get otp board id result 0x%08x board_id %d chip_id %d\n",
2377     result, board_id, chip_id);
2378    
2379     - if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0)
2380     + if ((result & ATH10K_BMI_BOARD_ID_STATUS_MASK) != 0 ||
2381     + (board_id == 0)) {
2382     + ath10k_warn(ar, "board id is not exist in otp, ignore it\n");
2383     return -EOPNOTSUPP;
2384     + }
2385    
2386     ar->id.bmi_ids_valid = true;
2387     ar->id.bmi_board_id = board_id;
2388     diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
2389     index 30e98afa2e68..17ab8efdac35 100644
2390     --- a/drivers/net/wireless/ath/ath10k/mac.c
2391     +++ b/drivers/net/wireless/ath/ath10k/mac.c
2392     @@ -1224,6 +1224,36 @@ static int ath10k_monitor_recalc(struct ath10k *ar)
2393     return ath10k_monitor_stop(ar);
2394     }
2395    
2396     +static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
2397     +{
2398     + struct ath10k *ar = arvif->ar;
2399     +
2400     + lockdep_assert_held(&ar->conf_mutex);
2401     +
2402     + if (!arvif->is_started) {
2403     + ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
2404     + return false;
2405     + }
2406     +
2407     + return true;
2408     +}
2409     +
2410     +static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
2411     +{
2412     + struct ath10k *ar = arvif->ar;
2413     + u32 vdev_param;
2414     +
2415     + lockdep_assert_held(&ar->conf_mutex);
2416     +
2417     + vdev_param = ar->wmi.vdev_param->protection_mode;
2418     +
2419     + ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
2420     + arvif->vdev_id, arvif->use_cts_prot);
2421     +
2422     + return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2423     + arvif->use_cts_prot ? 1 : 0);
2424     +}
2425     +
2426     static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
2427     {
2428     struct ath10k *ar = arvif->ar;
2429     @@ -4668,7 +4698,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
2430     lockdep_assert_held(&ar->conf_mutex);
2431    
2432     list_for_each_entry(arvif, &ar->arvifs, list) {
2433     - WARN_ON(arvif->txpower < 0);
2434     + if (arvif->txpower <= 0)
2435     + continue;
2436    
2437     if (txpower == -1)
2438     txpower = arvif->txpower;
2439     @@ -4676,8 +4707,8 @@ static int ath10k_mac_txpower_recalc(struct ath10k *ar)
2440     txpower = min(txpower, arvif->txpower);
2441     }
2442    
2443     - if (WARN_ON(txpower == -1))
2444     - return -EINVAL;
2445     + if (txpower == -1)
2446     + return 0;
2447    
2448     ret = ath10k_mac_txpower_setup(ar, txpower);
2449     if (ret) {
2450     @@ -5321,20 +5352,18 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
2451    
2452     if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2453     arvif->use_cts_prot = info->use_cts_prot;
2454     - ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
2455     - arvif->vdev_id, info->use_cts_prot);
2456    
2457     ret = ath10k_recalc_rtscts_prot(arvif);
2458     if (ret)
2459     ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2460     arvif->vdev_id, ret);
2461    
2462     - vdev_param = ar->wmi.vdev_param->protection_mode;
2463     - ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
2464     - info->use_cts_prot ? 1 : 0);
2465     - if (ret)
2466     - ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
2467     - info->use_cts_prot, arvif->vdev_id, ret);
2468     + if (ath10k_mac_can_set_cts_prot(arvif)) {
2469     + ret = ath10k_mac_set_cts_prot(arvif);
2470     + if (ret)
2471     + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
2472     + arvif->vdev_id, ret);
2473     + }
2474     }
2475    
2476     if (changed & BSS_CHANGED_ERP_SLOT) {
2477     @@ -7355,6 +7384,13 @@ ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
2478     arvif->is_up = true;
2479     }
2480    
2481     + if (ath10k_mac_can_set_cts_prot(arvif)) {
2482     + ret = ath10k_mac_set_cts_prot(arvif);
2483     + if (ret)
2484     + ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
2485     + arvif->vdev_id, ret);
2486     + }
2487     +
2488     mutex_unlock(&ar->conf_mutex);
2489     return 0;
2490    
2491     diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2492     index e64f59300a7c..0e4d49adddd0 100644
2493     --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2494     +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
2495     @@ -1105,8 +1105,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
2496     struct ath10k_fw_stats_pdev *dst;
2497    
2498     src = data;
2499     - if (data_len < sizeof(*src))
2500     + if (data_len < sizeof(*src)) {
2501     + kfree(tb);
2502     return -EPROTO;
2503     + }
2504    
2505     data += sizeof(*src);
2506     data_len -= sizeof(*src);
2507     @@ -1126,8 +1128,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
2508     struct ath10k_fw_stats_vdev *dst;
2509    
2510     src = data;
2511     - if (data_len < sizeof(*src))
2512     + if (data_len < sizeof(*src)) {
2513     + kfree(tb);
2514     return -EPROTO;
2515     + }
2516    
2517     data += sizeof(*src);
2518     data_len -= sizeof(*src);
2519     @@ -1145,8 +1149,10 @@ static int ath10k_wmi_tlv_op_pull_fw_stats(struct ath10k *ar,
2520     struct ath10k_fw_stats_peer *dst;
2521    
2522     src = data;
2523     - if (data_len < sizeof(*src))
2524     + if (data_len < sizeof(*src)) {
2525     + kfree(tb);
2526     return -EPROTO;
2527     + }
2528    
2529     data += sizeof(*src);
2530     data_len -= sizeof(*src);
2531     diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c
2532     index d5a3bf91a03e..ab6d39e12069 100644
2533     --- a/drivers/net/wireless/intersil/p54/main.c
2534     +++ b/drivers/net/wireless/intersil/p54/main.c
2535     @@ -852,12 +852,11 @@ void p54_unregister_common(struct ieee80211_hw *dev)
2536     {
2537     struct p54_common *priv = dev->priv;
2538    
2539     -#ifdef CONFIG_P54_LEDS
2540     - p54_unregister_leds(priv);
2541     -#endif /* CONFIG_P54_LEDS */
2542     -
2543     if (priv->registered) {
2544     priv->registered = false;
2545     +#ifdef CONFIG_P54_LEDS
2546     + p54_unregister_leds(priv);
2547     +#endif /* CONFIG_P54_LEDS */
2548     ieee80211_unregister_hw(dev);
2549     }
2550    
2551     diff --git a/drivers/net/wireless/marvell/mwifiex/sdio.c b/drivers/net/wireless/marvell/mwifiex/sdio.c
2552     index 8718950004f3..8d601dcf2948 100644
2553     --- a/drivers/net/wireless/marvell/mwifiex/sdio.c
2554     +++ b/drivers/net/wireless/marvell/mwifiex/sdio.c
2555     @@ -2296,6 +2296,12 @@ static void mwifiex_recreate_adapter(struct sdio_mmc_card *card)
2556     mmc_hw_reset(func->card->host);
2557     sdio_release_host(func);
2558    
2559     + /* Previous save_adapter won't be valid after this. We will cancel
2560     + * pending work requests.
2561     + */
2562     + clear_bit(MWIFIEX_IFACE_WORK_DEVICE_DUMP, &iface_work_flags);
2563     + clear_bit(MWIFIEX_IFACE_WORK_CARD_RESET, &iface_work_flags);
2564     +
2565     mwifiex_sdio_probe(func, device_id);
2566     }
2567    
2568     diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2569     index bf3f0a39908c..9fc6f1615343 100644
2570     --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2571     +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c
2572     @@ -4707,8 +4707,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2573     rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
2574     else
2575     rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
2576     - rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
2577     - rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
2578     + rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10);
2579     + rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10);
2580     rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);
2581    
2582     rt2800_register_read(rt2x00dev, LED_CFG, &reg);
2583     diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
2584     index 631df690adbe..f57bb2cd604e 100644
2585     --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
2586     +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
2587     @@ -57,7 +57,7 @@ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev,
2588     if (status >= 0)
2589     return 0;
2590    
2591     - if (status == -ENODEV) {
2592     + if (status == -ENODEV || status == -ENOENT) {
2593     /* Device has disappeared. */
2594     clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2595     break;
2596     @@ -321,7 +321,7 @@ static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
2597    
2598     status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
2599     if (status) {
2600     - if (status == -ENODEV)
2601     + if (status == -ENODEV || status == -ENOENT)
2602     clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2603     set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
2604     rt2x00lib_dmadone(entry);
2605     @@ -410,7 +410,7 @@ static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data)
2606    
2607     status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
2608     if (status) {
2609     - if (status == -ENODEV)
2610     + if (status == -ENODEV || status == -ENOENT)
2611     clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
2612     set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
2613     rt2x00lib_dmadone(entry);
2614     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
2615     index b3f6a9ed15d4..27a0e50c2793 100644
2616     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
2617     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
2618     @@ -664,7 +664,7 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
2619     struct rtl_priv *rtlpriv = rtl_priv(hw);
2620     struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2621     struct sk_buff *skb = NULL;
2622     -
2623     + bool rtstatus;
2624     u32 totalpacketlen;
2625     u8 u1rsvdpageloc[5] = { 0 };
2626     bool b_dlok = false;
2627     @@ -727,7 +727,9 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
2628     memcpy((u8 *)skb_put(skb, totalpacketlen),
2629     &reserved_page_packet, totalpacketlen);
2630    
2631     - b_dlok = true;
2632     + rtstatus = rtl_cmd_send_packet(hw, skb);
2633     + if (rtstatus)
2634     + b_dlok = true;
2635    
2636     if (b_dlok) {
2637     RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD ,
2638     diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2639     index 1281ebe0c30a..82d53895ce4d 100644
2640     --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2641     +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/hw.c
2642     @@ -1378,6 +1378,7 @@ static void _rtl8821ae_get_wakeup_reason(struct ieee80211_hw *hw)
2643    
2644     ppsc->wakeup_reason = 0;
2645    
2646     + do_gettimeofday(&ts);
2647     rtlhal->last_suspend_sec = ts.tv_sec;
2648    
2649     switch (fw_reason) {
2650     diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
2651     index fac7cabe8f56..d8d189d14834 100644
2652     --- a/drivers/nvdimm/label.c
2653     +++ b/drivers/nvdimm/label.c
2654     @@ -861,7 +861,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
2655     nsindex = to_namespace_index(ndd, 0);
2656     memset(nsindex, 0, ndd->nsarea.config_size);
2657     for (i = 0; i < 2; i++) {
2658     - int rc = nd_label_write_index(ndd, i, i*2, ND_NSINDEX_INIT);
2659     + int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);
2660    
2661     if (rc)
2662     return rc;
2663     diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
2664     index a38ae34b74e4..b8fb1ef1fc15 100644
2665     --- a/drivers/nvdimm/namespace_devs.c
2666     +++ b/drivers/nvdimm/namespace_devs.c
2667     @@ -1451,7 +1451,7 @@ static umode_t namespace_visible(struct kobject *kobj,
2668     if (a == &dev_attr_resource.attr) {
2669     if (is_namespace_blk(dev))
2670     return 0;
2671     - return a->mode;
2672     + return 0400;
2673     }
2674    
2675     if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
2676     diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
2677     index 78cb3e2359bd..71eb6c637b60 100644
2678     --- a/drivers/nvdimm/pfn_devs.c
2679     +++ b/drivers/nvdimm/pfn_devs.c
2680     @@ -270,8 +270,16 @@ static struct attribute *nd_pfn_attributes[] = {
2681     NULL,
2682     };
2683    
2684     +static umode_t pfn_visible(struct kobject *kobj, struct attribute *a, int n)
2685     +{
2686     + if (a == &dev_attr_resource.attr)
2687     + return 0400;
2688     + return a->mode;
2689     +}
2690     +
2691     struct attribute_group nd_pfn_attribute_group = {
2692     .attrs = nd_pfn_attributes,
2693     + .is_visible = pfn_visible,
2694     };
2695    
2696     static const struct attribute_group *nd_pfn_attribute_groups[] = {
2697     diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
2698     index 6fe4c48a21e4..f791d46fe50f 100644
2699     --- a/drivers/nvme/target/admin-cmd.c
2700     +++ b/drivers/nvme/target/admin-cmd.c
2701     @@ -381,7 +381,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
2702     {
2703     struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
2704     u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
2705     - u64 val;
2706     u32 val32;
2707     u16 status = 0;
2708    
2709     @@ -391,8 +390,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
2710     (subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
2711     break;
2712     case NVME_FEAT_KATO:
2713     - val = le64_to_cpu(req->cmd->prop_set.value);
2714     - val32 = val & 0xffff;
2715     + val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
2716     req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
2717     nvmet_set_result(req, req->sq->ctrl->kato);
2718     break;
2719     diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
2720     index d266d800f246..60bada90cd75 100644
2721     --- a/drivers/pci/probe.c
2722     +++ b/drivers/pci/probe.c
2723     @@ -1438,8 +1438,16 @@ static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
2724    
2725     static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
2726     {
2727     - if (hpp)
2728     - dev_warn(&dev->dev, "PCI-X settings not supported\n");
2729     + int pos;
2730     +
2731     + if (!hpp)
2732     + return;
2733     +
2734     + pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
2735     + if (!pos)
2736     + return;
2737     +
2738     + dev_warn(&dev->dev, "PCI-X settings not supported\n");
2739     }
2740    
2741     static bool pcie_root_rcb_set(struct pci_dev *dev)
2742     @@ -1465,6 +1473,9 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
2743     if (!hpp)
2744     return;
2745    
2746     + if (!pci_is_pcie(dev))
2747     + return;
2748     +
2749     if (hpp->revision > 1) {
2750     dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
2751     hpp->revision);
2752     diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
2753     index 5d8151b43fbb..98eba9127a0b 100644
2754     --- a/drivers/pci/quirks.c
2755     +++ b/drivers/pci/quirks.c
2756     @@ -4088,12 +4088,14 @@ static int pci_quirk_amd_sb_acs(struct pci_dev *dev, u16 acs_flags)
2757     static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
2758     {
2759     /*
2760     - * Cavium devices matching this quirk do not perform peer-to-peer
2761     - * with other functions, allowing masking out these bits as if they
2762     - * were unimplemented in the ACS capability.
2763     + * Cavium root ports don't advertise an ACS capability. However,
2764     + * the RTL internally implements similar protection as if ACS had
2765     + * Request Redirection, Completion Redirection, Source Validation,
2766     + * and Upstream Forwarding features enabled. Assert that the
2767     + * hardware implements and enables equivalent ACS functionality for
2768     + * these flags.
2769     */
2770     - acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
2771     - PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
2772     + acs_flags &= ~(PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_SV | PCI_ACS_UF);
2773    
2774     return acs_flags ? 0 : 1;
2775     }
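
The reworked Cavium quirk keeps the usual ACS-quirk shape: clear the protections the hardware is believed to provide internally, then succeed only if nothing requested remains. A hedged sketch of that check; the flag values below are illustrative rather than the authoritative PCI_ACS_* encoding:

#include <stdio.h>

#define ACS_SV 0x01     /* source validation (illustrative values) */
#define ACS_RR 0x04     /* request redirect */
#define ACS_CR 0x08     /* completion redirect */
#define ACS_UF 0x10     /* upstream forwarding */
#define ACS_DT 0x40     /* direct translated p2p */

static int quirk_acs(unsigned short acs_flags)
{
        /* mask out what the RTL is assumed to implement equivalently */
        acs_flags &= ~(ACS_RR | ACS_CR | ACS_SV | ACS_UF);
        return acs_flags ? 0 : 1;       /* 1 = all requested protections honoured */
}

int main(void)
{
        printf("RR|UF requested -> %d\n", quirk_acs(ACS_RR | ACS_UF));  /* 1 */
        printf("RR|DT requested -> %d\n", quirk_acs(ACS_RR | ACS_DT));  /* 0 */
        return 0;
}
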
2776     diff --git a/drivers/pinctrl/sirf/pinctrl-atlas7.c b/drivers/pinctrl/sirf/pinctrl-atlas7.c
2777     index 7f3041697813..f714f67c4b64 100644
2778     --- a/drivers/pinctrl/sirf/pinctrl-atlas7.c
2779     +++ b/drivers/pinctrl/sirf/pinctrl-atlas7.c
2780     @@ -5420,14 +5420,15 @@ static int atlas7_pinmux_probe(struct platform_device *pdev)
2781     sys2pci_np = of_find_node_by_name(NULL, "sys2pci");
2782     if (!sys2pci_np)
2783     return -EINVAL;
2784     +
2785     ret = of_address_to_resource(sys2pci_np, 0, &res);
2786     + of_node_put(sys2pci_np);
2787     if (ret)
2788     return ret;
2789     +
2790     pmx->sys2pci_base = devm_ioremap_resource(&pdev->dev, &res);
2791     - if (IS_ERR(pmx->sys2pci_base)) {
2792     - of_node_put(sys2pci_np);
2793     + if (IS_ERR(pmx->sys2pci_base))
2794     return -ENOMEM;
2795     - }
2796    
2797     pmx->dev = &pdev->dev;
2798    
2799     diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
2800     index b7995474148c..8e281e47afec 100644
2801     --- a/drivers/spi/Kconfig
2802     +++ b/drivers/spi/Kconfig
2803     @@ -365,6 +365,7 @@ config SPI_FSL_SPI
2804     config SPI_FSL_DSPI
2805     tristate "Freescale DSPI controller"
2806     select REGMAP_MMIO
2807     + depends on HAS_DMA
2808     depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
2809     help
2810     This enables support for the Freescale DSPI controller in master
2811     diff --git a/drivers/staging/iio/cdc/ad7150.c b/drivers/staging/iio/cdc/ad7150.c
2812     index 5578a077fcfb..50a5b0c2cc7b 100644
2813     --- a/drivers/staging/iio/cdc/ad7150.c
2814     +++ b/drivers/staging/iio/cdc/ad7150.c
2815     @@ -274,7 +274,7 @@ static int ad7150_write_event_config(struct iio_dev *indio_dev,
2816     error_ret:
2817     mutex_unlock(&chip->state_lock);
2818    
2819     - return 0;
2820     + return ret;
2821     }
2822    
2823     static int ad7150_read_event_value(struct iio_dev *indio_dev,
2824     diff --git a/drivers/staging/media/cec/cec-adap.c b/drivers/staging/media/cec/cec-adap.c
2825     index 057c9b5ab1e5..499d7bfe7147 100644
2826     --- a/drivers/staging/media/cec/cec-adap.c
2827     +++ b/drivers/staging/media/cec/cec-adap.c
2828     @@ -288,10 +288,10 @@ static void cec_data_cancel(struct cec_data *data)
2829    
2830     /* Mark it as an error */
2831     data->msg.tx_ts = ktime_get_ns();
2832     - data->msg.tx_status = CEC_TX_STATUS_ERROR |
2833     - CEC_TX_STATUS_MAX_RETRIES;
2834     + data->msg.tx_status |= CEC_TX_STATUS_ERROR |
2835     + CEC_TX_STATUS_MAX_RETRIES;
2836     + data->msg.tx_error_cnt++;
2837     data->attempts = 0;
2838     - data->msg.tx_error_cnt = 1;
2839     /* Queue transmitted message for monitoring purposes */
2840     cec_queue_msg_monitor(data->adap, &data->msg, 1);
2841    
2842     @@ -1062,6 +1062,8 @@ static int cec_config_thread_func(void *arg)
2843     for (i = 1; i < las->num_log_addrs; i++)
2844     las->log_addr[i] = CEC_LOG_ADDR_INVALID;
2845     }
2846     + for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
2847     + las->log_addr[i] = CEC_LOG_ADDR_INVALID;
2848     adap->is_configured = true;
2849     adap->is_configuring = false;
2850     cec_post_state_event(adap);
2851     @@ -1079,8 +1081,6 @@ static int cec_config_thread_func(void *arg)
2852     cec_report_features(adap, i);
2853     cec_report_phys_addr(adap, i);
2854     }
2855     - for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
2856     - las->log_addr[i] = CEC_LOG_ADDR_INVALID;
2857     mutex_lock(&adap->lock);
2858     adap->kthread_config = NULL;
2859     mutex_unlock(&adap->lock);
2860     @@ -1557,9 +1557,9 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
2861     }
2862    
2863     case CEC_MSG_GIVE_FEATURES:
2864     - if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
2865     - return cec_report_features(adap, la_idx);
2866     - return 0;
2867     + if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
2868     + return cec_feature_abort(adap, msg);
2869     + return cec_report_features(adap, la_idx);
2870    
2871     default:
2872     /*
2873     diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
2874     index f3c9d18e9dc5..0d578297d9f9 100644
2875     --- a/drivers/target/iscsi/iscsi_target.c
2876     +++ b/drivers/target/iscsi/iscsi_target.c
2877     @@ -2104,12 +2104,14 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
2878    
2879     if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
2880     int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
2881     - if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
2882     + if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
2883     out_of_order_cmdsn = 1;
2884     - else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
2885     + } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
2886     + target_put_sess_cmd(&cmd->se_cmd);
2887     return 0;
2888     - else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
2889     + } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
2890     return -1;
2891     + }
2892     }
2893     iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
2894    
2895     diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
2896     index bacfa8f81be8..4c0782cb1e94 100644
2897     --- a/drivers/target/target_core_transport.c
2898     +++ b/drivers/target/target_core_transport.c
2899     @@ -1976,6 +1976,8 @@ static void target_restart_delayed_cmds(struct se_device *dev)
2900     list_del(&cmd->se_delayed_node);
2901     spin_unlock(&dev->delayed_cmd_lock);
2902    
2903     + cmd->transport_state |= CMD_T_SENT;
2904     +
2905     __target_execute_cmd(cmd, true);
2906    
2907     if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2908     @@ -2013,6 +2015,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
2909     pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2910     dev->dev_cur_ordered_id);
2911     }
2912     + cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2913     +
2914     restart:
2915     target_restart_delayed_cmds(dev);
2916     }
2917     diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
2918     index 6e29d053843d..9e36632b6f0e 100644
2919     --- a/drivers/vhost/scsi.c
2920     +++ b/drivers/vhost/scsi.c
2921     @@ -693,6 +693,7 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
2922     struct scatterlist *sg, int sg_count)
2923     {
2924     size_t off = iter->iov_offset;
2925     + struct scatterlist *p = sg;
2926     int i, ret;
2927    
2928     for (i = 0; i < iter->nr_segs; i++) {
2929     @@ -701,8 +702,8 @@ vhost_scsi_iov_to_sgl(struct vhost_scsi_cmd *cmd, bool write,
2930    
2931     ret = vhost_scsi_map_to_sgl(cmd, base, len, sg, write);
2932     if (ret < 0) {
2933     - for (i = 0; i < sg_count; i++) {
2934     - struct page *page = sg_page(&sg[i]);
2935     + while (p < sg) {
2936     + struct page *page = sg_page(p++);
2937     if (page)
2938     put_page(page);
2939     }
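
The old vhost error path put pages for all sg_count entries, including ones this call never mapped; the fix releases only what was populated before the failure. A small userspace sketch of that unwind-only-what-you-built pattern (grab_page()/free() are stand-ins, not the kernel scatterlist API):

#include <stdio.h>
#include <stdlib.h>

/* stand-in for the mapping step; the third "page" fails on purpose */
static void *grab_page(int i)
{
        return (i == 2) ? NULL : malloc(64);
}

int main(void)
{
        void *sg[8] = { NULL };
        void **p = sg;          /* remembers where this mapping run started */
        void **cur = sg;        /* advances as entries are populated */

        for (int i = 0; i < 8; i++) {
                void *page = grab_page(i);

                if (!page) {
                        /* unwind only the entries populated so far, like the
                         * "while (p < sg)" loop in the fixed error path */
                        while (p < cur)
                                free(*p++);
                        printf("mapping failed at segment %d; released %d pages\n",
                               i, i);
                        return 1;
                }
                *cur++ = page;
        }
        return 0;
}
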
2940     diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
2941     index 1e8be12ebb55..0a3c6762df1b 100644
2942     --- a/drivers/xen/xenbus/xenbus_dev_frontend.c
2943     +++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
2944     @@ -316,7 +316,7 @@ static int xenbus_write_transaction(unsigned msg_type,
2945     rc = -ENOMEM;
2946     goto out;
2947     }
2948     - } else if (msg_type == XS_TRANSACTION_END) {
2949     + } else if (u->u.msg.tx_id != 0) {
2950     list_for_each_entry(trans, &u->transactions, list)
2951     if (trans->handle.id == u->u.msg.tx_id)
2952     break;
2953     diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
2954     index 30ca770c5e0b..f8ab4a66acaf 100644
2955     --- a/fs/9p/vfs_inode.c
2956     +++ b/fs/9p/vfs_inode.c
2957     @@ -483,6 +483,9 @@ static int v9fs_test_inode(struct inode *inode, void *data)
2958    
2959     if (v9inode->qid.type != st->qid.type)
2960     return 0;
2961     +
2962     + if (v9inode->qid.path != st->qid.path)
2963     + return 0;
2964     return 1;
2965     }
2966    
2967     diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
2968     index afaa4b6de801..c3dd0d42bb3a 100644
2969     --- a/fs/9p/vfs_inode_dotl.c
2970     +++ b/fs/9p/vfs_inode_dotl.c
2971     @@ -87,6 +87,9 @@ static int v9fs_test_inode_dotl(struct inode *inode, void *data)
2972    
2973     if (v9inode->qid.type != st->qid.type)
2974     return 0;
2975     +
2976     + if (v9inode->qid.path != st->qid.path)
2977     + return 0;
2978     return 1;
2979     }
2980    
2981     diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
2982     index 5db6c8d745ea..4c71dba90120 100644
2983     --- a/fs/autofs4/waitq.c
2984     +++ b/fs/autofs4/waitq.c
2985     @@ -87,7 +87,8 @@ static int autofs4_write(struct autofs_sb_info *sbi,
2986     spin_unlock_irqrestore(&current->sighand->siglock, flags);
2987     }
2988    
2989     - return (bytes > 0);
2990     + /* if 'wr' returned 0 (impossible) we assume -EIO (safe) */
2991     + return bytes == 0 ? 0 : wr < 0 ? wr : -EIO;
2992     }
2993    
2994     static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
2995     @@ -101,6 +102,7 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
2996     } pkt;
2997     struct file *pipe = NULL;
2998     size_t pktsz;
2999     + int ret;
3000    
3001     pr_debug("wait id = 0x%08lx, name = %.*s, type=%d\n",
3002     (unsigned long) wq->wait_queue_token,
3003     @@ -175,7 +177,18 @@ static void autofs4_notify_daemon(struct autofs_sb_info *sbi,
3004     mutex_unlock(&sbi->wq_mutex);
3005    
3006     - if (autofs4_write(sbi, pipe, &pkt, pktsz))
3007     + switch (ret = autofs4_write(sbi, pipe, &pkt, pktsz)) {
3008     + case 0:
3009     + break;
3010     + case -ENOMEM:
3011     + case -ERESTARTSYS:
3012     + /* Just fail this one */
3013     + autofs4_wait_release(sbi, wq->wait_queue_token, ret);
3014     + break;
3015     + default:
3016     autofs4_catatonic_mode(sbi);
3017     + break;
3018     + }
3019     fput(pipe);
3020     }
3021    
3022     diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
3023     index 7fc89e4adb41..83bb2f2aa83c 100644
3024     --- a/fs/btrfs/uuid-tree.c
3025     +++ b/fs/btrfs/uuid-tree.c
3026     @@ -351,7 +351,5 @@ int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info,
3027    
3028     out:
3029     btrfs_free_path(path);
3030     - if (ret)
3031     - btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
3032     - return 0;
3033     + return ret;
3034     }
3035     diff --git a/fs/crypto/crypto.c b/fs/crypto/crypto.c
3036     index 61cfccea77bc..73de1446c8d4 100644
3037     --- a/fs/crypto/crypto.c
3038     +++ b/fs/crypto/crypto.c
3039     @@ -484,9 +484,6 @@ int fscrypt_initialize(void)
3040     {
3041     int i, res = -ENOMEM;
3042    
3043     - if (fscrypt_bounce_page_pool)
3044     - return 0;
3045     -
3046     mutex_lock(&fscrypt_init_mutex);
3047     if (fscrypt_bounce_page_pool)
3048     goto already_initialized;
3049     diff --git a/fs/crypto/fname.c b/fs/crypto/fname.c
3050     index d1bbdc9dda76..e14bb7b67e9c 100644
3051     --- a/fs/crypto/fname.c
3052     +++ b/fs/crypto/fname.c
3053     @@ -332,7 +332,7 @@ int fscrypt_fname_usr_to_disk(struct inode *inode,
3054     * in a directory. Consequently, a user space name cannot be mapped to
3055     * a disk-space name
3056     */
3057     - return -EACCES;
3058     + return -ENOKEY;
3059     }
3060     EXPORT_SYMBOL(fscrypt_fname_usr_to_disk);
3061    
3062     @@ -367,7 +367,7 @@ int fscrypt_setup_filename(struct inode *dir, const struct qstr *iname,
3063     return 0;
3064     }
3065     if (!lookup)
3066     - return -EACCES;
3067     + return -ENOKEY;
3068    
3069     /*
3070     * We don't have the key and we are doing a lookup; decode the
3071     diff --git a/fs/crypto/policy.c b/fs/crypto/policy.c
3072     index bb4e209bd809..c160d2d0e18d 100644
3073     --- a/fs/crypto/policy.c
3074     +++ b/fs/crypto/policy.c
3075     @@ -113,7 +113,7 @@ int fscrypt_process_policy(struct file *filp,
3076    
3077     if (!inode_has_encryption_context(inode)) {
3078     if (!S_ISDIR(inode->i_mode))
3079     - ret = -EINVAL;
3080     + ret = -ENOTDIR;
3081     else if (!inode->i_sb->s_cop->empty_dir)
3082     ret = -EOPNOTSUPP;
3083     else if (!inode->i_sb->s_cop->empty_dir(inode))
3084     diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
3085     index 286f10b0363b..4f457d5c4933 100644
3086     --- a/fs/ecryptfs/messaging.c
3087     +++ b/fs/ecryptfs/messaging.c
3088     @@ -442,15 +442,16 @@ void ecryptfs_release_messaging(void)
3089     }
3090     if (ecryptfs_daemon_hash) {
3091     struct ecryptfs_daemon *daemon;
3092     + struct hlist_node *n;
3093     int i;
3094    
3095     mutex_lock(&ecryptfs_daemon_hash_mux);
3096     for (i = 0; i < (1 << ecryptfs_hash_bits); i++) {
3097     int rc;
3098    
3099     - hlist_for_each_entry(daemon,
3100     - &ecryptfs_daemon_hash[i],
3101     - euid_chain) {
3102     + hlist_for_each_entry_safe(daemon, n,
3103     + &ecryptfs_daemon_hash[i],
3104     + euid_chain) {
3105     rc = ecryptfs_exorcise_daemon(daemon);
3106     if (rc)
3107     printk(KERN_ERR "%s: Error whilst "
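
ecryptfs_exorcise_daemon() unhashes and frees the entry it is handed, so advancing the plain iterator afterwards walks freed memory; hlist_for_each_entry_safe() keeps a spare cursor for exactly this case. A toy sketch of the same delete-during-walk pattern on a singly linked list:

#include <stdio.h>
#include <stdlib.h>

struct node {
        int id;
        struct node *next;
};

int main(void)
{
        /* build a three-node list: 1 -> 2 -> 3 */
        struct node *head = NULL;
        for (int i = 3; i >= 1; i--) {
                struct node *n = malloc(sizeof(*n));
                n->id = i;
                n->next = head;
                head = n;
        }

        /* "safe" walk: cache the next pointer before the body frees the
         * current entry, which is what the extra hlist_node cursor in
         * hlist_for_each_entry_safe() provides */
        struct node *pos = head, *tmp;
        while (pos) {
                tmp = pos->next;
                printf("tearing down daemon entry %d\n", pos->id);
                free(pos);
                pos = tmp;
        }
        return 0;
}
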
3108     diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
3109     index a3e0b3b7441d..a77cbc5b657b 100644
3110     --- a/fs/ext4/extents.c
3111     +++ b/fs/ext4/extents.c
3112     @@ -4803,7 +4803,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
3113     }
3114    
3115     if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3116     - offset + len > i_size_read(inode)) {
3117     + (offset + len > i_size_read(inode) ||
3118     + offset + len > EXT4_I(inode)->i_disksize)) {
3119     new_size = offset + len;
3120     ret = inode_newsize_ok(inode, new_size);
3121     if (ret)
3122     @@ -4974,7 +4975,8 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
3123     }
3124    
3125     if (!(mode & FALLOC_FL_KEEP_SIZE) &&
3126     - offset + len > i_size_read(inode)) {
3127     + (offset + len > i_size_read(inode) ||
3128     + offset + len > EXT4_I(inode)->i_disksize)) {
3129     new_size = offset + len;
3130     ret = inode_newsize_ok(inode, new_size);
3131     if (ret)
3132     diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
3133     index 170421edfdfe..2d94e8524839 100644
3134     --- a/fs/ext4/ialloc.c
3135     +++ b/fs/ext4/ialloc.c
3136     @@ -771,7 +771,7 @@ struct inode *__ext4_new_inode(handle_t *handle, struct inode *dir,
3137     if (err)
3138     return ERR_PTR(err);
3139     if (!fscrypt_has_encryption_key(dir))
3140     - return ERR_PTR(-EPERM);
3141     + return ERR_PTR(-ENOKEY);
3142     if (!handle)
3143     nblocks += EXT4_DATA_TRANS_BLOCKS(dir->i_sb);
3144     encrypt = 1;
3145     diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
3146     index 00b8a5a66961..4438b93f6fd6 100644
3147     --- a/fs/ext4/namei.c
3148     +++ b/fs/ext4/namei.c
3149     @@ -1378,6 +1378,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
3150     return NULL;
3151    
3152     retval = ext4_fname_setup_filename(dir, d_name, 1, &fname);
3153     + if (retval == -ENOENT)
3154     + return NULL;
3155     if (retval)
3156     return ERR_PTR(retval);
3157    
3158     @@ -3090,7 +3092,7 @@ static int ext4_symlink(struct inode *dir,
3159     if (err)
3160     return err;
3161     if (!fscrypt_has_encryption_key(dir))
3162     - return -EPERM;
3163     + return -ENOKEY;
3164     disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
3165     sizeof(struct fscrypt_symlink_data));
3166     sd = kzalloc(disk_link.len, GFP_KERNEL);
3167     diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
3168     index 11f3717ce481..8add4e8bab99 100644
3169     --- a/fs/f2fs/dir.c
3170     +++ b/fs/f2fs/dir.c
3171     @@ -277,7 +277,10 @@ struct f2fs_dir_entry *f2fs_find_entry(struct inode *dir,
3172    
3173     err = fscrypt_setup_filename(dir, child, 1, &fname);
3174     if (err) {
3175     - *res_page = ERR_PTR(err);
3176     + if (err == -ENOENT)
3177     + *res_page = NULL;
3178     + else
3179     + *res_page = ERR_PTR(err);
3180     return NULL;
3181     }
3182    
3183     diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
3184     index 08d7dc99042e..8556fe1ccb8a 100644
3185     --- a/fs/f2fs/namei.c
3186     +++ b/fs/f2fs/namei.c
3187     @@ -403,7 +403,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
3188     return err;
3189    
3190     if (!fscrypt_has_encryption_key(dir))
3191     - return -EPERM;
3192     + return -ENOKEY;
3193    
3194     disk_link.len = (fscrypt_fname_encrypted_size(dir, len) +
3195     sizeof(struct fscrypt_symlink_data));
3196     @@ -447,7 +447,7 @@ static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
3197     goto err_out;
3198    
3199     if (!fscrypt_has_encryption_key(inode)) {
3200     - err = -EPERM;
3201     + err = -ENOKEY;
3202     goto err_out;
3203     }
3204    
3205     diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h
3206     index 0ac4c1f73fbd..25177e6bd603 100644
3207     --- a/fs/isofs/isofs.h
3208     +++ b/fs/isofs/isofs.h
3209     @@ -103,7 +103,7 @@ static inline unsigned int isonum_733(char *p)
3210     /* Ignore bigendian datum due to broken mastering programs */
3211     return get_unaligned_le32(p);
3212     }
3213     -extern int iso_date(char *, int);
3214     +extern int iso_date(u8 *, int);
3215    
3216     struct inode; /* To make gcc happy */
3217    
3218     diff --git a/fs/isofs/rock.h b/fs/isofs/rock.h
3219     index ed09e2b08637..f835976ce033 100644
3220     --- a/fs/isofs/rock.h
3221     +++ b/fs/isofs/rock.h
3222     @@ -65,7 +65,7 @@ struct RR_PL_s {
3223     };
3224    
3225     struct stamp {
3226     - char time[7];
3227     + __u8 time[7]; /* actually 6 unsigned, 1 signed */
3228     } __attribute__ ((packed));
3229    
3230     struct RR_TF_s {
3231     diff --git a/fs/isofs/util.c b/fs/isofs/util.c
3232     index 005a15cfd30a..37860fea364d 100644
3233     --- a/fs/isofs/util.c
3234     +++ b/fs/isofs/util.c
3235     @@ -15,7 +15,7 @@
3236     * to GMT. Thus we should always be correct.
3237     */
3238    
3239     -int iso_date(char * p, int flag)
3240     +int iso_date(u8 *p, int flag)
3241     {
3242     int year, month, day, hour, minute, second, tz;
3243     int crtime;
3244     diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
3245     index fc4084ef4736..9d373247222c 100644
3246     --- a/fs/lockd/svc.c
3247     +++ b/fs/lockd/svc.c
3248     @@ -365,6 +365,7 @@ static int lockd_start_svc(struct svc_serv *serv)
3249     printk(KERN_WARNING
3250     "lockd_up: svc_rqst allocation failed, error=%d\n",
3251     error);
3252     + lockd_unregister_notifiers();
3253     goto out_rqst;
3254     }
3255    
3256     @@ -455,13 +456,16 @@ int lockd_up(struct net *net)
3257     }
3258    
3259     error = lockd_up_net(serv, net);
3260     - if (error < 0)
3261     - goto err_net;
3262     + if (error < 0) {
3263     + lockd_unregister_notifiers();
3264     + goto err_put;
3265     + }
3266    
3267     error = lockd_start_svc(serv);
3268     - if (error < 0)
3269     - goto err_start;
3270     -
3271     + if (error < 0) {
3272     + lockd_down_net(serv, net);
3273     + goto err_put;
3274     + }
3275     nlmsvc_users++;
3276     /*
3277     * Note: svc_serv structures have an initial use count of 1,
3278     @@ -472,12 +476,6 @@ int lockd_up(struct net *net)
3279     err_create:
3280     mutex_unlock(&nlmsvc_mutex);
3281     return error;
3282     -
3283     -err_start:
3284     - lockd_down_net(serv, net);
3285     -err_net:
3286     - lockd_unregister_notifiers();
3287     - goto err_put;
3288     }
3289     EXPORT_SYMBOL_GPL(lockd_up);
3290    
3291     diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
3292     index a53b8e0c896a..67845220fc27 100644
3293     --- a/fs/nfs/nfs4proc.c
3294     +++ b/fs/nfs/nfs4proc.c
3295     @@ -256,15 +256,12 @@ const u32 nfs4_fsinfo_bitmap[3] = { FATTR4_WORD0_MAXFILESIZE
3296     };
3297    
3298     const u32 nfs4_fs_locations_bitmap[3] = {
3299     - FATTR4_WORD0_TYPE
3300     - | FATTR4_WORD0_CHANGE
3301     + FATTR4_WORD0_CHANGE
3302     | FATTR4_WORD0_SIZE
3303     | FATTR4_WORD0_FSID
3304     | FATTR4_WORD0_FILEID
3305     | FATTR4_WORD0_FS_LOCATIONS,
3306     - FATTR4_WORD1_MODE
3307     - | FATTR4_WORD1_NUMLINKS
3308     - | FATTR4_WORD1_OWNER
3309     + FATTR4_WORD1_OWNER
3310     | FATTR4_WORD1_OWNER_GROUP
3311     | FATTR4_WORD1_RAWDEV
3312     | FATTR4_WORD1_SPACE_USED
3313     @@ -6678,9 +6675,7 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
3314     struct page *page)
3315     {
3316     struct nfs_server *server = NFS_SERVER(dir);
3317     - u32 bitmask[3] = {
3318     - [0] = FATTR4_WORD0_FSID | FATTR4_WORD0_FS_LOCATIONS,
3319     - };
3320     + u32 bitmask[3];
3321     struct nfs4_fs_locations_arg args = {
3322     .dir_fh = NFS_FH(dir),
3323     .name = name,
3324     @@ -6699,12 +6694,15 @@ static int _nfs4_proc_fs_locations(struct rpc_clnt *client, struct inode *dir,
3325    
3326     dprintk("%s: start\n", __func__);
3327    
3328     + bitmask[0] = nfs4_fattr_bitmap[0] | FATTR4_WORD0_FS_LOCATIONS;
3329     + bitmask[1] = nfs4_fattr_bitmap[1];
3330     +
3331     /* Ask for the fileid of the absent filesystem if mounted_on_fileid
3332     * is not supported */
3333     if (NFS_SERVER(dir)->attr_bitmask[1] & FATTR4_WORD1_MOUNTED_ON_FILEID)
3334     - bitmask[1] |= FATTR4_WORD1_MOUNTED_ON_FILEID;
3335     + bitmask[0] &= ~FATTR4_WORD0_FILEID;
3336     else
3337     - bitmask[0] |= FATTR4_WORD0_FILEID;
3338     + bitmask[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
3339    
3340     nfs_fattr_init(&fs_locations->fattr);
3341     fs_locations->server = server;
3342     diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h
3343     index cfb8f7ce5cf6..20cd8500452a 100644
3344     --- a/fs/nfs/nfs4trace.h
3345     +++ b/fs/nfs/nfs4trace.h
3346     @@ -201,17 +201,13 @@ DECLARE_EVENT_CLASS(nfs4_clientid_event,
3347     TP_ARGS(clp, error),
3348    
3349     TP_STRUCT__entry(
3350     - __string(dstaddr,
3351     - rpc_peeraddr2str(clp->cl_rpcclient,
3352     - RPC_DISPLAY_ADDR))
3353     + __string(dstaddr, clp->cl_hostname)
3354     __field(int, error)
3355     ),
3356    
3357     TP_fast_assign(
3358     __entry->error = error;
3359     - __assign_str(dstaddr,
3360     - rpc_peeraddr2str(clp->cl_rpcclient,
3361     - RPC_DISPLAY_ADDR));
3362     + __assign_str(dstaddr, clp->cl_hostname);
3363     ),
3364    
3365     TP_printk(
3366     @@ -1103,9 +1099,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
3367     __field(dev_t, dev)
3368     __field(u32, fhandle)
3369     __field(u64, fileid)
3370     - __string(dstaddr, clp ?
3371     - rpc_peeraddr2str(clp->cl_rpcclient,
3372     - RPC_DISPLAY_ADDR) : "unknown")
3373     + __string(dstaddr, clp ? clp->cl_hostname : "unknown")
3374     ),
3375    
3376     TP_fast_assign(
3377     @@ -1118,9 +1112,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_callback_event,
3378     __entry->fileid = 0;
3379     __entry->dev = 0;
3380     }
3381     - __assign_str(dstaddr, clp ?
3382     - rpc_peeraddr2str(clp->cl_rpcclient,
3383     - RPC_DISPLAY_ADDR) : "unknown")
3384     + __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
3385     ),
3386    
3387     TP_printk(
3388     @@ -1162,9 +1154,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
3389     __field(dev_t, dev)
3390     __field(u32, fhandle)
3391     __field(u64, fileid)
3392     - __string(dstaddr, clp ?
3393     - rpc_peeraddr2str(clp->cl_rpcclient,
3394     - RPC_DISPLAY_ADDR) : "unknown")
3395     + __string(dstaddr, clp ? clp->cl_hostname : "unknown")
3396     __field(int, stateid_seq)
3397     __field(u32, stateid_hash)
3398     ),
3399     @@ -1179,9 +1169,7 @@ DECLARE_EVENT_CLASS(nfs4_inode_stateid_callback_event,
3400     __entry->fileid = 0;
3401     __entry->dev = 0;
3402     }
3403     - __assign_str(dstaddr, clp ?
3404     - rpc_peeraddr2str(clp->cl_rpcclient,
3405     - RPC_DISPLAY_ADDR) : "unknown")
3406     + __assign_str(dstaddr, clp ? clp->cl_hostname : "unknown")
3407     __entry->stateid_seq =
3408     be32_to_cpu(stateid->seqid);
3409     __entry->stateid_hash =
3410     diff --git a/fs/nfs/super.c b/fs/nfs/super.c
3411     index ddce94ce8142..51bf1f9ab287 100644
3412     --- a/fs/nfs/super.c
3413     +++ b/fs/nfs/super.c
3414     @@ -1339,7 +1339,7 @@ static int nfs_parse_mount_options(char *raw,
3415     mnt->options |= NFS_OPTION_MIGRATION;
3416     break;
3417     case Opt_nomigration:
3418     - mnt->options &= NFS_OPTION_MIGRATION;
3419     + mnt->options &= ~NFS_OPTION_MIGRATION;
3420     break;
3421    
3422     /*
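
The Opt_nomigration arm was missing the bitwise NOT: instead of clearing NFS_OPTION_MIGRATION it masked away every other mount option and left MIGRATION set. A minimal sketch of the difference; the flag values are assumptions for illustration, not taken from the kernel headers:

#include <stdio.h>

#define NFS_OPTION_FSCACHE      0x1     /* assumed bit values */
#define NFS_OPTION_MIGRATION    0x2

int main(void)
{
        unsigned int options = NFS_OPTION_FSCACHE | NFS_OPTION_MIGRATION;

        unsigned int buggy = options;
        buggy &= NFS_OPTION_MIGRATION;  /* keeps only MIGRATION, drops FSCACHE */

        unsigned int fixed = options;
        fixed &= ~NFS_OPTION_MIGRATION; /* clears MIGRATION, keeps everything else */

        printf("buggy result: 0x%x\n", buggy);  /* 0x2: MIGRATION still set */
        printf("fixed result: 0x%x\n", fixed);  /* 0x1: only MIGRATION cleared */
        return 0;
}
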
3423     diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
3424     index d35eb077330f..ec2a69dac536 100644
3425     --- a/fs/nfsd/nfs4state.c
3426     +++ b/fs/nfsd/nfs4state.c
3427     @@ -3967,7 +3967,8 @@ static struct nfs4_delegation *find_deleg_stateid(struct nfs4_client *cl, statei
3428     {
3429     struct nfs4_stid *ret;
3430    
3431     - ret = find_stateid_by_type(cl, s, NFS4_DELEG_STID);
3432     + ret = find_stateid_by_type(cl, s,
3433     + NFS4_DELEG_STID|NFS4_REVOKED_DELEG_STID);
3434     if (!ret)
3435     return NULL;
3436     return delegstateid(ret);
3437     @@ -3990,6 +3991,12 @@ nfs4_check_deleg(struct nfs4_client *cl, struct nfsd4_open *open,
3438     deleg = find_deleg_stateid(cl, &open->op_delegate_stateid);
3439     if (deleg == NULL)
3440     goto out;
3441     + if (deleg->dl_stid.sc_type == NFS4_REVOKED_DELEG_STID) {
3442     + nfs4_put_stid(&deleg->dl_stid);
3443     + if (cl->cl_minorversion)
3444     + status = nfserr_deleg_revoked;
3445     + goto out;
3446     + }
3447     flags = share_access_to_flags(open->op_share_access);
3448     status = nfs4_check_delegmode(deleg, flags);
3449     if (status) {
3450     @@ -4858,6 +4865,16 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3451     struct nfs4_stid **s, struct nfsd_net *nn)
3452     {
3453     __be32 status;
3454     + bool return_revoked = false;
3455     +
3456     + /*
3457     + * only return revoked delegations if explicitly asked.
3458     + * otherwise we report revoked or bad_stateid status.
3459     + */
3460     + if (typemask & NFS4_REVOKED_DELEG_STID)
3461     + return_revoked = true;
3462     + else if (typemask & NFS4_DELEG_STID)
3463     + typemask |= NFS4_REVOKED_DELEG_STID;
3464    
3465     if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
3466     return nfserr_bad_stateid;
3467     @@ -4872,6 +4889,12 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
3468     *s = find_stateid_by_type(cstate->clp, stateid, typemask);
3469     if (!*s)
3470     return nfserr_bad_stateid;
3471     + if (((*s)->sc_type == NFS4_REVOKED_DELEG_STID) && !return_revoked) {
3472     + nfs4_put_stid(*s);
3473     + if (cstate->minorversion)
3474     + return nfserr_deleg_revoked;
3475     + return nfserr_bad_stateid;
3476     + }
3477     return nfs_ok;
3478     }
3479    
3480     diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
3481     index 7d18d62e8e07..36362d4bc344 100644
3482     --- a/fs/nilfs2/segment.c
3483     +++ b/fs/nilfs2/segment.c
3484     @@ -1956,8 +1956,6 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
3485     err, ii->vfs_inode.i_ino);
3486     return err;
3487     }
3488     - mark_buffer_dirty(ibh);
3489     - nilfs_mdt_mark_dirty(ifile);
3490     spin_lock(&nilfs->ns_inode_lock);
3491     if (likely(!ii->i_bh))
3492     ii->i_bh = ibh;
3493     @@ -1966,6 +1964,10 @@ static int nilfs_segctor_collect_dirty_files(struct nilfs_sc_info *sci,
3494     goto retry;
3495     }
3496    
3497     + // Always redirty the buffer to avoid race condition
3498     + mark_buffer_dirty(ii->i_bh);
3499     + nilfs_mdt_mark_dirty(ifile);
3500     +
3501     clear_bit(NILFS_I_QUEUED, &ii->i_state);
3502     set_bit(NILFS_I_BUSY, &ii->i_state);
3503     list_move_tail(&ii->i_dirty, &sci->sc_dirty_files);
3504     diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
3505     index 8a707f8a41c3..8a13e3903839 100644
3506     --- a/include/trace/events/sunrpc.h
3507     +++ b/include/trace/events/sunrpc.h
3508     @@ -455,20 +455,22 @@ TRACE_EVENT(svc_recv,
3509     TP_ARGS(rqst, status),
3510    
3511     TP_STRUCT__entry(
3512     - __field(struct sockaddr *, addr)
3513     __field(__be32, xid)
3514     __field(int, status)
3515     __field(unsigned long, flags)
3516     + __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
3517     ),
3518    
3519     TP_fast_assign(
3520     - __entry->addr = (struct sockaddr *)&rqst->rq_addr;
3521     __entry->xid = status > 0 ? rqst->rq_xid : 0;
3522     __entry->status = status;
3523     __entry->flags = rqst->rq_flags;
3524     + memcpy(__get_dynamic_array(addr),
3525     + &rqst->rq_addr, rqst->rq_addrlen);
3526     ),
3527    
3528     - TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s", __entry->addr,
3529     + TP_printk("addr=%pIScp xid=0x%x status=%d flags=%s",
3530     + (struct sockaddr *)__get_dynamic_array(addr),
3531     be32_to_cpu(__entry->xid), __entry->status,
3532     show_rqstp_flags(__entry->flags))
3533     );
3534     @@ -513,22 +515,23 @@ DECLARE_EVENT_CLASS(svc_rqst_status,
3535     TP_ARGS(rqst, status),
3536    
3537     TP_STRUCT__entry(
3538     - __field(struct sockaddr *, addr)
3539     __field(__be32, xid)
3540     - __field(int, dropme)
3541     __field(int, status)
3542     __field(unsigned long, flags)
3543     + __dynamic_array(unsigned char, addr, rqst->rq_addrlen)
3544     ),
3545    
3546     TP_fast_assign(
3547     - __entry->addr = (struct sockaddr *)&rqst->rq_addr;
3548     __entry->xid = rqst->rq_xid;
3549     __entry->status = status;
3550     __entry->flags = rqst->rq_flags;
3551     + memcpy(__get_dynamic_array(addr),
3552     + &rqst->rq_addr, rqst->rq_addrlen);
3553     ),
3554    
3555     TP_printk("addr=%pIScp rq_xid=0x%x status=%d flags=%s",
3556     - __entry->addr, be32_to_cpu(__entry->xid),
3557     + (struct sockaddr *)__get_dynamic_array(addr),
3558     + be32_to_cpu(__entry->xid),
3559     __entry->status, show_rqstp_flags(__entry->flags))
3560     );
3561    
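
Both sunrpc tracepoints stop recording a pointer to rqst->rq_addr and instead snapshot the address bytes into the ring buffer via __dynamic_array(); a stored pointer is only dereferenced when the trace is read, by which time the request may be long gone. A userspace sketch of why the copy matters:

#include <stdio.h>
#include <string.h>

struct record_ptr {
        const char *addr;       /* what the old tracepoint effectively stored */
};

struct record_copy {
        char addr[32];          /* what __dynamic_array() stores: the bytes */
};

int main(void)
{
        struct record_ptr rp;
        struct record_copy rc;

        {
                /* stands in for rqst->rq_addr, which only lives as long as
                 * the request being traced */
                char sockaddr[32] = "192.0.2.1:2049";

                rp.addr = sockaddr;                             /* soon-dangling pointer */
                memcpy(rc.addr, sockaddr, sizeof(rc.addr));     /* snapshot */
        }
        (void)rp;       /* rp.addr now points at dead storage; never read it */

        /* the copied record is still usable when the "trace" is formatted */
        printf("copied record: %s\n", rc.addr);
        return 0;
}
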
3562     diff --git a/kernel/sched/core.c b/kernel/sched/core.c
3563     index 78181c03d9c7..e5066955cc3a 100644
3564     --- a/kernel/sched/core.c
3565     +++ b/kernel/sched/core.c
3566     @@ -507,8 +507,7 @@ void resched_cpu(int cpu)
3567     struct rq *rq = cpu_rq(cpu);
3568     unsigned long flags;
3569    
3570     - if (!raw_spin_trylock_irqsave(&rq->lock, flags))
3571     - return;
3572     + raw_spin_lock_irqsave(&rq->lock, flags);
3573     resched_curr(rq);
3574     raw_spin_unlock_irqrestore(&rq->lock, flags);
3575     }
3576     @@ -5878,6 +5877,12 @@ static int init_rootdomain(struct root_domain *rd)
3577     if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
3578     goto free_dlo_mask;
3579    
3580     +#ifdef HAVE_RT_PUSH_IPI
3581     + rd->rto_cpu = -1;
3582     + raw_spin_lock_init(&rd->rto_lock);
3583     + init_irq_work(&rd->rto_push_work, rto_push_irq_work_func);
3584     +#endif
3585     +
3586     init_dl_bw(&rd->dl_bw);
3587     if (cpudl_init(&rd->cpudl) != 0)
3588     goto free_dlo_mask;
3589     diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
3590     index f139f22ce30d..9c131168d933 100644
3591     --- a/kernel/sched/rt.c
3592     +++ b/kernel/sched/rt.c
3593     @@ -72,10 +72,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
3594     raw_spin_unlock(&rt_b->rt_runtime_lock);
3595     }
3596    
3597     -#if defined(CONFIG_SMP) && defined(HAVE_RT_PUSH_IPI)
3598     -static void push_irq_work_func(struct irq_work *work);
3599     -#endif
3600     -
3601     void init_rt_rq(struct rt_rq *rt_rq)
3602     {
3603     struct rt_prio_array *array;
3604     @@ -95,13 +91,6 @@ void init_rt_rq(struct rt_rq *rt_rq)
3605     rt_rq->rt_nr_migratory = 0;
3606     rt_rq->overloaded = 0;
3607     plist_head_init(&rt_rq->pushable_tasks);
3608     -
3609     -#ifdef HAVE_RT_PUSH_IPI
3610     - rt_rq->push_flags = 0;
3611     - rt_rq->push_cpu = nr_cpu_ids;
3612     - raw_spin_lock_init(&rt_rq->push_lock);
3613     - init_irq_work(&rt_rq->push_work, push_irq_work_func);
3614     -#endif
3615     #endif /* CONFIG_SMP */
3616     /* We start is dequeued state, because no RT tasks are queued */
3617     rt_rq->rt_queued = 0;
3618     @@ -1864,160 +1853,166 @@ static void push_rt_tasks(struct rq *rq)
3619     }
3620    
3621     #ifdef HAVE_RT_PUSH_IPI
3622     +
3623     /*
3624     - * The search for the next cpu always starts at rq->cpu and ends
3625     - * when we reach rq->cpu again. It will never return rq->cpu.
3626     - * This returns the next cpu to check, or nr_cpu_ids if the loop
3627     - * is complete.
3628     + * When a high priority task schedules out from a CPU and a lower priority
3629     + * task is scheduled in, a check is made to see if there's any RT tasks
3630     + * on other CPUs that are waiting to run because a higher priority RT task
3631     + * is currently running on its CPU. In this case, the CPU with multiple RT
3632     + * tasks queued on it (overloaded) needs to be notified that a CPU has opened
3633     + * up that may be able to run one of its non-running queued RT tasks.
3634     + *
3635     + * All CPUs with overloaded RT tasks need to be notified as there is currently
3636     + * no way to know which of these CPUs have the highest priority task waiting
3637     + * to run. Instead of trying to take a spinlock on each of these CPUs,
3638     + * which has shown to cause large latency when done on machines with many
3639     + * CPUs, sending an IPI to the CPUs to have them push off the overloaded
3640     + * RT tasks waiting to run.
3641     + *
3642     + * Just sending an IPI to each of the CPUs is also an issue, as on large
3643     + * count CPU machines, this can cause an IPI storm on a CPU, especially
3644     + * if it's the only CPU with multiple RT tasks queued, and a large number
3645     + * of CPUs scheduling a lower priority task at the same time.
3646     + *
3647     + * Each root domain has its own irq work function that can iterate over
3648     + * all CPUs with RT overloaded tasks. Since all CPUs with overloaded RT
3649     + * tasks must be checked if there's one or many CPUs that are lowering
3650     + * their priority, there's a single irq work iterator that will try to
3651     + * push off RT tasks that are waiting to run.
3652     + *
3653     + * When a CPU schedules a lower priority task, it will kick off the
3654     + * irq work iterator that will jump to each CPU with overloaded RT tasks.
3655     + * As it only takes the first CPU that schedules a lower priority task
3656     + * to start the process, the rto_start variable is incremented and if
3657     + * the atomic result is one, then that CPU will try to take the rto_lock.
3658     + * This prevents high contention on the lock as the process handles all
3659     + * CPUs scheduling lower priority tasks.
3660     + *
3661     + * All CPUs that are scheduling a lower priority task will increment the
3662     + * rt_loop_next variable. This will make sure that the irq work iterator
3663     + * checks all RT overloaded CPUs whenever a CPU schedules a new lower
3664     + * priority task, even if the iterator is in the middle of a scan. Incrementing
3665     + * the rt_loop_next will cause the iterator to perform another scan.
3666     *
3667     - * rq->rt.push_cpu holds the last cpu returned by this function,
3668     - * or if this is the first instance, it must hold rq->cpu.
3669     */
3670     static int rto_next_cpu(struct rq *rq)
3671     {
3672     - int prev_cpu = rq->rt.push_cpu;
3673     + struct root_domain *rd = rq->rd;
3674     + int next;
3675     int cpu;
3676    
3677     - cpu = cpumask_next(prev_cpu, rq->rd->rto_mask);
3678     -
3679     /*
3680     - * If the previous cpu is less than the rq's CPU, then it already
3681     - * passed the end of the mask, and has started from the beginning.
3682     - * We end if the next CPU is greater or equal to rq's CPU.
3683     + * When starting the IPI RT pushing, the rto_cpu is set to -1,
3684     + * rt_next_cpu() will simply return the first CPU found in
3685     + * the rto_mask.
3686     + *
3687     + * If rto_next_cpu() is called with rto_cpu is a valid cpu, it
3688     + * will return the next CPU found in the rto_mask.
3689     + *
3690     + * If there are no more CPUs left in the rto_mask, then a check is made
3691     + * against rto_loop and rto_loop_next. rto_loop is only updated with
3692     + * the rto_lock held, but any CPU may increment the rto_loop_next
3693     + * without any locking.
3694     */
3695     - if (prev_cpu < rq->cpu) {
3696     - if (cpu >= rq->cpu)
3697     - return nr_cpu_ids;
3698     + for (;;) {
3699    
3700     - } else if (cpu >= nr_cpu_ids) {
3701     - /*
3702     - * We passed the end of the mask, start at the beginning.
3703     - * If the result is greater or equal to the rq's CPU, then
3704     - * the loop is finished.
3705     - */
3706     - cpu = cpumask_first(rq->rd->rto_mask);
3707     - if (cpu >= rq->cpu)
3708     - return nr_cpu_ids;
3709     - }
3710     - rq->rt.push_cpu = cpu;
3711     + /* When rto_cpu is -1 this acts like cpumask_first() */
3712     + cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
3713    
3714     - /* Return cpu to let the caller know if the loop is finished or not */
3715     - return cpu;
3716     -}
3717     + rd->rto_cpu = cpu;
3718    
3719     -static int find_next_push_cpu(struct rq *rq)
3720     -{
3721     - struct rq *next_rq;
3722     - int cpu;
3723     + if (cpu < nr_cpu_ids)
3724     + return cpu;
3725    
3726     - while (1) {
3727     - cpu = rto_next_cpu(rq);
3728     - if (cpu >= nr_cpu_ids)
3729     - break;
3730     - next_rq = cpu_rq(cpu);
3731     + rd->rto_cpu = -1;
3732    
3733     - /* Make sure the next rq can push to this rq */
3734     - if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr)
3735     + /*
3736     + * ACQUIRE ensures we see the @rto_mask changes
3737     + * made prior to the @next value observed.
3738     + *
3739     + * Matches WMB in rt_set_overload().
3740     + */
3741     + next = atomic_read_acquire(&rd->rto_loop_next);
3742     +
3743     + if (rd->rto_loop == next)
3744     break;
3745     +
3746     + rd->rto_loop = next;
3747     }
3748    
3749     - return cpu;
3750     + return -1;
3751     +}
3752     +
3753     +static inline bool rto_start_trylock(atomic_t *v)
3754     +{
3755     + return !atomic_cmpxchg_acquire(v, 0, 1);
3756     }
3757    
3758     -#define RT_PUSH_IPI_EXECUTING 1
3759     -#define RT_PUSH_IPI_RESTART 2
3760     +static inline void rto_start_unlock(atomic_t *v)
3761     +{
3762     + atomic_set_release(v, 0);
3763     +}
3764    
3765     static void tell_cpu_to_push(struct rq *rq)
3766     {
3767     - int cpu;
3768     + int cpu = -1;
3769    
3770     - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
3771     - raw_spin_lock(&rq->rt.push_lock);
3772     - /* Make sure it's still executing */
3773     - if (rq->rt.push_flags & RT_PUSH_IPI_EXECUTING) {
3774     - /*
3775     - * Tell the IPI to restart the loop as things have
3776     - * changed since it started.
3777     - */
3778     - rq->rt.push_flags |= RT_PUSH_IPI_RESTART;
3779     - raw_spin_unlock(&rq->rt.push_lock);
3780     - return;
3781     - }
3782     - raw_spin_unlock(&rq->rt.push_lock);
3783     - }
3784     + /* Keep the loop going if the IPI is currently active */
3785     + atomic_inc(&rq->rd->rto_loop_next);
3786    
3787     - /* When here, there's no IPI going around */
3788     -
3789     - rq->rt.push_cpu = rq->cpu;
3790     - cpu = find_next_push_cpu(rq);
3791     - if (cpu >= nr_cpu_ids)
3792     + /* Only one CPU can initiate a loop at a time */
3793     + if (!rto_start_trylock(&rq->rd->rto_loop_start))
3794     return;
3795    
3796     - rq->rt.push_flags = RT_PUSH_IPI_EXECUTING;
3797     + raw_spin_lock(&rq->rd->rto_lock);
3798     +
3799     + /*
3800     + * The rto_cpu is updated under the lock, if it has a valid cpu
3801     + * then the IPI is still running and will continue due to the
3802     + * update to loop_next, and nothing needs to be done here.
3803     + * Otherwise it is finishing up and an ipi needs to be sent.
3804     + */
3805     + if (rq->rd->rto_cpu < 0)
3806     + cpu = rto_next_cpu(rq);
3807     +
3808     + raw_spin_unlock(&rq->rd->rto_lock);
3809    
3810     - irq_work_queue_on(&rq->rt.push_work, cpu);
3811     + rto_start_unlock(&rq->rd->rto_loop_start);
3812     +
3813     + if (cpu >= 0)
3814     + irq_work_queue_on(&rq->rd->rto_push_work, cpu);
3815     }
3816    
3817     /* Called from hardirq context */
3818     -static void try_to_push_tasks(void *arg)
3819     +void rto_push_irq_work_func(struct irq_work *work)
3820     {
3821     - struct rt_rq *rt_rq = arg;
3822     - struct rq *rq, *src_rq;
3823     - int this_cpu;
3824     + struct rq *rq;
3825     int cpu;
3826    
3827     - this_cpu = rt_rq->push_cpu;
3828     + rq = this_rq();
3829    
3830     - /* Paranoid check */
3831     - BUG_ON(this_cpu != smp_processor_id());
3832     -
3833     - rq = cpu_rq(this_cpu);
3834     - src_rq = rq_of_rt_rq(rt_rq);
3835     -
3836     -again:
3837     + /*
3838     + * We do not need to grab the lock to check for has_pushable_tasks.
3839     + * When it gets updated, a check is made if a push is possible.
3840     + */
3841     if (has_pushable_tasks(rq)) {
3842     raw_spin_lock(&rq->lock);
3843     - push_rt_task(rq);
3844     + push_rt_tasks(rq);
3845     raw_spin_unlock(&rq->lock);
3846     }
3847    
3848     - /* Pass the IPI to the next rt overloaded queue */
3849     - raw_spin_lock(&rt_rq->push_lock);
3850     - /*
3851     - * If the source queue changed since the IPI went out,
3852     - * we need to restart the search from that CPU again.
3853     - */
3854     - if (rt_rq->push_flags & RT_PUSH_IPI_RESTART) {
3855     - rt_rq->push_flags &= ~RT_PUSH_IPI_RESTART;
3856     - rt_rq->push_cpu = src_rq->cpu;
3857     - }
3858     + raw_spin_lock(&rq->rd->rto_lock);
3859    
3860     - cpu = find_next_push_cpu(src_rq);
3861     + /* Pass the IPI to the next rt overloaded queue */
3862     + cpu = rto_next_cpu(rq);
3863    
3864     - if (cpu >= nr_cpu_ids)
3865     - rt_rq->push_flags &= ~RT_PUSH_IPI_EXECUTING;
3866     - raw_spin_unlock(&rt_rq->push_lock);
3867     + raw_spin_unlock(&rq->rd->rto_lock);
3868    
3869     - if (cpu >= nr_cpu_ids)
3870     + if (cpu < 0)
3871     return;
3872    
3873     - /*
3874     - * It is possible that a restart caused this CPU to be
3875     - * chosen again. Don't bother with an IPI, just see if we
3876     - * have more to push.
3877     - */
3878     - if (unlikely(cpu == rq->cpu))
3879     - goto again;
3880     -
3881     /* Try the next RT overloaded CPU */
3882     - irq_work_queue_on(&rt_rq->push_work, cpu);
3883     -}
3884     -
3885     -static void push_irq_work_func(struct irq_work *work)
3886     -{
3887     - struct rt_rq *rt_rq = container_of(work, struct rt_rq, push_work);
3888     -
3889     - try_to_push_tasks(rt_rq);
3890     + irq_work_queue_on(&rq->rd->rto_push_work, cpu);
3891     }
3892     #endif /* HAVE_RT_PUSH_IPI */
3893    
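
The new rto_next_cpu() walks the root domain's rto_mask with one per-domain cursor that starts at -1, so the first lookup acts like cpumask_first() and each IPI hands the work to the next RT-overloaded CPU until the mask is exhausted (or rto_loop_next forces another pass). A toy sketch of that cursor walk, with mask_next() standing in for cpumask_next():

#include <stdio.h>

/* toy stand-in for cpumask_next(): first set bit strictly after 'prev',
 * or nbits when none is left (nbits plays the role of nr_cpu_ids) */
static int mask_next(int prev, unsigned long mask, int nbits)
{
        for (int cpu = prev + 1; cpu < nbits; cpu++)
                if (mask & (1UL << cpu))
                        return cpu;
        return nbits;
}

int main(void)
{
        unsigned long rto_mask = 0x16;  /* CPUs 1, 2 and 4 are RT-overloaded */
        int nbits = 8;
        int rto_cpu = -1;               /* -1 makes the first lookup act like "first" */

        for (;;) {
                rto_cpu = mask_next(rto_cpu, rto_mask, nbits);
                if (rto_cpu >= nbits)
                        break;          /* pass complete; the IPI chain stops here
                                         * unless another pass has been requested */
                printf("forward push IPI to CPU %d\n", rto_cpu);
        }
        return 0;
}
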
3894     diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
3895     index ad77d666583c..cff985feb6e7 100644
3896     --- a/kernel/sched/sched.h
3897     +++ b/kernel/sched/sched.h
3898     @@ -463,7 +463,7 @@ static inline int rt_bandwidth_enabled(void)
3899     }
3900    
3901     /* RT IPI pull logic requires IRQ_WORK */
3902     -#ifdef CONFIG_IRQ_WORK
3903     +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_SMP)
3904     # define HAVE_RT_PUSH_IPI
3905     #endif
3906    
3907     @@ -485,12 +485,6 @@ struct rt_rq {
3908     unsigned long rt_nr_total;
3909     int overloaded;
3910     struct plist_head pushable_tasks;
3911     -#ifdef HAVE_RT_PUSH_IPI
3912     - int push_flags;
3913     - int push_cpu;
3914     - struct irq_work push_work;
3915     - raw_spinlock_t push_lock;
3916     -#endif
3917     #endif /* CONFIG_SMP */
3918     int rt_queued;
3919    
3920     @@ -572,6 +566,19 @@ struct root_domain {
3921     struct dl_bw dl_bw;
3922     struct cpudl cpudl;
3923    
3924     +#ifdef HAVE_RT_PUSH_IPI
3925     + /*
3926     + * For IPI pull requests, loop across the rto_mask.
3927     + */
3928     + struct irq_work rto_push_work;
3929     + raw_spinlock_t rto_lock;
3930     + /* These are only updated and read within rto_lock */
3931     + int rto_loop;
3932     + int rto_cpu;
3933     + /* These atomics are updated outside of a lock */
3934     + atomic_t rto_loop_next;
3935     + atomic_t rto_loop_start;
3936     +#endif
3937     /*
3938     * The "RT overload" flag: it gets set if a CPU has more than
3939     * one runnable RT task.
3940     @@ -584,6 +591,9 @@ struct root_domain {
3941    
3942     extern struct root_domain def_root_domain;
3943    
3944     +#ifdef HAVE_RT_PUSH_IPI
3945     +extern void rto_push_irq_work_func(struct irq_work *work);
3946     +#endif
3947     #endif /* CONFIG_SMP */
3948    
3949     /*
3950     diff --git a/lib/mpi/mpi-pow.c b/lib/mpi/mpi-pow.c
3951     index e24388a863a7..468fb7cd1221 100644
3952     --- a/lib/mpi/mpi-pow.c
3953     +++ b/lib/mpi/mpi-pow.c
3954     @@ -26,6 +26,7 @@
3955     * however I decided to publish this code under the plain GPL.
3956     */
3957    
3958     +#include <linux/sched.h>
3959     #include <linux/string.h>
3960     #include "mpi-internal.h"
3961     #include "longlong.h"
3962     @@ -256,6 +257,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
3963     }
3964     e <<= 1;
3965     c--;
3966     + cond_resched();
3967     }
3968    
3969     i--;
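
mpi_powm() can grind through enormous operands, which is why the fix drops a cond_resched() into the per-bit loop. A rough userspace analogue of yielding from inside a long CPU-bound loop; sched_yield() is only a loose stand-in for the kernel's voluntary preemption point:

#include <sched.h>
#include <stdio.h>

int main(void)
{
        unsigned long acc = 0;

        for (unsigned long i = 0; i < 50000000UL; i++) {
                acc += i & 0xff;

                /* periodically offer the CPU to other runnable tasks from
                 * inside a long computation */
                if ((i & 0xfffff) == 0)
                        sched_yield();
        }

        printf("%lu\n", acc);
        return 0;
}
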
3970     diff --git a/net/9p/client.c b/net/9p/client.c
3971     index cf129fec7329..1fd60190177e 100644
3972     --- a/net/9p/client.c
3973     +++ b/net/9p/client.c
3974     @@ -749,8 +749,7 @@ p9_client_rpc(struct p9_client *c, int8_t type, const char *fmt, ...)
3975     }
3976     again:
3977     /* Wait for the response */
3978     - err = wait_event_interruptible(*req->wq,
3979     - req->status >= REQ_STATUS_RCVD);
3980     + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
3981    
3982     /*
3983     * Make sure our req is coherent with regard to updates in other
3984     diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
3985     index f24b25c25106..f3a4efcf1456 100644
3986     --- a/net/9p/trans_virtio.c
3987     +++ b/net/9p/trans_virtio.c
3988     @@ -286,8 +286,8 @@ p9_virtio_request(struct p9_client *client, struct p9_req_t *req)
3989     if (err == -ENOSPC) {
3990     chan->ring_bufs_avail = 0;
3991     spin_unlock_irqrestore(&chan->lock, flags);
3992     - err = wait_event_interruptible(*chan->vc_wq,
3993     - chan->ring_bufs_avail);
3994     + err = wait_event_killable(*chan->vc_wq,
3995     + chan->ring_bufs_avail);
3996     if (err == -ERESTARTSYS)
3997     return err;
3998    
3999     @@ -327,7 +327,7 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
4000     * Other zc request to finish here
4001     */
4002     if (atomic_read(&vp_pinned) >= chan->p9_max_pages) {
4003     - err = wait_event_interruptible(vp_wq,
4004     + err = wait_event_killable(vp_wq,
4005     (atomic_read(&vp_pinned) < chan->p9_max_pages));
4006     if (err == -ERESTARTSYS)
4007     return err;
4008     @@ -471,8 +471,8 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
4009     if (err == -ENOSPC) {
4010     chan->ring_bufs_avail = 0;
4011     spin_unlock_irqrestore(&chan->lock, flags);
4012     - err = wait_event_interruptible(*chan->vc_wq,
4013     - chan->ring_bufs_avail);
4014     + err = wait_event_killable(*chan->vc_wq,
4015     + chan->ring_bufs_avail);
4016     if (err == -ERESTARTSYS)
4017     goto err_out;
4018    
4019     @@ -489,8 +489,7 @@ p9_virtio_zc_request(struct p9_client *client, struct p9_req_t *req,
4020     virtqueue_kick(chan->vq);
4021     spin_unlock_irqrestore(&chan->lock, flags);
4022     p9_debug(P9_DEBUG_TRANS, "virtio request kicked\n");
4023     - err = wait_event_interruptible(*req->wq,
4024     - req->status >= REQ_STATUS_RCVD);
4025     + err = wait_event_killable(*req->wq, req->status >= REQ_STATUS_RCVD);
4026     /*
4027     * Non kernel buffers are pinned, unpin them
4028     */
4029     diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
4030     index 292e33bd916e..5f3a627afcc6 100644
4031     --- a/net/ceph/crypto.c
4032     +++ b/net/ceph/crypto.c
4033     @@ -34,7 +34,9 @@ static int set_secret(struct ceph_crypto_key *key, void *buf)
4034     return -ENOTSUPP;
4035     }
4036    
4037     - WARN_ON(!key->len);
4038     + if (!key->len)
4039     + return -EINVAL;
4040     +
4041     key->key = kmemdup(buf, key->len, GFP_NOIO);
4042     if (!key->key) {
4043     ret = -ENOMEM;
4044     diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
4045     index 4d37bdcbc2d5..551dd393ceec 100644
4046     --- a/net/ipv4/ip_sockglue.c
4047     +++ b/net/ipv4/ip_sockglue.c
4048     @@ -819,6 +819,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
4049     {
4050     struct ip_mreqn mreq;
4051     struct net_device *dev = NULL;
4052     + int midx;
4053    
4054     if (sk->sk_type == SOCK_STREAM)
4055     goto e_inval;
4056     @@ -863,11 +864,15 @@ static int do_ip_setsockopt(struct sock *sk, int level,
4057     err = -EADDRNOTAVAIL;
4058     if (!dev)
4059     break;
4060     +
4061     + midx = l3mdev_master_ifindex(dev);
4062     +
4063     dev_put(dev);
4064    
4065     err = -EINVAL;
4066     if (sk->sk_bound_dev_if &&
4067     - mreq.imr_ifindex != sk->sk_bound_dev_if)
4068     + mreq.imr_ifindex != sk->sk_bound_dev_if &&
4069     + (!midx || midx != sk->sk_bound_dev_if))
4070     break;
4071    
4072     inet->mc_index = mreq.imr_ifindex;
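
A socket bound to a VRF (l3mdev) device previously could not select a multicast interface that is one of that VRF's slaves, because the requested ifindex never equalled the bound one. The check now also accepts the case where the slave's L3 master matches the bound device; the IPv6 hunk below applies the same rule under RCU. A hedged sketch of the relaxed check:

#include <stdio.h>

/* allowed if the socket is unbound, bound to the requested interface, or
 * bound to the L3 master (VRF) device the requested interface is enslaved
 * to -- the last case is what the fix adds */
static int mcast_iface_allowed(int bound, int requested, int master)
{
        if (!bound)
                return 1;
        if (requested == bound)
                return 1;
        if (master && master == bound)
                return 1;
        return 0;
}

int main(void)
{
        /* socket bound to VRF ifindex 10; request names slave ifindex 7
         * whose l3mdev master is 10 */
        printf("slave of bound VRF: %s\n",
               mcast_iface_allowed(10, 7, 10) ? "allowed" : "rejected");
        printf("unrelated device:   %s\n",
               mcast_iface_allowed(10, 7, 0) ? "allowed" : "rejected");
        return 0;
}
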
4073     diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
4074     index 636ec56f5f50..38bee173dc2b 100644
4075     --- a/net/ipv6/ipv6_sockglue.c
4076     +++ b/net/ipv6/ipv6_sockglue.c
4077     @@ -585,16 +585,24 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
4078    
4079     if (val) {
4080     struct net_device *dev;
4081     + int midx;
4082    
4083     - if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != val)
4084     - goto e_inval;
4085     + rcu_read_lock();
4086    
4087     - dev = dev_get_by_index(net, val);
4088     + dev = dev_get_by_index_rcu(net, val);
4089     if (!dev) {
4090     + rcu_read_unlock();
4091     retv = -ENODEV;
4092     break;
4093     }
4094     - dev_put(dev);
4095     + midx = l3mdev_master_ifindex_rcu(dev);
4096     +
4097     + rcu_read_unlock();
4098     +
4099     + if (sk->sk_bound_dev_if &&
4100     + sk->sk_bound_dev_if != val &&
4101     + (!midx || midx != sk->sk_bound_dev_if))
4102     + goto e_inval;
4103     }
4104     np->mcast_oif = val;
4105     retv = 0;
4106     diff --git a/net/ipv6/route.c b/net/ipv6/route.c
4107     index 61729641e027..6e8bacb0b458 100644
4108     --- a/net/ipv6/route.c
4109     +++ b/net/ipv6/route.c
4110     @@ -3495,7 +3495,11 @@ static int ip6_route_dev_notify(struct notifier_block *this,
4111     net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4112     net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4113     #endif
4114     - } else if (event == NETDEV_UNREGISTER) {
4115     + } else if (event == NETDEV_UNREGISTER &&
4116     + dev->reg_state != NETREG_UNREGISTERED) {
4117     + /* NETDEV_UNREGISTER could be fired for multiple times by
4118     + * netdev_wait_allrefs(). Make sure we only call this once.
4119     + */
4120     in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
4121     #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4122     in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
4123     diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
4124     index 34c2add2c455..03dbc6bd8598 100644
4125     --- a/net/mac80211/ieee80211_i.h
4126     +++ b/net/mac80211/ieee80211_i.h
4127     @@ -681,7 +681,6 @@ struct ieee80211_if_mesh {
4128     const struct ieee80211_mesh_sync_ops *sync_ops;
4129     s64 sync_offset_clockdrift_max;
4130     spinlock_t sync_offset_lock;
4131     - bool adjusting_tbtt;
4132     /* mesh power save */
4133     enum nl80211_mesh_power_mode nonpeer_pm;
4134     int ps_peers_light_sleep;
4135     diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
4136     index 50e1b7f78bd4..5c67a696e046 100644
4137     --- a/net/mac80211/mesh.c
4138     +++ b/net/mac80211/mesh.c
4139     @@ -279,8 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
4140     /* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
4141     *pos |= ifmsh->ps_peers_deep_sleep ?
4142     IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
4143     - *pos++ |= ifmsh->adjusting_tbtt ?
4144     - IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
4145     *pos++ = 0x00;
4146    
4147     return 0;
4148     @@ -850,7 +848,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
4149     ifmsh->mesh_cc_id = 0; /* Disabled */
4150     /* register sync ops from extensible synchronization framework */
4151     ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
4152     - ifmsh->adjusting_tbtt = false;
4153     ifmsh->sync_offset_clockdrift_max = 0;
4154     set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
4155     ieee80211_mesh_root_setup(ifmsh);
4156     diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
4157     index 7fcdcf622655..fcba70e57073 100644
4158     --- a/net/mac80211/mesh_plink.c
4159     +++ b/net/mac80211/mesh_plink.c
4160     @@ -505,12 +505,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
4161    
4162     /* Userspace handles station allocation */
4163     if (sdata->u.mesh.user_mpm ||
4164     - sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
4165     - cfg80211_notify_new_peer_candidate(sdata->dev, addr,
4166     - elems->ie_start,
4167     - elems->total_len,
4168     - GFP_KERNEL);
4169     - else
4170     + sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
4171     + if (mesh_peer_accepts_plinks(elems) &&
4172     + mesh_plink_availables(sdata))
4173     + cfg80211_notify_new_peer_candidate(sdata->dev, addr,
4174     + elems->ie_start,
4175     + elems->total_len,
4176     + GFP_KERNEL);
4177     + } else
4178     sta = __mesh_sta_info_alloc(sdata, addr);
4179    
4180     return sta;
4181     diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
4182     index faca22cd02b5..75608c07dc7b 100644
4183     --- a/net/mac80211/mesh_sync.c
4184     +++ b/net/mac80211/mesh_sync.c
4185     @@ -123,7 +123,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
4186     */
4187    
4188     if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
4189     - clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
4190     msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
4191     sta->sta.addr);
4192     goto no_sync;
4193     @@ -172,11 +171,9 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
4194     struct beacon_data *beacon)
4195     {
4196     struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
4197     - u8 cap;
4198    
4199     WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
4200     WARN_ON(!rcu_read_lock_held());
4201     - cap = beacon->meshconf->meshconf_cap;
4202    
4203     spin_lock_bh(&ifmsh->sync_offset_lock);
4204    
4205     @@ -190,21 +187,13 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
4206     "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
4207     ifmsh->sync_offset_clockdrift_max);
4208     set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
4209     -
4210     - ifmsh->adjusting_tbtt = true;
4211     } else {
4212     msync_dbg(sdata,
4213     "TBTT : max clockdrift=%lld; too small to adjust\n",
4214     (long long)ifmsh->sync_offset_clockdrift_max);
4215     ifmsh->sync_offset_clockdrift_max = 0;
4216     -
4217     - ifmsh->adjusting_tbtt = false;
4218     }
4219     spin_unlock_bh(&ifmsh->sync_offset_lock);
4220     -
4221     - beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
4222     - IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
4223     - ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
4224     }
4225    
4226     static const struct sync_method sync_methods[] = {
4227     diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
4228     index 778fcdb83225..fa3ef25441e5 100644
4229     --- a/net/netfilter/nf_tables_api.c
4230     +++ b/net/netfilter/nf_tables_api.c
4231     @@ -2068,7 +2068,7 @@ static void nf_tables_rule_destroy(const struct nft_ctx *ctx,
4232     * is called on error from nf_tables_newrule().
4233     */
4234     expr = nft_expr_first(rule);
4235     - while (expr->ops && expr != nft_expr_last(rule)) {
4236     + while (expr != nft_expr_last(rule) && expr->ops) {
4237     nf_tables_expr_destroy(ctx, expr);
4238     expr = nft_expr_next(expr);
4239     }
4240     diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
4241     index 393d359a1889..ef4768a451f4 100644
4242     --- a/net/netfilter/nft_queue.c
4243     +++ b/net/netfilter/nft_queue.c
4244     @@ -38,7 +38,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
4245    
4246     if (priv->queues_total > 1) {
4247     if (priv->flags & NFT_QUEUE_FLAG_CPU_FANOUT) {
4248     - int cpu = smp_processor_id();
4249     + int cpu = raw_smp_processor_id();
4250    
4251     queue = priv->queuenum + cpu % priv->queues_total;
4252     } else {
4253     diff --git a/net/nfc/core.c b/net/nfc/core.c
4254     index 5cf33df888c3..c699d64a0753 100644
4255     --- a/net/nfc/core.c
4256     +++ b/net/nfc/core.c
4257     @@ -1106,7 +1106,7 @@ struct nfc_dev *nfc_allocate_device(struct nfc_ops *ops,
4258     err_free_dev:
4259     kfree(dev);
4260    
4261     - return ERR_PTR(rc);
4262     + return NULL;
4263     }
4264     EXPORT_SYMBOL(nfc_allocate_device);
4265    
4266     diff --git a/net/rds/ib_frmr.c b/net/rds/ib_frmr.c
4267     index d921adc62765..66b3d6228a15 100644
4268     --- a/net/rds/ib_frmr.c
4269     +++ b/net/rds/ib_frmr.c
4270     @@ -104,14 +104,15 @@ static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
4271     struct rds_ib_frmr *frmr = &ibmr->u.frmr;
4272     struct ib_send_wr *failed_wr;
4273     struct ib_reg_wr reg_wr;
4274     - int ret;
4275     + int ret, off = 0;
4276    
4277     while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
4278     atomic_inc(&ibmr->ic->i_fastreg_wrs);
4279     cpu_relax();
4280     }
4281    
4282     - ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len, 0, PAGE_SIZE);
4283     + ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
4284     + &off, PAGE_SIZE);
4285     if (unlikely(ret != ibmr->sg_len))
4286     return ret < 0 ? ret : -EINVAL;
4287    
4288     diff --git a/net/rds/rdma.c b/net/rds/rdma.c
4289     index 8d3a851a3476..60e90f761838 100644
4290     --- a/net/rds/rdma.c
4291     +++ b/net/rds/rdma.c
4292     @@ -40,7 +40,6 @@
4293     /*
4294     * XXX
4295     * - build with sparse
4296     - * - should we limit the size of a mr region? let transport return failure?
4297     * - should we detect duplicate keys on a socket? hmm.
4298     * - an rdma is an mlock, apply rlimit?
4299     */
4300     @@ -200,6 +199,14 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
4301     goto out;
4302     }
4303    
4304     + /* Restrict the size of mr irrespective of underlying transport
4305     + * To account for unaligned mr regions, subtract one from nr_pages
4306     + */
4307     + if ((nr_pages - 1) > (RDS_MAX_MSG_SIZE >> PAGE_SHIFT)) {
4308     + ret = -EMSGSIZE;
4309     + goto out;
4310     + }
4311     +
4312     rdsdebug("RDS: get_mr addr %llx len %llu nr_pages %u\n",
4313     args->vec.addr, args->vec.bytes, nr_pages);
4314    
4315     diff --git a/net/rds/rds.h b/net/rds/rds.h
4316     index f107a968ddff..30a51fec0f63 100644
4317     --- a/net/rds/rds.h
4318     +++ b/net/rds/rds.h
4319     @@ -50,6 +50,9 @@ void rdsdebug(char *fmt, ...)
4320     #define RDS_FRAG_SHIFT 12
4321     #define RDS_FRAG_SIZE ((unsigned int)(1 << RDS_FRAG_SHIFT))
4322    
4323     +/* Used to limit both RDMA and non-RDMA RDS message to 1MB */
4324     +#define RDS_MAX_MSG_SIZE ((unsigned int)(1 << 20))
4325     +
4326     #define RDS_CONG_MAP_BYTES (65536 / 8)
4327     #define RDS_CONG_MAP_PAGES (PAGE_ALIGN(RDS_CONG_MAP_BYTES) / PAGE_SIZE)
4328     #define RDS_CONG_MAP_PAGE_BITS (PAGE_SIZE * 8)
4329     diff --git a/net/rds/send.c b/net/rds/send.c
4330     index f28651b6ae83..ad247dc71ebb 100644
4331     --- a/net/rds/send.c
4332     +++ b/net/rds/send.c
4333     @@ -946,6 +946,11 @@ static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
4334     ret = rds_cmsg_rdma_map(rs, rm, cmsg);
4335     if (!ret)
4336     *allocated_mr = 1;
4337     + else if (ret == -ENODEV)
4338     + /* Accommodate the get_mr() case which can fail
4339     + * if connection isn't established yet.
4340     + */
4341     + ret = -EAGAIN;
4342     break;
4343     case RDS_CMSG_ATOMIC_CSWP:
4344     case RDS_CMSG_ATOMIC_FADD:
4345     @@ -988,6 +993,26 @@ static int rds_send_mprds_hash(struct rds_sock *rs, struct rds_connection *conn)
4346     return hash;
4347     }
4348    
4349     +static int rds_rdma_bytes(struct msghdr *msg, size_t *rdma_bytes)
4350     +{
4351     + struct rds_rdma_args *args;
4352     + struct cmsghdr *cmsg;
4353     +
4354     + for_each_cmsghdr(cmsg, msg) {
4355     + if (!CMSG_OK(msg, cmsg))
4356     + return -EINVAL;
4357     +
4358     + if (cmsg->cmsg_level != SOL_RDS)
4359     + continue;
4360     +
4361     + if (cmsg->cmsg_type == RDS_CMSG_RDMA_ARGS) {
4362     + args = CMSG_DATA(cmsg);
4363     + *rdma_bytes += args->remote_vec.bytes;
4364     + }
4365     + }
4366     + return 0;
4367     +}
4368     +
4369     int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4370     {
4371     struct sock *sk = sock->sk;
4372     @@ -1002,6 +1027,7 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4373     int nonblock = msg->msg_flags & MSG_DONTWAIT;
4374     long timeo = sock_sndtimeo(sk, nonblock);
4375     struct rds_conn_path *cpath;
4376     + size_t total_payload_len = payload_len, rdma_payload_len = 0;
4377    
4378     /* Mirror Linux UDP mirror of BSD error message compatibility */
4379     /* XXX: Perhaps MSG_MORE someday */
4380     @@ -1034,6 +1060,16 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4381     }
4382     release_sock(sk);
4383    
4384     + ret = rds_rdma_bytes(msg, &rdma_payload_len);
4385     + if (ret)
4386     + goto out;
4387     +
4388     + total_payload_len += rdma_payload_len;
4389     + if (max_t(size_t, payload_len, rdma_payload_len) > RDS_MAX_MSG_SIZE) {
4390     + ret = -EMSGSIZE;
4391     + goto out;
4392     + }
4393     +
4394     if (payload_len > rds_sk_sndbuf(rs)) {
4395     ret = -EMSGSIZE;
4396     goto out;
4397     @@ -1083,8 +1119,12 @@ int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
4398    
4399     /* Parse any control messages the user may have included. */
4400     ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
4401     - if (ret)
4402     + if (ret) {
4403     + /* Trigger connection so that its ready for the next retry */
4404     + if (ret == -EAGAIN)
4405     + rds_conn_connect_if_down(conn);
4406     goto out;
4407     + }
4408    
4409     if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
4410     printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
4411     diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
4412     index 8a398b3fb532..2f633eec6b7a 100644
4413     --- a/net/vmw_vsock/af_vsock.c
4414     +++ b/net/vmw_vsock/af_vsock.c
4415     @@ -1524,8 +1524,7 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
4416     long timeout;
4417     int err;
4418     struct vsock_transport_send_notify_data send_data;
4419     -
4420     - DEFINE_WAIT(wait);
4421     + DEFINE_WAIT_FUNC(wait, woken_wake_function);
4422    
4423     sk = sock->sk;
4424     vsk = vsock_sk(sk);
4425     @@ -1568,11 +1567,10 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
4426     if (err < 0)
4427     goto out;
4428    
4429     -
4430     while (total_written < len) {
4431     ssize_t written;
4432    
4433     - prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
4434     + add_wait_queue(sk_sleep(sk), &wait);
4435     while (vsock_stream_has_space(vsk) == 0 &&
4436     sk->sk_err == 0 &&
4437     !(sk->sk_shutdown & SEND_SHUTDOWN) &&
4438     @@ -1581,33 +1579,30 @@ static int vsock_stream_sendmsg(struct socket *sock, struct msghdr *msg,
4439     /* Don't wait for non-blocking sockets. */
4440     if (timeout == 0) {
4441     err = -EAGAIN;
4442     - finish_wait(sk_sleep(sk), &wait);
4443     + remove_wait_queue(sk_sleep(sk), &wait);
4444     goto out_err;
4445     }
4446    
4447     err = transport->notify_send_pre_block(vsk, &send_data);
4448     if (err < 0) {
4449     - finish_wait(sk_sleep(sk), &wait);
4450     + remove_wait_queue(sk_sleep(sk), &wait);
4451     goto out_err;
4452     }
4453    
4454     release_sock(sk);
4455     - timeout = schedule_timeout(timeout);
4456     + timeout = wait_woken(&wait, TASK_INTERRUPTIBLE, timeout);
4457     lock_sock(sk);
4458     if (signal_pending(current)) {
4459     err = sock_intr_errno(timeout);
4460     - finish_wait(sk_sleep(sk), &wait);
4461     + remove_wait_queue(sk_sleep(sk), &wait);
4462     goto out_err;
4463     } else if (timeout == 0) {
4464     err = -EAGAIN;
4465     - finish_wait(sk_sleep(sk), &wait);
4466     + remove_wait_queue(sk_sleep(sk), &wait);
4467     goto out_err;
4468     }
4469     -
4470     - prepare_to_wait(sk_sleep(sk), &wait,
4471     - TASK_INTERRUPTIBLE);
4472     }
4473     - finish_wait(sk_sleep(sk), &wait);
4474     + remove_wait_queue(sk_sleep(sk), &wait);
4475    
4476     /* These checks occur both as part of and after the loop
4477     * conditional since we need to check before and after
4478     diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
4479     index 7f0598b32f13..c80d80e312e3 100644
4480     --- a/sound/core/pcm_lib.c
4481     +++ b/sound/core/pcm_lib.c
4482     @@ -264,8 +264,10 @@ static void update_audio_tstamp(struct snd_pcm_substream *substream,
4483     runtime->rate);
4484     *audio_tstamp = ns_to_timespec(audio_nsecs);
4485     }
4486     - runtime->status->audio_tstamp = *audio_tstamp;
4487     - runtime->status->tstamp = *curr_tstamp;
4488     + if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
4489     + runtime->status->audio_tstamp = *audio_tstamp;
4490     + runtime->status->tstamp = *curr_tstamp;
4491     + }
4492    
4493     /*
4494     * re-take a driver timestamp to let apps detect if the reference tstamp
4495     diff --git a/sound/core/timer_compat.c b/sound/core/timer_compat.c
4496     index 59127b6ef39e..e00f7e399e46 100644
4497     --- a/sound/core/timer_compat.c
4498     +++ b/sound/core/timer_compat.c
4499     @@ -66,11 +66,11 @@ static int snd_timer_user_info_compat(struct file *file,
4500     struct snd_timer *t;
4501    
4502     tu = file->private_data;
4503     - if (snd_BUG_ON(!tu->timeri))
4504     - return -ENXIO;
4505     + if (!tu->timeri)
4506     + return -EBADFD;
4507     t = tu->timeri->timer;
4508     - if (snd_BUG_ON(!t))
4509     - return -ENXIO;
4510     + if (!t)
4511     + return -EBADFD;
4512     memset(&info, 0, sizeof(info));
4513     info.card = t->card ? t->card->number : -1;
4514     if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
4515     @@ -99,8 +99,8 @@ static int snd_timer_user_status_compat(struct file *file,
4516     struct snd_timer_status32 status;
4517    
4518     tu = file->private_data;
4519     - if (snd_BUG_ON(!tu->timeri))
4520     - return -ENXIO;
4521     + if (!tu->timeri)
4522     + return -EBADFD;
4523     memset(&status, 0, sizeof(status));
4524     status.tstamp.tv_sec = tu->tstamp.tv_sec;
4525     status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
4526     diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
4527     index 81acc20c2535..f21633cd9b38 100644
4528     --- a/sound/hda/hdmi_chmap.c
4529     +++ b/sound/hda/hdmi_chmap.c
4530     @@ -746,7 +746,7 @@ static int hdmi_chmap_ctl_get(struct snd_kcontrol *kcontrol,
4531     memset(pcm_chmap, 0, sizeof(pcm_chmap));
4532     chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
4533    
4534     - for (i = 0; i < sizeof(chmap); i++)
4535     + for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
4536     ucontrol->value.integer.value[i] = pcm_chmap[i];
4537    
4538     return 0;
4539     diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
4540     index 5cb7e04fa4ba..293f3f213776 100644
4541     --- a/sound/pci/hda/hda_intel.c
4542     +++ b/sound/pci/hda/hda_intel.c
4543     @@ -2305,6 +2305,9 @@ static const struct pci_device_id azx_ids[] = {
4544     /* AMD Hudson */
4545     { PCI_DEVICE(0x1022, 0x780d),
4546     .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
4547     + /* AMD Raven */
4548     + { PCI_DEVICE(0x1022, 0x15e3),
4549     + .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
4550     /* ATI HDMI */
4551     { PCI_DEVICE(0x1002, 0x0002),
4552     .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
4553     diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
4554     index 80c40a1b8b65..d7fa7373cb94 100644
4555     --- a/sound/pci/hda/patch_realtek.c
4556     +++ b/sound/pci/hda/patch_realtek.c
4557     @@ -4419,7 +4419,7 @@ static void alc_no_shutup(struct hda_codec *codec)
4558     static void alc_fixup_no_shutup(struct hda_codec *codec,
4559     const struct hda_fixup *fix, int action)
4560     {
4561     - if (action == HDA_FIXUP_ACT_PRE_PROBE) {
4562     + if (action == HDA_FIXUP_ACT_PROBE) {
4563     struct alc_spec *spec = codec->spec;
4564     spec->shutup = alc_no_shutup;
4565     }
4566     @@ -6272,7 +6272,7 @@ static int patch_alc269(struct hda_codec *codec)
4567     case 0x10ec0703:
4568     spec->codec_variant = ALC269_TYPE_ALC700;
4569     spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
4570     - alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
4571     + alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
4572     break;
4573    
4574     }
4575     diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
4576     index 3bdd81930486..757af795cebd 100644
4577     --- a/sound/soc/codecs/wm_adsp.c
4578     +++ b/sound/soc/codecs/wm_adsp.c
4579     @@ -1365,7 +1365,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
4580     const struct wmfw_region *region;
4581     const struct wm_adsp_region *mem;
4582     const char *region_name;
4583     - char *file, *text;
4584     + char *file, *text = NULL;
4585     struct wm_adsp_buf *buf;
4586     unsigned int reg;
4587     int regions = 0;
4588     @@ -1526,10 +1526,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
4589     regions, le32_to_cpu(region->len), offset,
4590     region_name);
4591    
4592     + if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
4593     + firmware->size) {
4594     + adsp_err(dsp,
4595     + "%s.%d: %s region len %d bytes exceeds file length %zu\n",
4596     + file, regions, region_name,
4597     + le32_to_cpu(region->len), firmware->size);
4598     + ret = -EINVAL;
4599     + goto out_fw;
4600     + }
4601     +
4602     if (text) {
4603     memcpy(text, region->data, le32_to_cpu(region->len));
4604     adsp_info(dsp, "%s: %s\n", file, text);
4605     kfree(text);
4606     + text = NULL;
4607     }
4608    
4609     if (reg) {
4610     @@ -1574,6 +1585,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
4611     regmap_async_complete(regmap);
4612     wm_adsp_buf_free(&buf_list);
4613     release_firmware(firmware);
4614     + kfree(text);
4615     out:
4616     kfree(file);
4617    
4618     @@ -2054,6 +2066,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
4619     }
4620    
4621     if (reg) {
4622     + if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
4623     + firmware->size) {
4624     + adsp_err(dsp,
4625     + "%s.%d: %s region len %d bytes exceeds file length %zu\n",
4626     + file, blocks, region_name,
4627     + le32_to_cpu(blk->len),
4628     + firmware->size);
4629     + ret = -EINVAL;
4630     + goto out_fw;
4631     + }
4632     +
4633     buf = wm_adsp_buf_alloc(blk->data,
4634     le32_to_cpu(blk->len),
4635     &buf_list);
4636     diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
4637     index f18141098b50..91b444db575e 100644
4638     --- a/sound/soc/sh/rcar/core.c
4639     +++ b/sound/soc/sh/rcar/core.c
4640     @@ -978,10 +978,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
4641     return -ENOMEM;
4642    
4643     ret = snd_ctl_add(card, kctrl);
4644     - if (ret < 0) {
4645     - snd_ctl_free_one(kctrl);
4646     + if (ret < 0)
4647     return ret;
4648     - }
4649    
4650     cfg->update = update;
4651     cfg->card = card;
4652     diff --git a/sound/usb/clock.c b/sound/usb/clock.c
4653     index 26dd5f20f149..eb3396ffba4c 100644
4654     --- a/sound/usb/clock.c
4655     +++ b/sound/usb/clock.c
4656     @@ -43,7 +43,7 @@ static struct uac_clock_source_descriptor *
4657     while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
4658     ctrl_iface->extralen,
4659     cs, UAC2_CLOCK_SOURCE))) {
4660     - if (cs->bClockID == clock_id)
4661     + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
4662     return cs;
4663     }
4664    
4665     @@ -59,8 +59,11 @@ static struct uac_clock_selector_descriptor *
4666     while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
4667     ctrl_iface->extralen,
4668     cs, UAC2_CLOCK_SELECTOR))) {
4669     - if (cs->bClockID == clock_id)
4670     + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
4671     + if (cs->bLength < 5 + cs->bNrInPins)
4672     + return NULL;
4673     return cs;
4674     + }
4675     }
4676    
4677     return NULL;
4678     @@ -75,7 +78,7 @@ static struct uac_clock_multiplier_descriptor *
4679     while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
4680     ctrl_iface->extralen,
4681     cs, UAC2_CLOCK_MULTIPLIER))) {
4682     - if (cs->bClockID == clock_id)
4683     + if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
4684     return cs;
4685     }
4686    
4687     diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
4688     index d82e3c81c258..9133d3e53d9d 100644
4689     --- a/sound/usb/mixer.c
4690     +++ b/sound/usb/mixer.c
4691     @@ -1463,6 +1463,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
4692     __u8 *bmaControls;
4693    
4694     if (state->mixer->protocol == UAC_VERSION_1) {
4695     + if (hdr->bLength < 7) {
4696     + usb_audio_err(state->chip,
4697     + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
4698     + unitid);
4699     + return -EINVAL;
4700     + }
4701     csize = hdr->bControlSize;
4702     if (!csize) {
4703     usb_audio_dbg(state->chip,
4704     @@ -1480,6 +1486,12 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid,
4705     }
4706     } else {
4707     struct uac2_feature_unit_descriptor *ftr = _ftr;
4708     + if (hdr->bLength < 6) {
4709     + usb_audio_err(state->chip,
4710     + "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
4711     + unitid);
4712     + return -EINVAL;
4713     + }
4714     csize = 4;
4715     channels = (hdr->bLength - 6) / 4 - 1;
4716     bmaControls = ftr->bmaControls;
4717     @@ -2080,7 +2092,8 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
4718     const struct usbmix_name_map *map;
4719     char **namelist;
4720    
4721     - if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
4722     + if (desc->bLength < 5 || !desc->bNrInPins ||
4723     + desc->bLength < 5 + desc->bNrInPins) {
4724     usb_audio_err(state->chip,
4725     "invalid SELECTOR UNIT descriptor %d\n", unitid);
4726     return -EINVAL;