Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0146-4.19.47-all-fixes.patch

Revision 3425
Fri Aug 2 11:47:54 2019 UTC by niro
File size: 310188 bytes
-linux-4.19.47
1 diff --git a/Documentation/arm64/silicon-errata.txt b/Documentation/arm64/silicon-errata.txt
2 index 3c6fc2e08d04..eeb3fc9d777b 100644
3 --- a/Documentation/arm64/silicon-errata.txt
4 +++ b/Documentation/arm64/silicon-errata.txt
5 @@ -58,6 +58,7 @@ stable kernels.
6 | ARM | Cortex-A72 | #853709 | N/A |
7 | ARM | Cortex-A73 | #858921 | ARM64_ERRATUM_858921 |
8 | ARM | Cortex-A55 | #1024718 | ARM64_ERRATUM_1024718 |
9 +| ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 |
10 | ARM | MMU-500 | #841119,#826419 | N/A |
11 | | | | |
12 | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
13 diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
14 index 9ecde517728c..2793d4eac55f 100644
15 --- a/Documentation/sysctl/net.txt
16 +++ b/Documentation/sysctl/net.txt
17 @@ -92,6 +92,14 @@ Values :
18 0 - disable JIT kallsyms export (default value)
19 1 - enable JIT kallsyms export for privileged users only
20
21 +bpf_jit_limit
22 +-------------
23 +
24 +This enforces a global limit for memory allocations to the BPF JIT
25 +compiler in order to reject unprivileged JIT requests once it has
26 +been surpassed. bpf_jit_limit contains the value of the global limit
27 +in bytes.
28 +
29 dev_weight
30 --------------
31
32 diff --git a/Makefile b/Makefile
33 index 5383dd317d59..b3ba28ff73d5 100644
34 --- a/Makefile
35 +++ b/Makefile
36 @@ -1,7 +1,7 @@
37 # SPDX-License-Identifier: GPL-2.0
38 VERSION = 4
39 PATCHLEVEL = 19
40 -SUBLEVEL = 46
41 +SUBLEVEL = 47
42 EXTRAVERSION =
43 NAME = "People's Front"
44
45 diff --git a/arch/arm/include/asm/cp15.h b/arch/arm/include/asm/cp15.h
46 index 07e27f212dc7..d2453e2d3f1f 100644
47 --- a/arch/arm/include/asm/cp15.h
48 +++ b/arch/arm/include/asm/cp15.h
49 @@ -68,6 +68,8 @@
50 #define BPIALL __ACCESS_CP15(c7, 0, c5, 6)
51 #define ICIALLU __ACCESS_CP15(c7, 0, c5, 0)
52
53 +#define CNTVCT __ACCESS_CP15_64(1, c14)
54 +
55 extern unsigned long cr_alignment; /* defined in entry-armv.S */
56
57 static inline unsigned long get_cr(void)
58 diff --git a/arch/arm/vdso/vgettimeofday.c b/arch/arm/vdso/vgettimeofday.c
59 index a9dd619c6c29..7bdbf5d5c47d 100644
60 --- a/arch/arm/vdso/vgettimeofday.c
61 +++ b/arch/arm/vdso/vgettimeofday.c
62 @@ -18,9 +18,9 @@
63 #include <linux/compiler.h>
64 #include <linux/hrtimer.h>
65 #include <linux/time.h>
66 -#include <asm/arch_timer.h>
67 #include <asm/barrier.h>
68 #include <asm/bug.h>
69 +#include <asm/cp15.h>
70 #include <asm/page.h>
71 #include <asm/unistd.h>
72 #include <asm/vdso_datapage.h>
73 @@ -123,7 +123,8 @@ static notrace u64 get_ns(struct vdso_data *vdata)
74 u64 cycle_now;
75 u64 nsec;
76
77 - cycle_now = arch_counter_get_cntvct();
78 + isb();
79 + cycle_now = read_sysreg(CNTVCT);
80
81 cycle_delta = (cycle_now - vdata->cs_cycle_last) & vdata->cs_mask;
82
83 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
84 index 1b1a0e95c751..8790a29d0af4 100644
85 --- a/arch/arm64/Kconfig
86 +++ b/arch/arm64/Kconfig
87 @@ -479,6 +479,24 @@ config ARM64_ERRATUM_1024718
88
89 If unsure, say Y.
90
91 +config ARM64_ERRATUM_1463225
92 + bool "Cortex-A76: Software Step might prevent interrupt recognition"
93 + default y
94 + help
95 + This option adds a workaround for Arm Cortex-A76 erratum 1463225.
96 +
97 + On the affected Cortex-A76 cores (r0p0 to r3p1), software stepping
98 + of a system call instruction (SVC) can prevent recognition of
99 + subsequent interrupts when software stepping is disabled in the
100 + exception handler of the system call and either kernel debugging
101 + is enabled or VHE is in use.
102 +
103 + Work around the erratum by triggering a dummy step exception
104 + when handling a system call from a task that is being stepped
105 + in a VHE configuration of the kernel.
106 +
107 + If unsure, say Y.
108 +
109 config CAVIUM_ERRATUM_22375
110 bool "Cavium erratum 22375, 24313"
111 default y
112 diff --git a/arch/arm64/include/asm/cpucaps.h b/arch/arm64/include/asm/cpucaps.h
113 index ae1f70450fb2..25ce9056cf64 100644
114 --- a/arch/arm64/include/asm/cpucaps.h
115 +++ b/arch/arm64/include/asm/cpucaps.h
116 @@ -51,7 +51,8 @@
117 #define ARM64_SSBD 30
118 #define ARM64_MISMATCHED_CACHE_TYPE 31
119 #define ARM64_HAS_STAGE2_FWB 32
120 +#define ARM64_WORKAROUND_1463225 33
121
122 -#define ARM64_NCAPS 33
123 +#define ARM64_NCAPS 34
124
125 #endif /* __ASM_CPUCAPS_H */
126 diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
127 index ea690b3562af..b4a48419769f 100644
128 --- a/arch/arm64/include/asm/cputype.h
129 +++ b/arch/arm64/include/asm/cputype.h
130 @@ -86,6 +86,7 @@
131 #define ARM_CPU_PART_CORTEX_A75 0xD0A
132 #define ARM_CPU_PART_CORTEX_A35 0xD04
133 #define ARM_CPU_PART_CORTEX_A55 0xD05
134 +#define ARM_CPU_PART_CORTEX_A76 0xD0B
135
136 #define APM_CPU_PART_POTENZA 0x000
137
138 @@ -110,6 +111,7 @@
139 #define MIDR_CORTEX_A75 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A75)
140 #define MIDR_CORTEX_A35 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A35)
141 #define MIDR_CORTEX_A55 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A55)
142 +#define MIDR_CORTEX_A76 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A76)
143 #define MIDR_THUNDERX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX)
144 #define MIDR_THUNDERX_81XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_81XX)
145 #define MIDR_THUNDERX_83XX MIDR_CPU_MODEL(ARM_CPU_IMP_CAVIUM, CAVIUM_CPU_PART_THUNDERX_83XX)
146 diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
147 index 1bdeca8918a6..ea423db39364 100644
148 --- a/arch/arm64/include/asm/pgtable.h
149 +++ b/arch/arm64/include/asm/pgtable.h
150 @@ -444,6 +444,8 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
151 return __pmd_to_phys(pmd);
152 }
153
154 +static inline void pte_unmap(pte_t *pte) { }
155 +
156 /* Find an entry in the third-level page table. */
157 #define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
158
159 @@ -452,7 +454,6 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
160
161 #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr))
162 #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr))
163 -#define pte_unmap(pte) do { } while (0)
164 #define pte_unmap_nested(pte) do { } while (0)
165
166 #define pte_set_fixmap(addr) ((pte_t *)set_fixmap_offset(FIX_PTE, addr))
167 diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h
168 index 2b9a63771eda..f89263c8e11a 100644
169 --- a/arch/arm64/include/asm/vdso_datapage.h
170 +++ b/arch/arm64/include/asm/vdso_datapage.h
171 @@ -38,6 +38,7 @@ struct vdso_data {
172 __u32 tz_minuteswest; /* Whacky timezone stuff */
173 __u32 tz_dsttime;
174 __u32 use_syscall;
175 + __u32 hrtimer_res;
176 };
177
178 #endif /* !__ASSEMBLY__ */
179 diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
180 index 323aeb5f2fe6..92fba851ce53 100644
181 --- a/arch/arm64/kernel/asm-offsets.c
182 +++ b/arch/arm64/kernel/asm-offsets.c
183 @@ -99,7 +99,7 @@ int main(void)
184 DEFINE(CLOCK_REALTIME, CLOCK_REALTIME);
185 DEFINE(CLOCK_MONOTONIC, CLOCK_MONOTONIC);
186 DEFINE(CLOCK_MONOTONIC_RAW, CLOCK_MONOTONIC_RAW);
187 - DEFINE(CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
188 + DEFINE(CLOCK_REALTIME_RES, offsetof(struct vdso_data, hrtimer_res));
189 DEFINE(CLOCK_REALTIME_COARSE, CLOCK_REALTIME_COARSE);
190 DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
191 DEFINE(CLOCK_COARSE_RES, LOW_RES_NSEC);
192 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
193 index dec10898d688..dc6c535cbd13 100644
194 --- a/arch/arm64/kernel/cpu_errata.c
195 +++ b/arch/arm64/kernel/cpu_errata.c
196 @@ -411,6 +411,22 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
197 }
198 #endif /* CONFIG_ARM64_SSBD */
199
200 +#ifdef CONFIG_ARM64_ERRATUM_1463225
201 +DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
202 +
203 +static bool
204 +has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
205 + int scope)
206 +{
207 + u32 midr = read_cpuid_id();
208 + /* Cortex-A76 r0p0 - r3p1 */
209 + struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);
210 +
211 + WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
212 + return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
213 +}
214 +#endif
215 +
216 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max) \
217 .matches = is_affected_midr_range, \
218 .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
219 @@ -679,6 +695,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
220 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
221 .matches = has_ssbd_mitigation,
222 },
223 +#endif
224 +#ifdef CONFIG_ARM64_ERRATUM_1463225
225 + {
226 + .desc = "ARM erratum 1463225",
227 + .capability = ARM64_WORKAROUND_1463225,
228 + .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
229 + .matches = has_cortex_a76_erratum_1463225,
230 + },
231 #endif
232 {
233 }
234 diff --git a/arch/arm64/kernel/cpu_ops.c b/arch/arm64/kernel/cpu_ops.c
235 index ea001241bdd4..00f8b8612b69 100644
236 --- a/arch/arm64/kernel/cpu_ops.c
237 +++ b/arch/arm64/kernel/cpu_ops.c
238 @@ -85,6 +85,7 @@ static const char *__init cpu_read_enable_method(int cpu)
239 pr_err("%pOF: missing enable-method property\n",
240 dn);
241 }
242 + of_node_put(dn);
243 } else {
244 enable_method = acpi_get_enable_method(cpu);
245 if (!enable_method) {
246 diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
247 index b09b6f75f759..06941c1fe418 100644
248 --- a/arch/arm64/kernel/kaslr.c
249 +++ b/arch/arm64/kernel/kaslr.c
250 @@ -145,15 +145,15 @@ u64 __init kaslr_early_init(u64 dt_phys)
251
252 if (IS_ENABLED(CONFIG_RANDOMIZE_MODULE_REGION_FULL)) {
253 /*
254 - * Randomize the module region over a 4 GB window covering the
255 + * Randomize the module region over a 2 GB window covering the
256 * kernel. This reduces the risk of modules leaking information
257 * about the address of the kernel itself, but results in
258 * branches between modules and the core kernel that are
259 * resolved via PLTs. (Branches between modules will be
260 * resolved normally.)
261 */
262 - module_range = SZ_4G - (u64)(_end - _stext);
263 - module_alloc_base = max((u64)_end + offset - SZ_4G,
264 + module_range = SZ_2G - (u64)(_end - _stext);
265 + module_alloc_base = max((u64)_end + offset - SZ_2G,
266 (u64)MODULES_VADDR);
267 } else {
268 /*
269 diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
270 index f0f27aeefb73..0b368ceccee4 100644
271 --- a/arch/arm64/kernel/module.c
272 +++ b/arch/arm64/kernel/module.c
273 @@ -56,7 +56,7 @@ void *module_alloc(unsigned long size)
274 * can simply omit this fallback in that case.
275 */
276 p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
277 - module_alloc_base + SZ_4G, GFP_KERNEL,
278 + module_alloc_base + SZ_2G, GFP_KERNEL,
279 PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
280 __builtin_return_address(0));
281
282 diff --git a/arch/arm64/kernel/syscall.c b/arch/arm64/kernel/syscall.c
283 index 5610ac01c1ec..871c739f060a 100644
284 --- a/arch/arm64/kernel/syscall.c
285 +++ b/arch/arm64/kernel/syscall.c
286 @@ -8,6 +8,7 @@
287 #include <linux/syscalls.h>
288
289 #include <asm/daifflags.h>
290 +#include <asm/debug-monitors.h>
291 #include <asm/fpsimd.h>
292 #include <asm/syscall.h>
293 #include <asm/thread_info.h>
294 @@ -60,6 +61,35 @@ static inline bool has_syscall_work(unsigned long flags)
295 int syscall_trace_enter(struct pt_regs *regs);
296 void syscall_trace_exit(struct pt_regs *regs);
297
298 +#ifdef CONFIG_ARM64_ERRATUM_1463225
299 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
300 +
301 +static void cortex_a76_erratum_1463225_svc_handler(void)
302 +{
303 + u32 reg, val;
304 +
305 + if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
306 + return;
307 +
308 + if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
309 + return;
310 +
311 + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
312 + reg = read_sysreg(mdscr_el1);
313 + val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
314 + write_sysreg(val, mdscr_el1);
315 + asm volatile("msr daifclr, #8");
316 + isb();
317 +
318 + /* We will have taken a single-step exception by this point */
319 +
320 + write_sysreg(reg, mdscr_el1);
321 + __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
322 +}
323 +#else
324 +static void cortex_a76_erratum_1463225_svc_handler(void) { }
325 +#endif /* CONFIG_ARM64_ERRATUM_1463225 */
326 +
327 static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
328 const syscall_fn_t syscall_table[])
329 {
330 @@ -68,6 +98,7 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
331 regs->orig_x0 = regs->regs[0];
332 regs->syscallno = scno;
333
334 + cortex_a76_erratum_1463225_svc_handler();
335 local_daif_restore(DAIF_PROCCTX);
336 user_exit();
337
338 diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
339 index 2d419006ad43..ec0bb588d755 100644
340 --- a/arch/arm64/kernel/vdso.c
341 +++ b/arch/arm64/kernel/vdso.c
342 @@ -232,6 +232,9 @@ void update_vsyscall(struct timekeeper *tk)
343 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
344 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
345
346 + /* Read without the seqlock held by clock_getres() */
347 + WRITE_ONCE(vdso_data->hrtimer_res, hrtimer_resolution);
348 +
349 if (!use_syscall) {
350 /* tkr_mono.cycle_last == tkr_raw.cycle_last */
351 vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
352 diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
353 index e8f60112818f..856fee6d3512 100644
354 --- a/arch/arm64/kernel/vdso/gettimeofday.S
355 +++ b/arch/arm64/kernel/vdso/gettimeofday.S
356 @@ -308,13 +308,14 @@ ENTRY(__kernel_clock_getres)
357 ccmp w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
358 b.ne 1f
359
360 - ldr x2, 5f
361 + adr vdso_data, _vdso_data
362 + ldr w2, [vdso_data, #CLOCK_REALTIME_RES]
363 b 2f
364 1:
365 cmp w0, #CLOCK_REALTIME_COARSE
366 ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
367 b.ne 4f
368 - ldr x2, 6f
369 + ldr x2, 5f
370 2:
371 cbz x1, 3f
372 stp xzr, x2, [x1]
373 @@ -328,8 +329,6 @@ ENTRY(__kernel_clock_getres)
374 svc #0
375 ret
376 5:
377 - .quad CLOCK_REALTIME_RES
378 -6:
379 .quad CLOCK_COARSE_RES
380 .cfi_endproc
381 ENDPROC(__kernel_clock_getres)
382 diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
383 index c389f2bef938..d3a5bb16f0b2 100644
384 --- a/arch/arm64/mm/dma-mapping.c
385 +++ b/arch/arm64/mm/dma-mapping.c
386 @@ -664,6 +664,11 @@ static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
387 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
388 return ret;
389
390 + if (!is_vmalloc_addr(cpu_addr)) {
391 + unsigned long pfn = page_to_pfn(virt_to_page(cpu_addr));
392 + return __swiotlb_mmap_pfn(vma, pfn, size);
393 + }
394 +
395 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
396 /*
397 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
398 @@ -687,6 +692,11 @@ static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
399 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
400 struct vm_struct *area = find_vm_area(cpu_addr);
401
402 + if (!is_vmalloc_addr(cpu_addr)) {
403 + struct page *page = virt_to_page(cpu_addr);
404 + return __swiotlb_get_sgtable_page(sgt, page, size);
405 + }
406 +
407 if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
408 /*
409 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
410 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
411 index a4c134677285..88cf0a0cb616 100644
412 --- a/arch/arm64/mm/fault.c
413 +++ b/arch/arm64/mm/fault.c
414 @@ -827,14 +827,47 @@ void __init hook_debug_fault_code(int nr,
415 debug_fault_info[nr].name = name;
416 }
417
418 +#ifdef CONFIG_ARM64_ERRATUM_1463225
419 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
420 +
421 +static int __exception
422 +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
423 +{
424 + if (user_mode(regs))
425 + return 0;
426 +
427 + if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
428 + return 0;
429 +
430 + /*
431 + * We've taken a dummy step exception from the kernel to ensure
432 + * that interrupts are re-enabled on the syscall path. Return back
433 + * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
434 + * masked so that we can safely restore the mdscr and get on with
435 + * handling the syscall.
436 + */
437 + regs->pstate |= PSR_D_BIT;
438 + return 1;
439 +}
440 +#else
441 +static int __exception
442 +cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
443 +{
444 + return 0;
445 +}
446 +#endif /* CONFIG_ARM64_ERRATUM_1463225 */
447 +
448 asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
449 - unsigned int esr,
450 - struct pt_regs *regs)
451 + unsigned int esr,
452 + struct pt_regs *regs)
453 {
454 const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
455 unsigned long pc = instruction_pointer(regs);
456 int rv;
457
458 + if (cortex_a76_erratum_1463225_debug_handler(regs))
459 + return 0;
460 +
461 /*
462 * Tell lockdep we disabled irqs in entry.S. Do nothing if they were
463 * already disabled to preserve the last enabled/disabled addresses.
464 diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
465 index 9d9f6f334d3c..3da3e2b1b51b 100644
466 --- a/arch/powerpc/boot/addnote.c
467 +++ b/arch/powerpc/boot/addnote.c
468 @@ -223,7 +223,11 @@ main(int ac, char **av)
469 PUT_16(E_PHNUM, np + 2);
470
471 /* write back */
472 - lseek(fd, (long) 0, SEEK_SET);
473 + i = lseek(fd, (long) 0, SEEK_SET);
474 + if (i < 0) {
475 + perror("lseek");
476 + exit(1);
477 + }
478 i = write(fd, buf, n);
479 if (i < 0) {
480 perror("write");
481 diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
482 index 4898e9491a1c..9168a247e24f 100644
483 --- a/arch/powerpc/kernel/head_64.S
484 +++ b/arch/powerpc/kernel/head_64.S
485 @@ -970,7 +970,9 @@ start_here_multiplatform:
486
487 /* Restore parameters passed from prom_init/kexec */
488 mr r3,r31
489 - bl early_setup /* also sets r13 and SPRG_PACA */
490 + LOAD_REG_ADDR(r12, DOTSYM(early_setup))
491 + mtctr r12
492 + bctrl /* also sets r13 and SPRG_PACA */
493
494 LOAD_REG_ADDR(r3, start_here_common)
495 ld r4,PACAKMSR(r13)
496 diff --git a/arch/powerpc/kernel/watchdog.c b/arch/powerpc/kernel/watchdog.c
497 index 3c6ab22a0c4e..af3c15a1d41e 100644
498 --- a/arch/powerpc/kernel/watchdog.c
499 +++ b/arch/powerpc/kernel/watchdog.c
500 @@ -77,7 +77,7 @@ static u64 wd_smp_panic_timeout_tb __read_mostly; /* panic other CPUs */
501
502 static u64 wd_timer_period_ms __read_mostly; /* interval between heartbeat */
503
504 -static DEFINE_PER_CPU(struct timer_list, wd_timer);
505 +static DEFINE_PER_CPU(struct hrtimer, wd_hrtimer);
506 static DEFINE_PER_CPU(u64, wd_timer_tb);
507
508 /* SMP checker bits */
509 @@ -293,21 +293,21 @@ out:
510 nmi_exit();
511 }
512
513 -static void wd_timer_reset(unsigned int cpu, struct timer_list *t)
514 -{
515 - t->expires = jiffies + msecs_to_jiffies(wd_timer_period_ms);
516 - if (wd_timer_period_ms > 1000)
517 - t->expires = __round_jiffies_up(t->expires, cpu);
518 - add_timer_on(t, cpu);
519 -}
520 -
521 -static void wd_timer_fn(struct timer_list *t)
522 +static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
523 {
524 int cpu = smp_processor_id();
525
526 + if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
527 + return HRTIMER_NORESTART;
528 +
529 + if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
530 + return HRTIMER_NORESTART;
531 +
532 watchdog_timer_interrupt(cpu);
533
534 - wd_timer_reset(cpu, t);
535 + hrtimer_forward_now(hrtimer, ms_to_ktime(wd_timer_period_ms));
536 +
537 + return HRTIMER_RESTART;
538 }
539
540 void arch_touch_nmi_watchdog(void)
541 @@ -323,37 +323,22 @@ void arch_touch_nmi_watchdog(void)
542 }
543 EXPORT_SYMBOL(arch_touch_nmi_watchdog);
544
545 -static void start_watchdog_timer_on(unsigned int cpu)
546 -{
547 - struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
548 -
549 - per_cpu(wd_timer_tb, cpu) = get_tb();
550 -
551 - timer_setup(t, wd_timer_fn, TIMER_PINNED);
552 - wd_timer_reset(cpu, t);
553 -}
554 -
555 -static void stop_watchdog_timer_on(unsigned int cpu)
556 -{
557 - struct timer_list *t = per_cpu_ptr(&wd_timer, cpu);
558 -
559 - del_timer_sync(t);
560 -}
561 -
562 -static int start_wd_on_cpu(unsigned int cpu)
563 +static void start_watchdog(void *arg)
564 {
565 + struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
566 + int cpu = smp_processor_id();
567 unsigned long flags;
568
569 if (cpumask_test_cpu(cpu, &wd_cpus_enabled)) {
570 WARN_ON(1);
571 - return 0;
572 + return;
573 }
574
575 if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
576 - return 0;
577 + return;
578
579 if (!cpumask_test_cpu(cpu, &watchdog_cpumask))
580 - return 0;
581 + return;
582
583 wd_smp_lock(&flags);
584 cpumask_set_cpu(cpu, &wd_cpus_enabled);
585 @@ -363,27 +348,40 @@ static int start_wd_on_cpu(unsigned int cpu)
586 }
587 wd_smp_unlock(&flags);
588
589 - start_watchdog_timer_on(cpu);
590 + *this_cpu_ptr(&wd_timer_tb) = get_tb();
591
592 - return 0;
593 + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
594 + hrtimer->function = watchdog_timer_fn;
595 + hrtimer_start(hrtimer, ms_to_ktime(wd_timer_period_ms),
596 + HRTIMER_MODE_REL_PINNED);
597 }
598
599 -static int stop_wd_on_cpu(unsigned int cpu)
600 +static int start_watchdog_on_cpu(unsigned int cpu)
601 {
602 + return smp_call_function_single(cpu, start_watchdog, NULL, true);
603 +}
604 +
605 +static void stop_watchdog(void *arg)
606 +{
607 + struct hrtimer *hrtimer = this_cpu_ptr(&wd_hrtimer);
608 + int cpu = smp_processor_id();
609 unsigned long flags;
610
611 if (!cpumask_test_cpu(cpu, &wd_cpus_enabled))
612 - return 0; /* Can happen in CPU unplug case */
613 + return; /* Can happen in CPU unplug case */
614
615 - stop_watchdog_timer_on(cpu);
616 + hrtimer_cancel(hrtimer);
617
618 wd_smp_lock(&flags);
619 cpumask_clear_cpu(cpu, &wd_cpus_enabled);
620 wd_smp_unlock(&flags);
621
622 wd_smp_clear_cpu_pending(cpu, get_tb());
623 +}
624
625 - return 0;
626 +static int stop_watchdog_on_cpu(unsigned int cpu)
627 +{
628 + return smp_call_function_single(cpu, stop_watchdog, NULL, true);
629 }
630
631 static void watchdog_calc_timeouts(void)
632 @@ -402,7 +400,7 @@ void watchdog_nmi_stop(void)
633 int cpu;
634
635 for_each_cpu(cpu, &wd_cpus_enabled)
636 - stop_wd_on_cpu(cpu);
637 + stop_watchdog_on_cpu(cpu);
638 }
639
640 void watchdog_nmi_start(void)
641 @@ -411,7 +409,7 @@ void watchdog_nmi_start(void)
642
643 watchdog_calc_timeouts();
644 for_each_cpu_and(cpu, cpu_online_mask, &watchdog_cpumask)
645 - start_wd_on_cpu(cpu);
646 + start_watchdog_on_cpu(cpu);
647 }
648
649 /*
650 @@ -423,7 +421,8 @@ int __init watchdog_nmi_probe(void)
651
652 err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
653 "powerpc/watchdog:online",
654 - start_wd_on_cpu, stop_wd_on_cpu);
655 + start_watchdog_on_cpu,
656 + stop_watchdog_on_cpu);
657 if (err < 0) {
658 pr_warn("could not be initialized");
659 return err;
660 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
661 index 10fb43efef50..f473c05e9649 100644
662 --- a/arch/powerpc/mm/numa.c
663 +++ b/arch/powerpc/mm/numa.c
664 @@ -1495,6 +1495,9 @@ int start_topology_update(void)
665 {
666 int rc = 0;
667
668 + if (!topology_updates_enabled)
669 + return 0;
670 +
671 if (firmware_has_feature(FW_FEATURE_PRRN)) {
672 if (!prrn_enabled) {
673 prrn_enabled = 1;
674 @@ -1524,6 +1527,9 @@ int stop_topology_update(void)
675 {
676 int rc = 0;
677
678 + if (!topology_updates_enabled)
679 + return 0;
680 +
681 if (prrn_enabled) {
682 prrn_enabled = 0;
683 #ifdef CONFIG_SMP
684 @@ -1579,11 +1585,13 @@ static ssize_t topology_write(struct file *file, const char __user *buf,
685
686 kbuf[read_len] = '\0';
687
688 - if (!strncmp(kbuf, "on", 2))
689 + if (!strncmp(kbuf, "on", 2)) {
690 + topology_updates_enabled = true;
691 start_topology_update();
692 - else if (!strncmp(kbuf, "off", 3))
693 + } else if (!strncmp(kbuf, "off", 3)) {
694 stop_topology_update();
695 - else
696 + topology_updates_enabled = false;
697 + } else
698 return -EINVAL;
699
700 return count;
701 @@ -1598,9 +1606,7 @@ static const struct file_operations topology_ops = {
702
703 static int topology_update_init(void)
704 {
705 - /* Do not poll for changes if disabled at boot */
706 - if (topology_updates_enabled)
707 - start_topology_update();
708 + start_topology_update();
709
710 if (vphn_enabled)
711 topology_schedule_update();
712 diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
713 index 1fafc32b12a0..555322677074 100644
714 --- a/arch/powerpc/perf/imc-pmu.c
715 +++ b/arch/powerpc/perf/imc-pmu.c
716 @@ -496,6 +496,11 @@ static int nest_imc_event_init(struct perf_event *event)
717 * Get the base memory addresss for this cpu.
718 */
719 chip_id = cpu_to_chip_id(event->cpu);
720 +
721 + /* Return, if chip_id is not valid */
722 + if (chip_id < 0)
723 + return -ENODEV;
724 +
725 pcni = pmu->mem_info;
726 do {
727 if (pcni->id == chip_id) {
728 @@ -503,7 +508,7 @@ static int nest_imc_event_init(struct perf_event *event)
729 break;
730 }
731 pcni++;
732 - } while (pcni);
733 + } while (pcni->vbase != 0);
734
735 if (!flag)
736 return -ENODEV;
737 diff --git a/arch/powerpc/platforms/powernv/opal-imc.c b/arch/powerpc/platforms/powernv/opal-imc.c
738 index 58a07948c76e..3d27f02695e4 100644
739 --- a/arch/powerpc/platforms/powernv/opal-imc.c
740 +++ b/arch/powerpc/platforms/powernv/opal-imc.c
741 @@ -127,7 +127,7 @@ static int imc_get_mem_addr_nest(struct device_node *node,
742 nr_chips))
743 goto error;
744
745 - pmu_ptr->mem_info = kcalloc(nr_chips, sizeof(*pmu_ptr->mem_info),
746 + pmu_ptr->mem_info = kcalloc(nr_chips + 1, sizeof(*pmu_ptr->mem_info),
747 GFP_KERNEL);
748 if (!pmu_ptr->mem_info)
749 goto error;
750 diff --git a/arch/s390/kernel/kexec_elf.c b/arch/s390/kernel/kexec_elf.c
751 index 5a286b012043..602e7cc26d11 100644
752 --- a/arch/s390/kernel/kexec_elf.c
753 +++ b/arch/s390/kernel/kexec_elf.c
754 @@ -19,10 +19,15 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
755 struct kexec_buf buf;
756 const Elf_Ehdr *ehdr;
757 const Elf_Phdr *phdr;
758 + Elf_Addr entry;
759 int i, ret;
760
761 ehdr = (Elf_Ehdr *)kernel;
762 buf.image = image;
763 + if (image->type == KEXEC_TYPE_CRASH)
764 + entry = STARTUP_KDUMP_OFFSET;
765 + else
766 + entry = ehdr->e_entry;
767
768 phdr = (void *)ehdr + ehdr->e_phoff;
769 for (i = 0; i < ehdr->e_phnum; i++, phdr++) {
770 @@ -35,7 +40,7 @@ static int kexec_file_add_elf_kernel(struct kimage *image,
771 buf.mem = ALIGN(phdr->p_paddr, phdr->p_align);
772 buf.memsz = phdr->p_memsz;
773
774 - if (phdr->p_paddr == 0) {
775 + if (entry - phdr->p_paddr < phdr->p_memsz) {
776 data->kernel_buf = buf.buffer;
777 data->memsz += STARTUP_NORMAL_OFFSET;
778
779 diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
780 index f2cc7da473e4..ae894ac83fd6 100644
781 --- a/arch/s390/mm/pgtable.c
782 +++ b/arch/s390/mm/pgtable.c
783 @@ -410,6 +410,7 @@ static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
784 return old;
785 }
786
787 +#ifdef CONFIG_PGSTE
788 static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
789 {
790 pgd_t *pgd;
791 @@ -427,6 +428,7 @@ static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
792 pmd = pmd_alloc(mm, pud, addr);
793 return pmd;
794 }
795 +#endif
796
797 pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
798 pmd_t *pmdp, pmd_t new)
799 diff --git a/arch/sh/include/cpu-sh4/cpu/sh7786.h b/arch/sh/include/cpu-sh4/cpu/sh7786.h
800 index 96b8cb1f754a..029bbadaf7ab 100644
801 --- a/arch/sh/include/cpu-sh4/cpu/sh7786.h
802 +++ b/arch/sh/include/cpu-sh4/cpu/sh7786.h
803 @@ -135,7 +135,7 @@ enum {
804
805 static inline u32 sh7786_mm_sel(void)
806 {
807 - return __raw_readl(0xFC400020) & 0x7;
808 + return __raw_readl((const volatile void __iomem *)0xFC400020) & 0x7;
809 }
810
811 #endif /* __CPU_SH7786_H__ */
812 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
813 index ffc823a8312f..ab2071e40efe 100644
814 --- a/arch/x86/Makefile
815 +++ b/arch/x86/Makefile
816 @@ -47,7 +47,7 @@ export REALMODE_CFLAGS
817 export BITS
818
819 ifdef CONFIG_X86_NEED_RELOCS
820 - LDFLAGS_vmlinux := --emit-relocs
821 + LDFLAGS_vmlinux := --emit-relocs --discard-none
822 endif
823
824 #
825 diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
826 index 56194c571299..4a650eb3d94a 100644
827 --- a/arch/x86/events/intel/cstate.c
828 +++ b/arch/x86/events/intel/cstate.c
829 @@ -584,6 +584,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
830 X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_X, glm_cstates),
831
832 X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
833 +
834 + X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_MOBILE, snb_cstates),
835 { },
836 };
837 MODULE_DEVICE_TABLE(x86cpu, intel_cstates_match);
838 diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
839 index 91039ffed633..2413169ce362 100644
840 --- a/arch/x86/events/intel/rapl.c
841 +++ b/arch/x86/events/intel/rapl.c
842 @@ -780,6 +780,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = {
843 X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_X, hsw_rapl_init),
844
845 X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT_PLUS, hsw_rapl_init),
846 +
847 + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ICELAKE_MOBILE, skl_rapl_init),
848 {},
849 };
850
851 diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
852 index 1b9f85abf9bc..ace6c1e752fb 100644
853 --- a/arch/x86/events/msr.c
854 +++ b/arch/x86/events/msr.c
855 @@ -89,6 +89,7 @@ static bool test_intel(int idx)
856 case INTEL_FAM6_SKYLAKE_X:
857 case INTEL_FAM6_KABYLAKE_MOBILE:
858 case INTEL_FAM6_KABYLAKE_DESKTOP:
859 + case INTEL_FAM6_ICELAKE_MOBILE:
860 if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
861 return true;
862 break;
863 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
864 index 86b1341cba9a..513ba49c204f 100644
865 --- a/arch/x86/ia32/ia32_signal.c
866 +++ b/arch/x86/ia32/ia32_signal.c
867 @@ -61,9 +61,8 @@
868 } while (0)
869
870 #define RELOAD_SEG(seg) { \
871 - unsigned int pre = GET_SEG(seg); \
872 + unsigned int pre = (seg) | 3; \
873 unsigned int cur = get_user_seg(seg); \
874 - pre |= 3; \
875 if (pre != cur) \
876 set_user_seg(seg, pre); \
877 }
878 @@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
879 struct sigcontext_32 __user *sc)
880 {
881 unsigned int tmpflags, err = 0;
882 + u16 gs, fs, es, ds;
883 void __user *buf;
884 u32 tmp;
885
886 @@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
887 current->restart_block.fn = do_no_restart_syscall;
888
889 get_user_try {
890 - /*
891 - * Reload fs and gs if they have changed in the signal
892 - * handler. This does not handle long fs/gs base changes in
893 - * the handler, but does not clobber them at least in the
894 - * normal case.
895 - */
896 - RELOAD_SEG(gs);
897 - RELOAD_SEG(fs);
898 - RELOAD_SEG(ds);
899 - RELOAD_SEG(es);
900 + gs = GET_SEG(gs);
901 + fs = GET_SEG(fs);
902 + ds = GET_SEG(ds);
903 + es = GET_SEG(es);
904
905 COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx);
906 COPY(dx); COPY(cx); COPY(ip); COPY(ax);
907 @@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
908 buf = compat_ptr(tmp);
909 } get_user_catch(err);
910
911 + /*
912 + * Reload fs and gs if they have changed in the signal
913 + * handler. This does not handle long fs/gs base changes in
914 + * the handler, but does not clobber them at least in the
915 + * normal case.
916 + */
917 + RELOAD_SEG(gs);
918 + RELOAD_SEG(fs);
919 + RELOAD_SEG(ds);
920 + RELOAD_SEG(es);
921 +
922 err |= fpu__restore_sig(buf, 1);
923
924 force_iret();
925 diff --git a/arch/x86/include/asm/text-patching.h b/arch/x86/include/asm/text-patching.h
926 index 05861cc08787..0bbb07eaed6b 100644
927 --- a/arch/x86/include/asm/text-patching.h
928 +++ b/arch/x86/include/asm/text-patching.h
929 @@ -39,6 +39,7 @@ extern int poke_int3_handler(struct pt_regs *regs);
930 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
931 extern int after_bootmem;
932
933 +#ifndef CONFIG_UML_X86
934 static inline void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
935 {
936 regs->ip = ip;
937 @@ -65,6 +66,7 @@ static inline void int3_emulate_call(struct pt_regs *regs, unsigned long func)
938 int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
939 int3_emulate_jmp(regs, func);
940 }
941 -#endif
942 +#endif /* CONFIG_X86_64 */
943 +#endif /* !CONFIG_UML_X86 */
944
945 #endif /* _ASM_X86_TEXT_PATCHING_H */
946 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
947 index b9d5e7c9ef43..918a23704c0c 100644
948 --- a/arch/x86/kernel/alternative.c
949 +++ b/arch/x86/kernel/alternative.c
950 @@ -662,15 +662,29 @@ void __init alternative_instructions(void)
951 * handlers seeing an inconsistent instruction while you patch.
952 */
953 void *__init_or_module text_poke_early(void *addr, const void *opcode,
954 - size_t len)
955 + size_t len)
956 {
957 unsigned long flags;
958 - local_irq_save(flags);
959 - memcpy(addr, opcode, len);
960 - local_irq_restore(flags);
961 - sync_core();
962 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
963 - that causes hangs on some VIA CPUs. */
964 +
965 + if (boot_cpu_has(X86_FEATURE_NX) &&
966 + is_module_text_address((unsigned long)addr)) {
967 + /*
968 + * Modules text is marked initially as non-executable, so the
969 + * code cannot be running and speculative code-fetches are
970 + * prevented. Just change the code.
971 + */
972 + memcpy(addr, opcode, len);
973 + } else {
974 + local_irq_save(flags);
975 + memcpy(addr, opcode, len);
976 + local_irq_restore(flags);
977 + sync_core();
978 +
979 + /*
980 + * Could also do a CLFLUSH here to speed up CPU recovery; but
981 + * that causes hangs on some VIA CPUs.
982 + */
983 + }
984 return addr;
985 }
986
987 diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
988 index c805a06e14c3..ff1c00b695ae 100644
989 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
990 +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
991 @@ -46,8 +46,6 @@
992 static struct mce i_mce;
993 static struct dentry *dfs_inj;
994
995 -static u8 n_banks;
996 -
997 #define MAX_FLAG_OPT_SIZE 4
998 #define NBCFG 0x44
999
1000 @@ -567,9 +565,15 @@ err:
1001 static int inj_bank_set(void *data, u64 val)
1002 {
1003 struct mce *m = (struct mce *)data;
1004 + u8 n_banks;
1005 + u64 cap;
1006 +
1007 + /* Get bank count on target CPU so we can handle non-uniform values. */
1008 + rdmsrl_on_cpu(m->extcpu, MSR_IA32_MCG_CAP, &cap);
1009 + n_banks = cap & MCG_BANKCNT_MASK;
1010
1011 if (val >= n_banks) {
1012 - pr_err("Non-existent MCE bank: %llu\n", val);
1013 + pr_err("MCA bank %llu non-existent on CPU%d\n", val, m->extcpu);
1014 return -EINVAL;
1015 }
1016
1017 @@ -659,10 +663,6 @@ static struct dfs_node {
1018 static int __init debugfs_init(void)
1019 {
1020 unsigned int i;
1021 - u64 cap;
1022 -
1023 - rdmsrl(MSR_IA32_MCG_CAP, cap);
1024 - n_banks = cap & MCG_BANKCNT_MASK;
1025
1026 dfs_inj = debugfs_create_dir("mce-inject", NULL);
1027 if (!dfs_inj)
1028 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
1029 index f9e7096b1804..fee118b3b69f 100644
1030 --- a/arch/x86/kernel/cpu/mcheck/mce.c
1031 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
1032 @@ -711,19 +711,49 @@ bool machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
1033
1034 barrier();
1035 m.status = mce_rdmsrl(msr_ops.status(i));
1036 +
1037 + /* If this entry is not valid, ignore it */
1038 if (!(m.status & MCI_STATUS_VAL))
1039 continue;
1040
1041 /*
1042 - * Uncorrected or signalled events are handled by the exception
1043 - * handler when it is enabled, so don't process those here.
1044 - *
1045 - * TBD do the same check for MCI_STATUS_EN here?
1046 + * If we are logging everything (at CPU online) or this
1047 + * is a corrected error, then we must log it.
1048 */
1049 - if (!(flags & MCP_UC) &&
1050 - (m.status & (mca_cfg.ser ? MCI_STATUS_S : MCI_STATUS_UC)))
1051 - continue;
1052 + if ((flags & MCP_UC) || !(m.status & MCI_STATUS_UC))
1053 + goto log_it;
1054 +
1055 + /*
1056 + * Newer Intel systems that support software error
1057 + * recovery need to make additional checks. Other
1058 + * CPUs should skip over uncorrected errors, but log
1059 + * everything else.
1060 + */
1061 + if (!mca_cfg.ser) {
1062 + if (m.status & MCI_STATUS_UC)
1063 + continue;
1064 + goto log_it;
1065 + }
1066 +
1067 + /* Log "not enabled" (speculative) errors */
1068 + if (!(m.status & MCI_STATUS_EN))
1069 + goto log_it;
1070 +
1071 + /*
1072 + * Log UCNA (SDM: 15.6.3 "UCR Error Classification")
1073 + * UC == 1 && PCC == 0 && S == 0
1074 + */
1075 + if (!(m.status & MCI_STATUS_PCC) && !(m.status & MCI_STATUS_S))
1076 + goto log_it;
1077 +
1078 + /*
1079 + * Skip anything else. Presumption is that our read of this
1080 + * bank is racing with a machine check. Leave the log alone
1081 + * for do_machine_check() to deal with it.
1082 + */
1083 + continue;
1084
1085 +log_it:
1086 error_seen = true;
1087
1088 mce_read_aux(&m, i);
1089 @@ -1450,13 +1480,12 @@ EXPORT_SYMBOL_GPL(mce_notify_irq);
1090 static int __mcheck_cpu_mce_banks_init(void)
1091 {
1092 int i;
1093 - u8 num_banks = mca_cfg.banks;
1094
1095 - mce_banks = kcalloc(num_banks, sizeof(struct mce_bank), GFP_KERNEL);
1096 + mce_banks = kcalloc(MAX_NR_BANKS, sizeof(struct mce_bank), GFP_KERNEL);
1097 if (!mce_banks)
1098 return -ENOMEM;
1099
1100 - for (i = 0; i < num_banks; i++) {
1101 + for (i = 0; i < MAX_NR_BANKS; i++) {
1102 struct mce_bank *b = &mce_banks[i];
1103
1104 b->ctl = -1ULL;
1105 @@ -1470,28 +1499,19 @@ static int __mcheck_cpu_mce_banks_init(void)
1106 */
1107 static int __mcheck_cpu_cap_init(void)
1108 {
1109 - unsigned b;
1110 u64 cap;
1111 + u8 b;
1112
1113 rdmsrl(MSR_IA32_MCG_CAP, cap);
1114
1115 b = cap & MCG_BANKCNT_MASK;
1116 - if (!mca_cfg.banks)
1117 - pr_info("CPU supports %d MCE banks\n", b);
1118 -
1119 - if (b > MAX_NR_BANKS) {
1120 - pr_warn("Using only %u machine check banks out of %u\n",
1121 - MAX_NR_BANKS, b);
1122 + if (WARN_ON_ONCE(b > MAX_NR_BANKS))
1123 b = MAX_NR_BANKS;
1124 - }
1125
1126 - /* Don't support asymmetric configurations today */
1127 - WARN_ON(mca_cfg.banks != 0 && b != mca_cfg.banks);
1128 - mca_cfg.banks = b;
1129 + mca_cfg.banks = max(mca_cfg.banks, b);
1130
1131 if (!mce_banks) {
1132 int err = __mcheck_cpu_mce_banks_init();
1133 -
1134 if (err)
1135 return err;
1136 }
1137 @@ -2473,6 +2493,8 @@ EXPORT_SYMBOL_GPL(mcsafe_key);
1138
1139 static int __init mcheck_late_init(void)
1140 {
1141 + pr_info("Using %d MCE banks\n", mca_cfg.banks);
1142 +
1143 if (mca_cfg.recovery)
1144 static_branch_inc(&mcsafe_key);
1145
1146 diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
1147 index b9bc8a1a584e..b43ddefd77f4 100644
1148 --- a/arch/x86/kernel/cpu/microcode/core.c
1149 +++ b/arch/x86/kernel/cpu/microcode/core.c
1150 @@ -418,8 +418,9 @@ static int do_microcode_update(const void __user *buf, size_t size)
1151 if (ustate == UCODE_ERROR) {
1152 error = -1;
1153 break;
1154 - } else if (ustate == UCODE_OK)
1155 + } else if (ustate == UCODE_NEW) {
1156 apply_microcode_on_target(cpu);
1157 + }
1158 }
1159
1160 return error;
1161 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
1162 index 0469cd078db1..b50ac9c7397b 100644
1163 --- a/arch/x86/kernel/irq_64.c
1164 +++ b/arch/x86/kernel/irq_64.c
1165 @@ -26,9 +26,18 @@ int sysctl_panic_on_stackoverflow;
1166 /*
1167 * Probabilistic stack overflow check:
1168 *
1169 - * Only check the stack in process context, because everything else
1170 - * runs on the big interrupt stacks. Checking reliably is too expensive,
1171 - * so we just check from interrupts.
1172 + * Regular device interrupts can enter on the following stacks:
1173 + *
1174 + * - User stack
1175 + *
1176 + * - Kernel task stack
1177 + *
1178 + * - Interrupt stack if a device driver reenables interrupts
1179 + * which should only happen in really old drivers.
1180 + *
1181 + * - Debug IST stack
1182 + *
1183 + * All other contexts are invalid.
1184 */
1185 static inline void stack_overflow_check(struct pt_regs *regs)
1186 {
1187 @@ -53,8 +62,8 @@ static inline void stack_overflow_check(struct pt_regs *regs)
1188 return;
1189
1190 oist = this_cpu_ptr(&orig_ist);
1191 - estack_top = (u64)oist->ist[0] - EXCEPTION_STKSZ + STACK_TOP_MARGIN;
1192 - estack_bottom = (u64)oist->ist[N_EXCEPTION_STACKS - 1];
1193 + estack_bottom = (u64)oist->ist[DEBUG_STACK];
1194 + estack_top = estack_bottom - DEBUG_STKSZ + STACK_TOP_MARGIN;
1195 if (regs->sp >= estack_top && regs->sp <= estack_bottom)
1196 return;
1197
1198 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
1199 index f58336af095c..6645f123419c 100644
1200 --- a/arch/x86/kernel/module.c
1201 +++ b/arch/x86/kernel/module.c
1202 @@ -87,7 +87,7 @@ void *module_alloc(unsigned long size)
1203 p = __vmalloc_node_range(size, MODULE_ALIGN,
1204 MODULES_VADDR + get_module_load_offset(),
1205 MODULES_END, GFP_KERNEL,
1206 - PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
1207 + PAGE_KERNEL, 0, NUMA_NO_NODE,
1208 __builtin_return_address(0));
1209 if (p && (kasan_module_alloc(p, size) < 0)) {
1210 vfree(p);
1211 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
1212 index 92a3b312a53c..44e647a65de8 100644
1213 --- a/arch/x86/kernel/signal.c
1214 +++ b/arch/x86/kernel/signal.c
1215 @@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
1216 COPY_SEG_CPL3(cs);
1217 COPY_SEG_CPL3(ss);
1218
1219 -#ifdef CONFIG_X86_64
1220 - /*
1221 - * Fix up SS if needed for the benefit of old DOSEMU and
1222 - * CRIU.
1223 - */
1224 - if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
1225 - user_64bit_mode(regs)))
1226 - force_valid_ss(regs);
1227 -#endif
1228 -
1229 get_user_ex(tmpflags, &sc->flags);
1230 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
1231 regs->orig_ax = -1; /* disable syscall checks */
1232 @@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
1233 buf = (void __user *)buf_val;
1234 } get_user_catch(err);
1235
1236 +#ifdef CONFIG_X86_64
1237 + /*
1238 + * Fix up SS if needed for the benefit of old DOSEMU and
1239 + * CRIU.
1240 + */
1241 + if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
1242 + force_valid_ss(regs);
1243 +#endif
1244 +
1245 err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));
1246
1247 force_iret();
1248 @@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1249 {
1250 struct rt_sigframe __user *frame;
1251 void __user *fp = NULL;
1252 + unsigned long uc_flags;
1253 int err = 0;
1254
1255 frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
1256 @@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
1257 return -EFAULT;
1258 }
1259
1260 + uc_flags = frame_uc_flags(regs);
1261 +
1262 put_user_try {
1263 /* Create the ucontext. */
1264 - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
1265 + put_user_ex(uc_flags, &frame->uc.uc_flags);
1266 put_user_ex(0, &frame->uc.uc_link);
1267 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1268
1269 @@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
1270 {
1271 #ifdef CONFIG_X86_X32_ABI
1272 struct rt_sigframe_x32 __user *frame;
1273 + unsigned long uc_flags;
1274 void __user *restorer;
1275 int err = 0;
1276 void __user *fpstate = NULL;
1277 @@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig,
1278 return -EFAULT;
1279 }
1280
1281 + uc_flags = frame_uc_flags(regs);
1282 +
1283 put_user_try {
1284 /* Create the ucontext. */
1285 - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags);
1286 + put_user_ex(uc_flags, &frame->uc.uc_flags);
1287 put_user_ex(0, &frame->uc.uc_link);
1288 compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);
1289 put_user_ex(0, &frame->uc.uc__pad0);
1290 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
1291 index 85e6d5620188..2fb152d813c1 100644
1292 --- a/arch/x86/kernel/vmlinux.lds.S
1293 +++ b/arch/x86/kernel/vmlinux.lds.S
1294 @@ -151,11 +151,11 @@ SECTIONS
1295 *(.text.__x86.indirect_thunk)
1296 __indirect_thunk_end = .;
1297 #endif
1298 -
1299 - /* End of text section */
1300 - _etext = .;
1301 } :text = 0x9090
1302
1303 + /* End of text section */
1304 + _etext = .;
1305 +
1306 NOTES :text :note
1307
1308 EXCEPTION_TABLE(16) :text = 0x9090
1309 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
1310 index 8dd9208ae4de..ea454d3f7763 100644
1311 --- a/arch/x86/kvm/svm.c
1312 +++ b/arch/x86/kvm/svm.c
1313 @@ -2022,7 +2022,11 @@ static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1314 if (!kvm_vcpu_apicv_active(vcpu))
1315 return;
1316
1317 - if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1318 + /*
1319 + * Since the host physical APIC id is 8 bits,
1320 + * we can support host APIC ID upto 255.
1321 + */
1322 + if (WARN_ON(h_physical_id > AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK))
1323 return;
1324
1325 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1326 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
1327 index d0eb37c069b8..be4ba0975a0f 100644
1328 --- a/arch/x86/kvm/x86.c
1329 +++ b/arch/x86/kvm/x86.c
1330 @@ -1188,7 +1188,7 @@ static int set_efer(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
1331 u64 efer = msr_info->data;
1332
1333 if (efer & efer_reserved_bits)
1334 - return false;
1335 + return 1;
1336
1337 if (!msr_info->host_initiated) {
1338 if (!__kvm_valid_efer(vcpu, efer))
1339 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
1340 index 3b24dc05251c..9d05572370ed 100644
1341 --- a/arch/x86/lib/memcpy_64.S
1342 +++ b/arch/x86/lib/memcpy_64.S
1343 @@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
1344 /* Copy successful. Return zero */
1345 .L_done_memcpy_trap:
1346 xorl %eax, %eax
1347 +.L_done:
1348 ret
1349 ENDPROC(__memcpy_mcsafe)
1350 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
1351 @@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
1352 addl %edx, %ecx
1353 .E_trailing_bytes:
1354 mov %ecx, %eax
1355 - ret
1356 + jmp .L_done
1357
1358 /*
1359 * For write fault handling, given the destination is unaligned,
1360 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
1361 index 47bebfe6efa7..9d9765e4d1ef 100644
1362 --- a/arch/x86/mm/fault.c
1363 +++ b/arch/x86/mm/fault.c
1364 @@ -427,8 +427,6 @@ static noinline int vmalloc_fault(unsigned long address)
1365 if (!(address >= VMALLOC_START && address < VMALLOC_END))
1366 return -1;
1367
1368 - WARN_ON_ONCE(in_nmi());
1369 -
1370 /*
1371 * Copy kernel mappings over when needed. This can also
1372 * happen within a race in page table update. In the later
1373 diff --git a/block/genhd.c b/block/genhd.c
1374 index be5bab20b2ab..2b2a936cf848 100644
1375 --- a/block/genhd.c
1376 +++ b/block/genhd.c
1377 @@ -518,6 +518,18 @@ void blk_free_devt(dev_t devt)
1378 }
1379 }
1380
1381 +/**
1382 + * We invalidate devt by assigning NULL pointer for devt in idr.
1383 + */
1384 +void blk_invalidate_devt(dev_t devt)
1385 +{
1386 + if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
1387 + spin_lock_bh(&ext_devt_lock);
1388 + idr_replace(&ext_devt_idr, NULL, blk_mangle_minor(MINOR(devt)));
1389 + spin_unlock_bh(&ext_devt_lock);
1390 + }
1391 +}
1392 +
1393 static char *bdevt_str(dev_t devt, char *buf)
1394 {
1395 if (MAJOR(devt) <= 0xff && MINOR(devt) <= 0xff) {
1396 @@ -769,6 +781,13 @@ void del_gendisk(struct gendisk *disk)
1397
1398 if (!(disk->flags & GENHD_FL_HIDDEN))
1399 blk_unregister_region(disk_devt(disk), disk->minors);
1400 + /*
1401 + * Remove gendisk pointer from idr so that it cannot be looked up
1402 + * while RCU period before freeing gendisk is running to prevent
1403 + * use-after-free issues. Note that the device number stays
1404 + * "in-use" until we really free the gendisk.
1405 + */
1406 + blk_invalidate_devt(disk_devt(disk));
1407
1408 kobject_put(disk->part0.holder_dir);
1409 kobject_put(disk->slave_dir);
1410 diff --git a/block/partition-generic.c b/block/partition-generic.c
1411 index 5f8db5c5140f..98d60a59b843 100644
1412 --- a/block/partition-generic.c
1413 +++ b/block/partition-generic.c
1414 @@ -289,6 +289,13 @@ void delete_partition(struct gendisk *disk, int partno)
1415 kobject_put(part->holder_dir);
1416 device_del(part_to_dev(part));
1417
1418 + /*
1419 + * Remove gendisk pointer from idr so that it cannot be looked up
1420 + * while RCU period before freeing gendisk is running to prevent
1421 + * use-after-free issues. Note that the device number stays
1422 + * "in-use" until we really free the gendisk.
1423 + */
1424 + blk_invalidate_devt(part_devt(part));
1425 hd_struct_kill(part);
1426 }
1427
1428 diff --git a/block/sed-opal.c b/block/sed-opal.c
1429 index e0de4dd448b3..119640897293 100644
1430 --- a/block/sed-opal.c
1431 +++ b/block/sed-opal.c
1432 @@ -2095,13 +2095,16 @@ static int opal_erase_locking_range(struct opal_dev *dev,
1433 static int opal_enable_disable_shadow_mbr(struct opal_dev *dev,
1434 struct opal_mbr_data *opal_mbr)
1435 {
1436 + u8 enable_disable = opal_mbr->enable_disable == OPAL_MBR_ENABLE ?
1437 + OPAL_TRUE : OPAL_FALSE;
1438 +
1439 const struct opal_step mbr_steps[] = {
1440 { opal_discovery0, },
1441 { start_admin1LSP_opal_session, &opal_mbr->key },
1442 - { set_mbr_done, &opal_mbr->enable_disable },
1443 + { set_mbr_done, &enable_disable },
1444 { end_opal_session, },
1445 { start_admin1LSP_opal_session, &opal_mbr->key },
1446 - { set_mbr_enable_disable, &opal_mbr->enable_disable },
1447 + { set_mbr_enable_disable, &enable_disable },
1448 { end_opal_session, },
1449 { NULL, }
1450 };
1451 @@ -2221,7 +2224,7 @@ static int __opal_lock_unlock(struct opal_dev *dev,
1452
1453 static int __opal_set_mbr_done(struct opal_dev *dev, struct opal_key *key)
1454 {
1455 - u8 mbr_done_tf = 1;
1456 + u8 mbr_done_tf = OPAL_TRUE;
1457 const struct opal_step mbrdone_step [] = {
1458 { opal_discovery0, },
1459 { start_admin1LSP_opal_session, key },
1460 diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
1461 index e48eebc27b81..43c2615434b4 100644
1462 --- a/drivers/acpi/arm64/iort.c
1463 +++ b/drivers/acpi/arm64/iort.c
1464 @@ -1231,18 +1231,24 @@ static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node)
1465 /*
1466 * set numa proximity domain for smmuv3 device
1467 */
1468 -static void __init arm_smmu_v3_set_proximity(struct device *dev,
1469 +static int __init arm_smmu_v3_set_proximity(struct device *dev,
1470 struct acpi_iort_node *node)
1471 {
1472 struct acpi_iort_smmu_v3 *smmu;
1473
1474 smmu = (struct acpi_iort_smmu_v3 *)node->node_data;
1475 if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) {
1476 - set_dev_node(dev, acpi_map_pxm_to_node(smmu->pxm));
1477 + int node = acpi_map_pxm_to_node(smmu->pxm);
1478 +
1479 + if (node != NUMA_NO_NODE && !node_online(node))
1480 + return -EINVAL;
1481 +
1482 + set_dev_node(dev, node);
1483 pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n",
1484 smmu->base_address,
1485 smmu->pxm);
1486 }
1487 + return 0;
1488 }
1489 #else
1490 #define arm_smmu_v3_set_proximity NULL
1491 @@ -1317,7 +1323,7 @@ struct iort_dev_config {
1492 int (*dev_count_resources)(struct acpi_iort_node *node);
1493 void (*dev_init_resources)(struct resource *res,
1494 struct acpi_iort_node *node);
1495 - void (*dev_set_proximity)(struct device *dev,
1496 + int (*dev_set_proximity)(struct device *dev,
1497 struct acpi_iort_node *node);
1498 };
1499
1500 @@ -1368,8 +1374,11 @@ static int __init iort_add_platform_device(struct acpi_iort_node *node,
1501 if (!pdev)
1502 return -ENOMEM;
1503
1504 - if (ops->dev_set_proximity)
1505 - ops->dev_set_proximity(&pdev->dev, node);
1506 + if (ops->dev_set_proximity) {
1507 + ret = ops->dev_set_proximity(&pdev->dev, node);
1508 + if (ret)
1509 + goto dev_put;
1510 + }
1511
1512 count = ops->dev_count_resources(node);
1513
1514 diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c
1515 index 693cf05b0cc4..288673cff85e 100644
1516 --- a/drivers/acpi/property.c
1517 +++ b/drivers/acpi/property.c
1518 @@ -975,6 +975,14 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
1519 const struct acpi_data_node *data = to_acpi_data_node(fwnode);
1520 struct acpi_data_node *dn;
1521
1522 + /*
1523 + * We can have a combination of device and data nodes, e.g. with
1524 + * hierarchical _DSD properties. Make sure the adev pointer is
1525 + * restored before going through data nodes, otherwise we will
1526 + * be looking for data_nodes below the last device found instead
1527 + * of the common fwnode shared by device_nodes and data_nodes.
1528 + */
1529 + adev = to_acpi_device_node(fwnode);
1530 if (adev)
1531 head = &adev->data.subnodes;
1532 else if (data)
1533 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
1534 index a690fd400260..4abd7c6531d9 100644
1535 --- a/drivers/base/power/main.c
1536 +++ b/drivers/base/power/main.c
1537 @@ -1736,6 +1736,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
1538 if (dev->power.syscore)
1539 goto Complete;
1540
1541 + /* Avoid direct_complete to let wakeup_path propagate. */
1542 + if (device_may_wakeup(dev) || dev->power.wakeup_path)
1543 + dev->power.direct_complete = false;
1544 +
1545 if (dev->power.direct_complete) {
1546 if (pm_runtime_status_suspended(dev)) {
1547 pm_runtime_disable(dev);
1548 diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
1549 index f0d593c3fa72..77004c29da08 100644
1550 --- a/drivers/bluetooth/hci_qca.c
1551 +++ b/drivers/bluetooth/hci_qca.c
1552 @@ -504,6 +504,8 @@ static int qca_open(struct hci_uart *hu)
1553 qcadev = serdev_device_get_drvdata(hu->serdev);
1554 if (qcadev->btsoc_type != QCA_WCN3990) {
1555 gpiod_set_value_cansleep(qcadev->bt_en, 1);
1556 + /* Controller needs time to bootup. */
1557 + msleep(150);
1558 } else {
1559 hu->init_speed = qcadev->init_speed;
1560 hu->oper_speed = qcadev->oper_speed;
1561 diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
1562 index b65ff6962899..e9b6ac61fb7f 100644
1563 --- a/drivers/char/hw_random/omap-rng.c
1564 +++ b/drivers/char/hw_random/omap-rng.c
1565 @@ -443,6 +443,7 @@ static int omap_rng_probe(struct platform_device *pdev)
1566 priv->rng.read = omap_rng_do_read;
1567 priv->rng.init = omap_rng_init;
1568 priv->rng.cleanup = omap_rng_cleanup;
1569 + priv->rng.quality = 900;
1570
1571 priv->rng.priv = (unsigned long)priv;
1572 platform_set_drvdata(pdev, priv);
1573 diff --git a/drivers/char/random.c b/drivers/char/random.c
1574 index c75b6cdf0053..0a84b7f468ad 100644
1575 --- a/drivers/char/random.c
1576 +++ b/drivers/char/random.c
1577 @@ -778,6 +778,7 @@ static struct crng_state **crng_node_pool __read_mostly;
1578 #endif
1579
1580 static void invalidate_batched_entropy(void);
1581 +static void numa_crng_init(void);
1582
1583 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
1584 static int __init parse_trust_cpu(char *arg)
1585 @@ -806,7 +807,9 @@ static void crng_initialize(struct crng_state *crng)
1586 }
1587 crng->state[i] ^= rv;
1588 }
1589 - if (trust_cpu && arch_init) {
1590 + if (trust_cpu && arch_init && crng == &primary_crng) {
1591 + invalidate_batched_entropy();
1592 + numa_crng_init();
1593 crng_init = 2;
1594 pr_notice("random: crng done (trusting CPU's manufacturer)\n");
1595 }
1596 @@ -2212,8 +2215,8 @@ struct batched_entropy {
1597 u32 entropy_u32[CHACHA20_BLOCK_SIZE / sizeof(u32)];
1598 };
1599 unsigned int position;
1600 + spinlock_t batch_lock;
1601 };
1602 -static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
1603
1604 /*
1605 * Get a random word for internal kernel use only. The quality of the random
1606 @@ -2223,12 +2226,14 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
1607 * wait_for_random_bytes() should be called and return 0 at least once
1608 * at any point prior.
1609 */
1610 -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
1611 +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
1612 + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
1613 +};
1614 +
1615 u64 get_random_u64(void)
1616 {
1617 u64 ret;
1618 - bool use_lock;
1619 - unsigned long flags = 0;
1620 + unsigned long flags;
1621 struct batched_entropy *batch;
1622 static void *previous;
1623
1624 @@ -2243,28 +2248,25 @@ u64 get_random_u64(void)
1625
1626 warn_unseeded_randomness(&previous);
1627
1628 - use_lock = READ_ONCE(crng_init) < 2;
1629 - batch = &get_cpu_var(batched_entropy_u64);
1630 - if (use_lock)
1631 - read_lock_irqsave(&batched_entropy_reset_lock, flags);
1632 + batch = raw_cpu_ptr(&batched_entropy_u64);
1633 + spin_lock_irqsave(&batch->batch_lock, flags);
1634 if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
1635 extract_crng((__u32 *)batch->entropy_u64);
1636 batch->position = 0;
1637 }
1638 ret = batch->entropy_u64[batch->position++];
1639 - if (use_lock)
1640 - read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
1641 - put_cpu_var(batched_entropy_u64);
1642 + spin_unlock_irqrestore(&batch->batch_lock, flags);
1643 return ret;
1644 }
1645 EXPORT_SYMBOL(get_random_u64);
1646
1647 -static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
1648 +static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
1649 + .batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
1650 +};
1651 u32 get_random_u32(void)
1652 {
1653 u32 ret;
1654 - bool use_lock;
1655 - unsigned long flags = 0;
1656 + unsigned long flags;
1657 struct batched_entropy *batch;
1658 static void *previous;
1659
1660 @@ -2273,18 +2275,14 @@ u32 get_random_u32(void)
1661
1662 warn_unseeded_randomness(&previous);
1663
1664 - use_lock = READ_ONCE(crng_init) < 2;
1665 - batch = &get_cpu_var(batched_entropy_u32);
1666 - if (use_lock)
1667 - read_lock_irqsave(&batched_entropy_reset_lock, flags);
1668 + batch = raw_cpu_ptr(&batched_entropy_u32);
1669 + spin_lock_irqsave(&batch->batch_lock, flags);
1670 if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
1671 extract_crng(batch->entropy_u32);
1672 batch->position = 0;
1673 }
1674 ret = batch->entropy_u32[batch->position++];
1675 - if (use_lock)
1676 - read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
1677 - put_cpu_var(batched_entropy_u32);
1678 + spin_unlock_irqrestore(&batch->batch_lock, flags);
1679 return ret;
1680 }
1681 EXPORT_SYMBOL(get_random_u32);
1682 @@ -2298,12 +2296,19 @@ static void invalidate_batched_entropy(void)
1683 int cpu;
1684 unsigned long flags;
1685
1686 - write_lock_irqsave(&batched_entropy_reset_lock, flags);
1687 for_each_possible_cpu (cpu) {
1688 - per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
1689 - per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
1690 + struct batched_entropy *batched_entropy;
1691 +
1692 + batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
1693 + spin_lock_irqsave(&batched_entropy->batch_lock, flags);
1694 + batched_entropy->position = 0;
1695 + spin_unlock(&batched_entropy->batch_lock);
1696 +
1697 + batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
1698 + spin_lock(&batched_entropy->batch_lock);
1699 + batched_entropy->position = 0;
1700 + spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
1701 }
1702 - write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
1703 }
1704
1705 /**
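
Note on the random.c hunks above: they drop the global batched_entropy_reset_lock in favour of a spinlock embedded in each per-CPU batch, so refills and invalidation serialize per batch instead of across every CPU at once. A minimal sketch of that pattern follows, with refill() as a hypothetical stand-in for extract_crng():

    #include <linux/kernel.h>
    #include <linux/percpu.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct batch {
        u64 words[8];
        unsigned int position;
        spinlock_t lock;                /* protects words[] and position */
    };

    static DEFINE_PER_CPU(struct batch, batch_u64) = {
        .lock = __SPIN_LOCK_UNLOCKED(batch_u64.lock),
    };

    /* Placeholder for extract_crng(): the real code refills from the CRNG. */
    static void refill(u64 *words, size_t n)
    {
        memset(words, 0, n * sizeof(*words));
    }

    static u64 get_batched_u64(void)
    {
        struct batch *b;
        unsigned long flags;
        u64 ret;

        b = raw_cpu_ptr(&batch_u64);
        spin_lock_irqsave(&b->lock, flags);     /* only this CPU's batch is locked */
        if (b->position % ARRAY_SIZE(b->words) == 0) {
            refill(b->words, ARRAY_SIZE(b->words));
            b->position = 0;
        }
        ret = b->words[b->position++];
        spin_unlock_irqrestore(&b->lock, flags);
        return ret;
    }

With this layout, invalidate_batched_entropy() can walk the possible CPUs and take each batch lock in turn instead of excluding every consumer behind one global lock.
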
1706 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
1707 index 5b5b5d72eab7..c55f6aeb4227 100644
1708 --- a/drivers/char/virtio_console.c
1709 +++ b/drivers/char/virtio_console.c
1710 @@ -75,7 +75,7 @@ struct ports_driver_data {
1711 /* All the console devices handled by this driver */
1712 struct list_head consoles;
1713 };
1714 -static struct ports_driver_data pdrvdata;
1715 +static struct ports_driver_data pdrvdata = { .next_vtermno = 1};
1716
1717 static DEFINE_SPINLOCK(pdrvdata_lock);
1718 static DECLARE_COMPLETION(early_console_added);
1719 @@ -1405,6 +1405,7 @@ static int add_port(struct ports_device *portdev, u32 id)
1720 port->async_queue = NULL;
1721
1722 port->cons.ws.ws_row = port->cons.ws.ws_col = 0;
1723 + port->cons.vtermno = 0;
1724
1725 port->host_connected = port->guest_connected = false;
1726 port->stats = (struct port_stats) { 0 };
1727 diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
1728 index 450de24a1b42..64191694ff6e 100644
1729 --- a/drivers/clk/rockchip/clk-rk3288.c
1730 +++ b/drivers/clk/rockchip/clk-rk3288.c
1731 @@ -198,7 +198,7 @@ PNAME(mux_hsadcout_p) = { "hsadc_src", "ext_hsadc" };
1732 PNAME(mux_edp_24m_p) = { "ext_edp_24m", "xin24m" };
1733 PNAME(mux_tspout_p) = { "cpll", "gpll", "npll", "xin27m" };
1734
1735 -PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vepu", "aclk_vdpu" };
1736 +PNAME(mux_aclk_vcodec_pre_p) = { "aclk_vdpu", "aclk_vepu" };
1737 PNAME(mux_usbphy480m_p) = { "sclk_otgphy1_480m", "sclk_otgphy2_480m",
1738 "sclk_otgphy0_480m" };
1739 PNAME(mux_hsicphy480m_p) = { "cpll", "gpll", "usbphy480m_src" };
1740 @@ -292,13 +292,13 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1741 COMPOSITE_NOMUX(0, "aclk_core_mp", "armclk", CLK_IGNORE_UNUSED,
1742 RK3288_CLKSEL_CON(0), 4, 4, DFLAGS | CLK_DIVIDER_READ_ONLY,
1743 RK3288_CLKGATE_CON(12), 6, GFLAGS),
1744 - COMPOSITE_NOMUX(0, "atclk", "armclk", CLK_IGNORE_UNUSED,
1745 + COMPOSITE_NOMUX(0, "atclk", "armclk", 0,
1746 RK3288_CLKSEL_CON(37), 4, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
1747 RK3288_CLKGATE_CON(12), 7, GFLAGS),
1748 COMPOSITE_NOMUX(0, "pclk_dbg_pre", "armclk", CLK_IGNORE_UNUSED,
1749 RK3288_CLKSEL_CON(37), 9, 5, DFLAGS | CLK_DIVIDER_READ_ONLY,
1750 RK3288_CLKGATE_CON(12), 8, GFLAGS),
1751 - GATE(0, "pclk_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
1752 + GATE(0, "pclk_dbg", "pclk_dbg_pre", 0,
1753 RK3288_CLKGATE_CON(12), 9, GFLAGS),
1754 GATE(0, "cs_dbg", "pclk_dbg_pre", CLK_IGNORE_UNUSED,
1755 RK3288_CLKGATE_CON(12), 10, GFLAGS),
1756 @@ -399,7 +399,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1757 COMPOSITE(0, "aclk_vdpu", mux_pll_src_cpll_gpll_usb480m_p, 0,
1758 RK3288_CLKSEL_CON(32), 14, 2, MFLAGS, 8, 5, DFLAGS,
1759 RK3288_CLKGATE_CON(3), 11, GFLAGS),
1760 - MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, 0,
1761 + MUXGRF(0, "aclk_vcodec_pre", mux_aclk_vcodec_pre_p, CLK_SET_RATE_PARENT,
1762 RK3288_GRF_SOC_CON(0), 7, 1, MFLAGS),
1763 GATE(ACLK_VCODEC, "aclk_vcodec", "aclk_vcodec_pre", 0,
1764 RK3288_CLKGATE_CON(9), 0, GFLAGS),
1765 @@ -626,7 +626,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1766 INVERTER(SCLK_HSADC, "sclk_hsadc", "sclk_hsadc_out",
1767 RK3288_CLKSEL_CON(22), 7, IFLAGS),
1768
1769 - GATE(0, "jtag", "ext_jtag", CLK_IGNORE_UNUSED,
1770 + GATE(0, "jtag", "ext_jtag", 0,
1771 RK3288_CLKGATE_CON(4), 14, GFLAGS),
1772
1773 COMPOSITE_NODIV(SCLK_USBPHY480M_SRC, "usbphy480m_src", mux_usbphy480m_p, 0,
1774 @@ -635,7 +635,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1775 COMPOSITE_NODIV(SCLK_HSICPHY480M, "sclk_hsicphy480m", mux_hsicphy480m_p, 0,
1776 RK3288_CLKSEL_CON(29), 0, 2, MFLAGS,
1777 RK3288_CLKGATE_CON(3), 6, GFLAGS),
1778 - GATE(0, "hsicphy12m_xin12m", "xin12m", CLK_IGNORE_UNUSED,
1779 + GATE(0, "hsicphy12m_xin12m", "xin12m", 0,
1780 RK3288_CLKGATE_CON(13), 9, GFLAGS),
1781 DIV(0, "hsicphy12m_usbphy", "sclk_hsicphy480m", 0,
1782 RK3288_CLKSEL_CON(11), 8, 6, DFLAGS),
1783 @@ -676,7 +676,7 @@ static struct rockchip_clk_branch rk3288_clk_branches[] __initdata = {
1784 GATE(PCLK_TZPC, "pclk_tzpc", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 3, GFLAGS),
1785 GATE(PCLK_UART2, "pclk_uart2", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 9, GFLAGS),
1786 GATE(PCLK_EFUSE256, "pclk_efuse_256", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 10, GFLAGS),
1787 - GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 11, GFLAGS),
1788 + GATE(PCLK_RKPWM, "pclk_rkpwm", "pclk_cpu", 0, RK3288_CLKGATE_CON(11), 11, GFLAGS),
1789
1790 /* ddrctrl [DDR Controller PHY clock] gates */
1791 GATE(0, "nclk_ddrupctl0", "ddrphy", CLK_IGNORE_UNUSED, RK3288_CLKGATE_CON(11), 4, GFLAGS),
1792 @@ -816,12 +816,9 @@ static const char *const rk3288_critical_clocks[] __initconst = {
1793 "pclk_alive_niu",
1794 "pclk_pd_pmu",
1795 "pclk_pmu_niu",
1796 - "pclk_core_niu",
1797 - "pclk_ddrupctl0",
1798 - "pclk_publ0",
1799 - "pclk_ddrupctl1",
1800 - "pclk_publ1",
1801 "pmu_hclk_otg0",
1802 + /* pwm-regulators on some boards, so handoff-critical later */
1803 + "pclk_rkpwm",
1804 };
1805
1806 static void __iomem *rk3288_cru_base;
1807 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
1808 index 505c9a55d555..d3213594d1a7 100644
1809 --- a/drivers/cpufreq/cpufreq.c
1810 +++ b/drivers/cpufreq/cpufreq.c
1811 @@ -1103,6 +1103,7 @@ static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1812 cpufreq_global_kobject, "policy%u", cpu);
1813 if (ret) {
1814 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1815 + kobject_put(&policy->kobj);
1816 goto err_free_real_cpus;
1817 }
1818
1819 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
1820 index 6d53f7d9fc7a..69fc5cf4782f 100644
1821 --- a/drivers/cpufreq/cpufreq_governor.c
1822 +++ b/drivers/cpufreq/cpufreq_governor.c
1823 @@ -459,6 +459,8 @@ int cpufreq_dbs_governor_init(struct cpufreq_policy *policy)
1824 /* Failure, so roll back. */
1825 pr_err("initialization failed (dbs_data kobject init error %d)\n", ret);
1826
1827 + kobject_put(&dbs_data->attr_set.kobj);
1828 +
1829 policy->governor_data = NULL;
1830
1831 if (!have_governor_per_policy())
1832 diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
1833 index c2dd43f3f5d8..8d63a6dc8383 100644
1834 --- a/drivers/cpufreq/kirkwood-cpufreq.c
1835 +++ b/drivers/cpufreq/kirkwood-cpufreq.c
1836 @@ -124,13 +124,14 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
1837 priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk");
1838 if (IS_ERR(priv.cpu_clk)) {
1839 dev_err(priv.dev, "Unable to get cpuclk\n");
1840 - return PTR_ERR(priv.cpu_clk);
1841 + err = PTR_ERR(priv.cpu_clk);
1842 + goto out_node;
1843 }
1844
1845 err = clk_prepare_enable(priv.cpu_clk);
1846 if (err) {
1847 dev_err(priv.dev, "Unable to prepare cpuclk\n");
1848 - return err;
1849 + goto out_node;
1850 }
1851
1852 kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
1853 @@ -161,20 +162,22 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
1854 goto out_ddr;
1855 }
1856
1857 - of_node_put(np);
1858 - np = NULL;
1859 -
1860 err = cpufreq_register_driver(&kirkwood_cpufreq_driver);
1861 - if (!err)
1862 - return 0;
1863 + if (err) {
1864 + dev_err(priv.dev, "Failed to register cpufreq driver\n");
1865 + goto out_powersave;
1866 + }
1867
1868 - dev_err(priv.dev, "Failed to register cpufreq driver\n");
1869 + of_node_put(np);
1870 + return 0;
1871
1872 +out_powersave:
1873 clk_disable_unprepare(priv.powersave_clk);
1874 out_ddr:
1875 clk_disable_unprepare(priv.ddr_clk);
1876 out_cpu:
1877 clk_disable_unprepare(priv.cpu_clk);
1878 +out_node:
1879 of_node_put(np);
1880
1881 return err;
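
Note on the kirkwood-cpufreq hunk above: it reworks the probe error paths so the device_node reference is dropped on every exit, not only on success. A minimal sketch of the same unwind ladder, assuming the function owns the np reference and with register_something() as a hypothetical final registration step:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/of.h>

    static int register_something(void)          /* stands in for the last probe step */
    {
        return 0;
    }

    static int example_probe(struct device_node *np)   /* consumes the np reference */
    {
        struct clk *cpu_clk;
        int err;

        cpu_clk = of_clk_get_by_name(np, "cpu_clk");
        if (IS_ERR(cpu_clk)) {
            err = PTR_ERR(cpu_clk);
            goto out_node;                  /* the node reference must still be dropped */
        }

        err = clk_prepare_enable(cpu_clk);
        if (err)
            goto out_node;

        err = register_something();
        if (err)
            goto out_clk;

        of_node_put(np);                    /* success path releases the reference too */
        return 0;

    out_clk:
        clk_disable_unprepare(cpu_clk);
    out_node:
        of_node_put(np);
        return err;
    }
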
1882 diff --git a/drivers/cpufreq/pasemi-cpufreq.c b/drivers/cpufreq/pasemi-cpufreq.c
1883 index 75dfbd2a58ea..c7710c149de8 100644
1884 --- a/drivers/cpufreq/pasemi-cpufreq.c
1885 +++ b/drivers/cpufreq/pasemi-cpufreq.c
1886 @@ -146,6 +146,7 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy)
1887
1888 cpu = of_get_cpu_node(policy->cpu, NULL);
1889
1890 + of_node_put(cpu);
1891 if (!cpu)
1892 goto out;
1893
1894 diff --git a/drivers/cpufreq/pmac32-cpufreq.c b/drivers/cpufreq/pmac32-cpufreq.c
1895 index 61ae06ca008e..e225edb5c359 100644
1896 --- a/drivers/cpufreq/pmac32-cpufreq.c
1897 +++ b/drivers/cpufreq/pmac32-cpufreq.c
1898 @@ -552,6 +552,7 @@ static int pmac_cpufreq_init_7447A(struct device_node *cpunode)
1899 volt_gpio_np = of_find_node_by_name(NULL, "cpu-vcore-select");
1900 if (volt_gpio_np)
1901 voltage_gpio = read_gpio(volt_gpio_np);
1902 + of_node_put(volt_gpio_np);
1903 if (!voltage_gpio){
1904 pr_err("missing cpu-vcore-select gpio\n");
1905 return 1;
1906 @@ -588,6 +589,7 @@ static int pmac_cpufreq_init_750FX(struct device_node *cpunode)
1907 if (volt_gpio_np)
1908 voltage_gpio = read_gpio(volt_gpio_np);
1909
1910 + of_node_put(volt_gpio_np);
1911 pvr = mfspr(SPRN_PVR);
1912 has_cpu_l2lve = !((pvr & 0xf00) == 0x100);
1913
1914 diff --git a/drivers/cpufreq/ppc_cbe_cpufreq.c b/drivers/cpufreq/ppc_cbe_cpufreq.c
1915 index 41a0f0be3f9f..8414c3a4ea08 100644
1916 --- a/drivers/cpufreq/ppc_cbe_cpufreq.c
1917 +++ b/drivers/cpufreq/ppc_cbe_cpufreq.c
1918 @@ -86,6 +86,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
1919 if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
1920 !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
1921 pr_info("invalid CBE regs pointers for cpufreq\n");
1922 + of_node_put(cpu);
1923 return -EINVAL;
1924 }
1925
1926 diff --git a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
1927 index a4b5ff2b72f8..f6936bb3b7be 100644
1928 --- a/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
1929 +++ b/drivers/crypto/sunxi-ss/sun4i-ss-hash.c
1930 @@ -240,7 +240,10 @@ static int sun4i_hash(struct ahash_request *areq)
1931 }
1932 } else {
1933 /* Since we have the flag final, we can go up to modulo 4 */
1934 - end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
1935 + if (areq->nbytes < 4)
1936 + end = 0;
1937 + else
1938 + end = ((areq->nbytes + op->len) / 4) * 4 - op->len;
1939 }
1940
1941 /* TODO if SGlen % 4 and !op->len then DMA */
1942 diff --git a/drivers/crypto/vmx/aesp8-ppc.pl b/drivers/crypto/vmx/aesp8-ppc.pl
1943 index de78282b8f44..9c6b5c1d6a1a 100644
1944 --- a/drivers/crypto/vmx/aesp8-ppc.pl
1945 +++ b/drivers/crypto/vmx/aesp8-ppc.pl
1946 @@ -1357,7 +1357,7 @@ Loop_ctr32_enc:
1947 addi $idx,$idx,16
1948 bdnz Loop_ctr32_enc
1949
1950 - vadduwm $ivec,$ivec,$one
1951 + vadduqm $ivec,$ivec,$one
1952 vmr $dat,$inptail
1953 lvx $inptail,0,$inp
1954 addi $inp,$inp,16
1955 diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
1956 index a75b95fac3bd..db5b8fe1dd4a 100644
1957 --- a/drivers/dma/at_xdmac.c
1958 +++ b/drivers/dma/at_xdmac.c
1959 @@ -1606,7 +1606,11 @@ static void at_xdmac_tasklet(unsigned long data)
1960 struct at_xdmac_desc,
1961 xfer_node);
1962 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1963 - BUG_ON(!desc->active_xfer);
1964 + if (!desc->active_xfer) {
1965 + dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
1966 + spin_unlock_bh(&atchan->lock);
1967 + return;
1968 + }
1969
1970 txd = &desc->tx_dma_desc;
1971
1972 diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
1973 index 88750a34e859..bc8050c025b7 100644
1974 --- a/drivers/dma/pl330.c
1975 +++ b/drivers/dma/pl330.c
1976 @@ -961,6 +961,7 @@ static void _stop(struct pl330_thread *thrd)
1977 {
1978 void __iomem *regs = thrd->dmac->base;
1979 u8 insn[6] = {0, 0, 0, 0, 0, 0};
1980 + u32 inten = readl(regs + INTEN);
1981
1982 if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
1983 UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
1984 @@ -973,10 +974,13 @@ static void _stop(struct pl330_thread *thrd)
1985
1986 _emit_KILL(0, insn);
1987
1988 - /* Stop generating interrupts for SEV */
1989 - writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
1990 -
1991 _execute_DBGINSN(thrd, insn, is_manager(thrd));
1992 +
1993 + /* clear the event */
1994 + if (inten & (1 << thrd->ev))
1995 + writel(1 << thrd->ev, regs + INTCLR);
1996 + /* Stop generating interrupts for SEV */
1997 + writel(inten & ~(1 << thrd->ev), regs + INTEN);
1998 }
1999
2000 /* Start doing req 'idx' of thread 'thrd' */
2001 diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c
2002 index b26256f23d67..09b6756366c3 100644
2003 --- a/drivers/dma/tegra210-adma.c
2004 +++ b/drivers/dma/tegra210-adma.c
2005 @@ -22,7 +22,6 @@
2006 #include <linux/of_device.h>
2007 #include <linux/of_dma.h>
2008 #include <linux/of_irq.h>
2009 -#include <linux/pm_clock.h>
2010 #include <linux/pm_runtime.h>
2011 #include <linux/slab.h>
2012
2013 @@ -141,6 +140,7 @@ struct tegra_adma {
2014 struct dma_device dma_dev;
2015 struct device *dev;
2016 void __iomem *base_addr;
2017 + struct clk *ahub_clk;
2018 unsigned int nr_channels;
2019 unsigned long rx_requests_reserved;
2020 unsigned long tx_requests_reserved;
2021 @@ -637,8 +637,9 @@ static int tegra_adma_runtime_suspend(struct device *dev)
2022 struct tegra_adma *tdma = dev_get_drvdata(dev);
2023
2024 tdma->global_cmd = tdma_read(tdma, ADMA_GLOBAL_CMD);
2025 + clk_disable_unprepare(tdma->ahub_clk);
2026
2027 - return pm_clk_suspend(dev);
2028 + return 0;
2029 }
2030
2031 static int tegra_adma_runtime_resume(struct device *dev)
2032 @@ -646,10 +647,11 @@ static int tegra_adma_runtime_resume(struct device *dev)
2033 struct tegra_adma *tdma = dev_get_drvdata(dev);
2034 int ret;
2035
2036 - ret = pm_clk_resume(dev);
2037 - if (ret)
2038 + ret = clk_prepare_enable(tdma->ahub_clk);
2039 + if (ret) {
2040 + dev_err(dev, "ahub clk_enable failed: %d\n", ret);
2041 return ret;
2042 -
2043 + }
2044 tdma_write(tdma, ADMA_GLOBAL_CMD, tdma->global_cmd);
2045
2046 return 0;
2047 @@ -692,13 +694,11 @@ static int tegra_adma_probe(struct platform_device *pdev)
2048 if (IS_ERR(tdma->base_addr))
2049 return PTR_ERR(tdma->base_addr);
2050
2051 - ret = pm_clk_create(&pdev->dev);
2052 - if (ret)
2053 - return ret;
2054 -
2055 - ret = of_pm_clk_add_clk(&pdev->dev, "d_audio");
2056 - if (ret)
2057 - goto clk_destroy;
2058 + tdma->ahub_clk = devm_clk_get(&pdev->dev, "d_audio");
2059 + if (IS_ERR(tdma->ahub_clk)) {
2060 + dev_err(&pdev->dev, "Error: Missing ahub controller clock\n");
2061 + return PTR_ERR(tdma->ahub_clk);
2062 + }
2063
2064 pm_runtime_enable(&pdev->dev);
2065
2066 @@ -775,8 +775,6 @@ rpm_put:
2067 pm_runtime_put_sync(&pdev->dev);
2068 rpm_disable:
2069 pm_runtime_disable(&pdev->dev);
2070 -clk_destroy:
2071 - pm_clk_destroy(&pdev->dev);
2072
2073 return ret;
2074 }
2075 @@ -786,6 +784,7 @@ static int tegra_adma_remove(struct platform_device *pdev)
2076 struct tegra_adma *tdma = platform_get_drvdata(pdev);
2077 int i;
2078
2079 + of_dma_controller_free(pdev->dev.of_node);
2080 dma_async_device_unregister(&tdma->dma_dev);
2081
2082 for (i = 0; i < tdma->nr_channels; ++i)
2083 @@ -793,7 +792,6 @@ static int tegra_adma_remove(struct platform_device *pdev)
2084
2085 pm_runtime_put_sync(&pdev->dev);
2086 pm_runtime_disable(&pdev->dev);
2087 - pm_clk_destroy(&pdev->dev);
2088
2089 return 0;
2090 }
2091 diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
2092 index da0e9bc4262f..9327479c719c 100644
2093 --- a/drivers/extcon/extcon-arizona.c
2094 +++ b/drivers/extcon/extcon-arizona.c
2095 @@ -1726,6 +1726,16 @@ static int arizona_extcon_remove(struct platform_device *pdev)
2096 struct arizona_extcon_info *info = platform_get_drvdata(pdev);
2097 struct arizona *arizona = info->arizona;
2098 int jack_irq_rise, jack_irq_fall;
2099 + bool change;
2100 +
2101 + regmap_update_bits_check(arizona->regmap, ARIZONA_MIC_DETECT_1,
2102 + ARIZONA_MICD_ENA, 0,
2103 + &change);
2104 +
2105 + if (change) {
2106 + regulator_disable(info->micvdd);
2107 + pm_runtime_put(info->dev);
2108 + }
2109
2110 gpiod_put(info->micd_pol_gpio);
2111
2112 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2113 index 7056925eb386..869ff624b108 100644
2114 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2115 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
2116 @@ -136,8 +136,9 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
2117 {
2118 struct amdgpu_device *adev = ring->adev;
2119 struct amdgpu_fence *fence;
2120 - struct dma_fence *old, **ptr;
2121 + struct dma_fence __rcu **ptr;
2122 uint32_t seq;
2123 + int r;
2124
2125 fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
2126 if (fence == NULL)
2127 @@ -153,15 +154,24 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, struct dma_fence **f,
2128 seq, flags | AMDGPU_FENCE_FLAG_INT);
2129
2130 ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
2131 + if (unlikely(rcu_dereference_protected(*ptr, 1))) {
2132 + struct dma_fence *old;
2133 +
2134 + rcu_read_lock();
2135 + old = dma_fence_get_rcu_safe(ptr);
2136 + rcu_read_unlock();
2137 +
2138 + if (old) {
2139 + r = dma_fence_wait(old, false);
2140 + dma_fence_put(old);
2141 + if (r)
2142 + return r;
2143 + }
2144 + }
2145 +
2146 /* This function can't be called concurrently anyway, otherwise
2147 * emitting the fence would mess up the hardware ring buffer.
2148 */
2149 - old = rcu_dereference_protected(*ptr, 1);
2150 - if (old && !dma_fence_is_signaled(old)) {
2151 - DRM_INFO("rcu slot is busy\n");
2152 - dma_fence_wait(old, false);
2153 - }
2154 -
2155 rcu_assign_pointer(*ptr, dma_fence_get(&fence->base));
2156
2157 *f = &fence->base;
2158 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2159 index 76ee2de43ea6..dac7978f5ee1 100644
2160 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2161 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
2162 @@ -4369,8 +4369,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
2163 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
2164 struct dc_stream_state *stream_state)
2165 {
2166 - stream_state->mode_changed =
2167 - crtc_state->mode_changed || crtc_state->active_changed;
2168 + stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state);
2169 }
2170
2171 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
2172 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2173 index 87bf422f16be..e0a96abb3c46 100644
2174 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2175 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
2176 @@ -1401,10 +1401,12 @@ bool dc_remove_plane_from_context(
2177 * For head pipe detach surfaces from pipe for tail
2178 * pipe just zero it out
2179 */
2180 - if (!pipe_ctx->top_pipe) {
2181 + if (!pipe_ctx->top_pipe ||
2182 + (!pipe_ctx->top_pipe->top_pipe &&
2183 + pipe_ctx->top_pipe->stream_res.opp != pipe_ctx->stream_res.opp)) {
2184 pipe_ctx->plane_state = NULL;
2185 pipe_ctx->bottom_pipe = NULL;
2186 - } else {
2187 + } else {
2188 memset(pipe_ctx, 0, sizeof(*pipe_ctx));
2189 }
2190 }
2191 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
2192 index 4a863a5dab41..321af9af95e8 100644
2193 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
2194 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp_dscl.c
2195 @@ -406,15 +406,25 @@ void dpp1_dscl_calc_lb_num_partitions(
2196 int *num_part_y,
2197 int *num_part_c)
2198 {
2199 + int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a,
2200 + lb_bpc, memory_line_size_y, memory_line_size_c, memory_line_size_a;
2201 +
2202 int line_size = scl_data->viewport.width < scl_data->recout.width ?
2203 scl_data->viewport.width : scl_data->recout.width;
2204 int line_size_c = scl_data->viewport_c.width < scl_data->recout.width ?
2205 scl_data->viewport_c.width : scl_data->recout.width;
2206 - int lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
2207 - int memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
2208 - int memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
2209 - int memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
2210 - int lb_memory_size, lb_memory_size_c, lb_memory_size_a, num_partitions_a;
2211 +
2212 + if (line_size == 0)
2213 + line_size = 1;
2214 +
2215 + if (line_size_c == 0)
2216 + line_size_c = 1;
2217 +
2218 +
2219 + lb_bpc = dpp1_dscl_get_lb_depth_bpc(scl_data->lb_params.depth);
2220 + memory_line_size_y = (line_size * lb_bpc + 71) / 72; /* +71 to ceil */
2221 + memory_line_size_c = (line_size_c * lb_bpc + 71) / 72; /* +71 to ceil */
2222 + memory_line_size_a = (line_size + 5) / 6; /* +5 to ceil */
2223
2224 if (lb_config == LB_MEMORY_CONFIG_1) {
2225 lb_memory_size = 816;
2226 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
2227 index 0201ccb22f4c..d8ae4ca129c7 100644
2228 --- a/drivers/gpu/drm/drm_drv.c
2229 +++ b/drivers/gpu/drm/drm_drv.c
2230 @@ -499,7 +499,7 @@ int drm_dev_init(struct drm_device *dev,
2231 }
2232
2233 kref_init(&dev->ref);
2234 - dev->dev = parent;
2235 + dev->dev = get_device(parent);
2236 dev->driver = driver;
2237
2238 INIT_LIST_HEAD(&dev->filelist);
2239 @@ -568,6 +568,7 @@ err_minors:
2240 drm_minor_free(dev, DRM_MINOR_RENDER);
2241 drm_fs_inode_free(dev->anon_inode);
2242 err_free:
2243 + put_device(dev->dev);
2244 mutex_destroy(&dev->master_mutex);
2245 mutex_destroy(&dev->ctxlist_mutex);
2246 mutex_destroy(&dev->clientlist_mutex);
2247 @@ -603,6 +604,8 @@ void drm_dev_fini(struct drm_device *dev)
2248 drm_minor_free(dev, DRM_MINOR_PRIMARY);
2249 drm_minor_free(dev, DRM_MINOR_RENDER);
2250
2251 + put_device(dev->dev);
2252 +
2253 mutex_destroy(&dev->master_mutex);
2254 mutex_destroy(&dev->ctxlist_mutex);
2255 mutex_destroy(&dev->clientlist_mutex);
2256 diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
2257 index e4ccb52c67ea..334addaca9c5 100644
2258 --- a/drivers/gpu/drm/drm_file.c
2259 +++ b/drivers/gpu/drm/drm_file.c
2260 @@ -567,6 +567,7 @@ put_back_event:
2261 file_priv->event_space -= length;
2262 list_add(&e->link, &file_priv->event_list);
2263 spin_unlock_irq(&dev->event_lock);
2264 + wake_up_interruptible(&file_priv->event_wait);
2265 break;
2266 }
2267
2268 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
2269 index 83c1f46670bf..00675fcbffa2 100644
2270 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
2271 +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
2272 @@ -527,6 +527,9 @@ static int etnaviv_bind(struct device *dev)
2273 }
2274 drm->dev_private = priv;
2275
2276 + dev->dma_parms = &priv->dma_parms;
2277 + dma_set_max_seg_size(dev, SZ_2G);
2278 +
2279 mutex_init(&priv->gem_lock);
2280 INIT_LIST_HEAD(&priv->gem_list);
2281 priv->num_gpus = 0;
2282 @@ -564,6 +567,8 @@ static void etnaviv_unbind(struct device *dev)
2283
2284 component_unbind_all(dev, drm);
2285
2286 + dev->dma_parms = NULL;
2287 +
2288 drm->dev_private = NULL;
2289 kfree(priv);
2290
2291 diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
2292 index 8d02d1b7dcf5..b2930d1fe97c 100644
2293 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
2294 +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
2295 @@ -43,6 +43,7 @@ struct etnaviv_file_private {
2296
2297 struct etnaviv_drm_private {
2298 int num_gpus;
2299 + struct device_dma_parameters dma_parms;
2300 struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
2301
2302 /* list of GEM objects: */
2303 diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2304 index ab1d9308c311..ba6f3c14495c 100644
2305 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2306 +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
2307 @@ -35,7 +35,7 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
2308 {
2309 struct device *dev = &gpu->pdev->dev;
2310 const struct firmware *fw;
2311 - struct device_node *np;
2312 + struct device_node *np, *mem_np;
2313 struct resource r;
2314 phys_addr_t mem_phys;
2315 ssize_t mem_size;
2316 @@ -49,11 +49,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
2317 if (!np)
2318 return -ENODEV;
2319
2320 - np = of_parse_phandle(np, "memory-region", 0);
2321 - if (!np)
2322 + mem_np = of_parse_phandle(np, "memory-region", 0);
2323 + of_node_put(np);
2324 + if (!mem_np)
2325 return -EINVAL;
2326
2327 - ret = of_address_to_resource(np, 0, &r);
2328 + ret = of_address_to_resource(mem_np, 0, &r);
2329 + of_node_put(mem_np);
2330 if (ret)
2331 return ret;
2332
2333 diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
2334 index 157b076a1272..38c9c086754b 100644
2335 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
2336 +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bar/nv50.c
2337 @@ -109,7 +109,7 @@ nv50_bar_oneinit(struct nvkm_bar *base)
2338 struct nvkm_device *device = bar->base.subdev.device;
2339 static struct lock_class_key bar1_lock;
2340 static struct lock_class_key bar2_lock;
2341 - u64 start, limit;
2342 + u64 start, limit, size;
2343 int ret;
2344
2345 ret = nvkm_gpuobj_new(device, 0x20000, 0, false, NULL, &bar->mem);
2346 @@ -127,7 +127,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
2347
2348 /* BAR2 */
2349 start = 0x0100000000ULL;
2350 - limit = start + device->func->resource_size(device, 3);
2351 + size = device->func->resource_size(device, 3);
2352 + if (!size)
2353 + return -ENOMEM;
2354 + limit = start + size;
2355
2356 ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
2357 &bar2_lock, "bar2", &bar->bar2_vmm);
2358 @@ -164,7 +167,10 @@ nv50_bar_oneinit(struct nvkm_bar *base)
2359
2360 /* BAR1 */
2361 start = 0x0000000000ULL;
2362 - limit = start + device->func->resource_size(device, 1);
2363 + size = device->func->resource_size(device, 1);
2364 + if (!size)
2365 + return -ENOMEM;
2366 + limit = start + size;
2367
2368 ret = nvkm_vmm_new(device, start, limit-- - start, NULL, 0,
2369 &bar1_lock, "bar1", &bar->bar1_vmm);
2370 diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
2371 index 74467b308721..8160954ebc25 100644
2372 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c
2373 +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
2374 @@ -1386,12 +1386,9 @@ static int dsi_pll_enable(struct dss_pll *pll)
2375 */
2376 dsi_enable_scp_clk(dsi);
2377
2378 - if (!dsi->vdds_dsi_enabled) {
2379 - r = regulator_enable(dsi->vdds_dsi_reg);
2380 - if (r)
2381 - goto err0;
2382 - dsi->vdds_dsi_enabled = true;
2383 - }
2384 + r = regulator_enable(dsi->vdds_dsi_reg);
2385 + if (r)
2386 + goto err0;
2387
2388 /* XXX PLL does not come out of reset without this... */
2389 dispc_pck_free_enable(dsi->dss->dispc, 1);
2390 @@ -1416,36 +1413,25 @@ static int dsi_pll_enable(struct dss_pll *pll)
2391
2392 return 0;
2393 err1:
2394 - if (dsi->vdds_dsi_enabled) {
2395 - regulator_disable(dsi->vdds_dsi_reg);
2396 - dsi->vdds_dsi_enabled = false;
2397 - }
2398 + regulator_disable(dsi->vdds_dsi_reg);
2399 err0:
2400 dsi_disable_scp_clk(dsi);
2401 dsi_runtime_put(dsi);
2402 return r;
2403 }
2404
2405 -static void dsi_pll_uninit(struct dsi_data *dsi, bool disconnect_lanes)
2406 +static void dsi_pll_disable(struct dss_pll *pll)
2407 {
2408 + struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
2409 +
2410 dsi_pll_power(dsi, DSI_PLL_POWER_OFF);
2411 - if (disconnect_lanes) {
2412 - WARN_ON(!dsi->vdds_dsi_enabled);
2413 - regulator_disable(dsi->vdds_dsi_reg);
2414 - dsi->vdds_dsi_enabled = false;
2415 - }
2416 +
2417 + regulator_disable(dsi->vdds_dsi_reg);
2418
2419 dsi_disable_scp_clk(dsi);
2420 dsi_runtime_put(dsi);
2421
2422 - DSSDBG("PLL uninit done\n");
2423 -}
2424 -
2425 -static void dsi_pll_disable(struct dss_pll *pll)
2426 -{
2427 - struct dsi_data *dsi = container_of(pll, struct dsi_data, pll);
2428 -
2429 - dsi_pll_uninit(dsi, true);
2430 + DSSDBG("PLL disable done\n");
2431 }
2432
2433 static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
2434 @@ -4195,11 +4181,11 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
2435
2436 r = dss_pll_enable(&dsi->pll);
2437 if (r)
2438 - goto err0;
2439 + return r;
2440
2441 r = dsi_configure_dsi_clocks(dsi);
2442 if (r)
2443 - goto err1;
2444 + goto err0;
2445
2446 dss_select_dsi_clk_source(dsi->dss, dsi->module_id,
2447 dsi->module_id == 0 ?
2448 @@ -4207,6 +4193,14 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
2449
2450 DSSDBG("PLL OK\n");
2451
2452 + if (!dsi->vdds_dsi_enabled) {
2453 + r = regulator_enable(dsi->vdds_dsi_reg);
2454 + if (r)
2455 + goto err1;
2456 +
2457 + dsi->vdds_dsi_enabled = true;
2458 + }
2459 +
2460 r = dsi_cio_init(dsi);
2461 if (r)
2462 goto err2;
2463 @@ -4235,10 +4229,13 @@ static int dsi_display_init_dsi(struct dsi_data *dsi)
2464 err3:
2465 dsi_cio_uninit(dsi);
2466 err2:
2467 - dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
2468 + regulator_disable(dsi->vdds_dsi_reg);
2469 + dsi->vdds_dsi_enabled = false;
2470 err1:
2471 - dss_pll_disable(&dsi->pll);
2472 + dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
2473 err0:
2474 + dss_pll_disable(&dsi->pll);
2475 +
2476 return r;
2477 }
2478
2479 @@ -4257,7 +4254,12 @@ static void dsi_display_uninit_dsi(struct dsi_data *dsi, bool disconnect_lanes,
2480
2481 dss_select_dsi_clk_source(dsi->dss, dsi->module_id, DSS_CLK_SRC_FCK);
2482 dsi_cio_uninit(dsi);
2483 - dsi_pll_uninit(dsi, disconnect_lanes);
2484 + dss_pll_disable(&dsi->pll);
2485 +
2486 + if (disconnect_lanes) {
2487 + regulator_disable(dsi->vdds_dsi_reg);
2488 + dsi->vdds_dsi_enabled = false;
2489 + }
2490 }
2491
2492 static int dsi_display_enable(struct omap_dss_device *dssdev)
2493 diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
2494 index 87fa316e1d7b..58ccf648b70f 100644
2495 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
2496 +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c
2497 @@ -248,6 +248,9 @@ static int otm8009a_init_sequence(struct otm8009a *ctx)
2498 /* Send Command GRAM memory write (no parameters) */
2499 dcs_write_seq(ctx, MIPI_DCS_WRITE_MEMORY_START);
2500
2501 + /* Wait a short while to let the panel be ready before the 1st frame */
2502 + mdelay(10);
2503 +
2504 return 0;
2505 }
2506
2507 diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
2508 index b9baefdba38a..1c318ad32a8c 100644
2509 --- a/drivers/gpu/drm/pl111/pl111_versatile.c
2510 +++ b/drivers/gpu/drm/pl111/pl111_versatile.c
2511 @@ -330,6 +330,7 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
2512 ret = vexpress_muxfpga_init();
2513 if (ret) {
2514 dev_err(dev, "unable to initialize muxfpga driver\n");
2515 + of_node_put(np);
2516 return ret;
2517 }
2518
2519 @@ -337,17 +338,20 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
2520 pdev = of_find_device_by_node(np);
2521 if (!pdev) {
2522 dev_err(dev, "can't find the sysreg device, deferring\n");
2523 + of_node_put(np);
2524 return -EPROBE_DEFER;
2525 }
2526 map = dev_get_drvdata(&pdev->dev);
2527 if (!map) {
2528 dev_err(dev, "sysreg has not yet probed\n");
2529 platform_device_put(pdev);
2530 + of_node_put(np);
2531 return -EPROBE_DEFER;
2532 }
2533 } else {
2534 map = syscon_node_to_regmap(np);
2535 }
2536 + of_node_put(np);
2537
2538 if (IS_ERR(map)) {
2539 dev_err(dev, "no Versatile syscon regmap\n");
2540 diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
2541 index e3b34a345546..97a0573cc514 100644
2542 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
2543 +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
2544 @@ -357,7 +357,13 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi,
2545 static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi,
2546 struct drm_display_mode *mode)
2547 {
2548 - return mode->vtotal - (mode->vsync_end - mode->vdisplay) + 1;
2549 + u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100);
2550 + u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start;
2551 +
2552 + if (delay > mode->vtotal)
2553 + delay = delay % mode->vtotal;
2554 +
2555 + return max_t(u16, delay, 1);
2556 }
2557
2558 static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi,
2559 diff --git a/drivers/gpu/drm/tinydrm/ili9225.c b/drivers/gpu/drm/tinydrm/ili9225.c
2560 index 455fefe012f5..6044a01069ce 100644
2561 --- a/drivers/gpu/drm/tinydrm/ili9225.c
2562 +++ b/drivers/gpu/drm/tinydrm/ili9225.c
2563 @@ -278,7 +278,7 @@ static void ili9225_pipe_disable(struct drm_simple_display_pipe *pipe)
2564 mipi->enabled = false;
2565 }
2566
2567 -static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
2568 +static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
2569 size_t num)
2570 {
2571 struct spi_device *spi = mipi->spi;
2572 @@ -288,11 +288,11 @@ static int ili9225_dbi_command(struct mipi_dbi *mipi, u8 cmd, u8 *par,
2573
2574 gpiod_set_value_cansleep(mipi->dc, 0);
2575 speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
2576 - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
2577 + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
2578 if (ret || !num)
2579 return ret;
2580
2581 - if (cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
2582 + if (*cmd == ILI9225_WRITE_DATA_TO_GRAM && !mipi->swap_bytes)
2583 bpw = 16;
2584
2585 gpiod_set_value_cansleep(mipi->dc, 1);
2586 diff --git a/drivers/gpu/drm/tinydrm/mipi-dbi.c b/drivers/gpu/drm/tinydrm/mipi-dbi.c
2587 index cb3441e51d5f..e772a8a9da80 100644
2588 --- a/drivers/gpu/drm/tinydrm/mipi-dbi.c
2589 +++ b/drivers/gpu/drm/tinydrm/mipi-dbi.c
2590 @@ -144,16 +144,42 @@ EXPORT_SYMBOL(mipi_dbi_command_read);
2591 */
2592 int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
2593 {
2594 + u8 *cmdbuf;
2595 int ret;
2596
2597 + /* SPI requires dma-safe buffers */
2598 + cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);
2599 + if (!cmdbuf)
2600 + return -ENOMEM;
2601 +
2602 mutex_lock(&mipi->cmdlock);
2603 - ret = mipi->command(mipi, cmd, data, len);
2604 + ret = mipi->command(mipi, cmdbuf, data, len);
2605 mutex_unlock(&mipi->cmdlock);
2606
2607 + kfree(cmdbuf);
2608 +
2609 return ret;
2610 }
2611 EXPORT_SYMBOL(mipi_dbi_command_buf);
2612
2613 +/* This should only be used by mipi_dbi_command() */
2614 +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len)
2615 +{
2616 + u8 *buf;
2617 + int ret;
2618 +
2619 + buf = kmemdup(data, len, GFP_KERNEL);
2620 + if (!buf)
2621 + return -ENOMEM;
2622 +
2623 + ret = mipi_dbi_command_buf(mipi, cmd, buf, len);
2624 +
2625 + kfree(buf);
2626 +
2627 + return ret;
2628 +}
2629 +EXPORT_SYMBOL(mipi_dbi_command_stackbuf);
2630 +
2631 /**
2632 * mipi_dbi_buf_copy - Copy a framebuffer, transforming it if necessary
2633 * @dst: The destination buffer
2634 @@ -741,18 +767,18 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *mipi, int dc,
2635 return 0;
2636 }
2637
2638 -static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
2639 +static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 *cmd,
2640 u8 *parameters, size_t num)
2641 {
2642 - unsigned int bpw = (cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
2643 + unsigned int bpw = (*cmd == MIPI_DCS_WRITE_MEMORY_START) ? 16 : 8;
2644 int ret;
2645
2646 - if (mipi_dbi_command_is_read(mipi, cmd))
2647 + if (mipi_dbi_command_is_read(mipi, *cmd))
2648 return -ENOTSUPP;
2649
2650 - MIPI_DBI_DEBUG_COMMAND(cmd, parameters, num);
2651 + MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num);
2652
2653 - ret = mipi_dbi_spi1_transfer(mipi, 0, &cmd, 1, 8);
2654 + ret = mipi_dbi_spi1_transfer(mipi, 0, cmd, 1, 8);
2655 if (ret || !num)
2656 return ret;
2657
2658 @@ -761,7 +787,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *mipi, u8 cmd,
2659
2660 /* MIPI DBI Type C Option 3 */
2661
2662 -static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
2663 +static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 *cmd,
2664 u8 *data, size_t len)
2665 {
2666 struct spi_device *spi = mipi->spi;
2667 @@ -770,7 +796,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
2668 struct spi_transfer tr[2] = {
2669 {
2670 .speed_hz = speed_hz,
2671 - .tx_buf = &cmd,
2672 + .tx_buf = cmd,
2673 .len = 1,
2674 }, {
2675 .speed_hz = speed_hz,
2676 @@ -788,8 +814,8 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
2677 * Support non-standard 24-bit and 32-bit Nokia read commands which
2678 * start with a dummy clock, so we need to read an extra byte.
2679 */
2680 - if (cmd == MIPI_DCS_GET_DISPLAY_ID ||
2681 - cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
2682 + if (*cmd == MIPI_DCS_GET_DISPLAY_ID ||
2683 + *cmd == MIPI_DCS_GET_DISPLAY_STATUS) {
2684 if (!(len == 3 || len == 4))
2685 return -EINVAL;
2686
2687 @@ -819,7 +845,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *mipi, u8 cmd,
2688 data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7));
2689 }
2690
2691 - MIPI_DBI_DEBUG_COMMAND(cmd, data, len);
2692 + MIPI_DBI_DEBUG_COMMAND(*cmd, data, len);
2693
2694 err_free:
2695 kfree(buf);
2696 @@ -827,7 +853,7 @@ err_free:
2697 return ret;
2698 }
2699
2700 -static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
2701 +static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 *cmd,
2702 u8 *par, size_t num)
2703 {
2704 struct spi_device *spi = mipi->spi;
2705 @@ -835,18 +861,18 @@ static int mipi_dbi_typec3_command(struct mipi_dbi *mipi, u8 cmd,
2706 u32 speed_hz;
2707 int ret;
2708
2709 - if (mipi_dbi_command_is_read(mipi, cmd))
2710 + if (mipi_dbi_command_is_read(mipi, *cmd))
2711 return mipi_dbi_typec3_command_read(mipi, cmd, par, num);
2712
2713 - MIPI_DBI_DEBUG_COMMAND(cmd, par, num);
2714 + MIPI_DBI_DEBUG_COMMAND(*cmd, par, num);
2715
2716 gpiod_set_value_cansleep(mipi->dc, 0);
2717 speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 1);
2718 - ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, &cmd, 1);
2719 + ret = tinydrm_spi_transfer(spi, speed_hz, NULL, 8, cmd, 1);
2720 if (ret || !num)
2721 return ret;
2722
2723 - if (cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
2724 + if (*cmd == MIPI_DCS_WRITE_MEMORY_START && !mipi->swap_bytes)
2725 bpw = 16;
2726
2727 gpiod_set_value_cansleep(mipi->dc, 1);
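
Note on the mipi-dbi hunks above: they stop passing the command byte on the stack, because SPI transfer buffers may be DMA-mapped; the byte is first duplicated into heap memory and the ->command() hook now takes a pointer. A minimal sketch of that idea, with struct example_dbi as an assumed stand-in for struct mipi_dbi:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct example_dbi {
        /* Takes a pointer to the command byte so a DMA-safe buffer can be used. */
        int (*command)(struct example_dbi *dbi, u8 *cmd, u8 *par, size_t num);
    };

    static int send_command(struct example_dbi *dbi, u8 cmd, u8 *data, size_t len)
    {
        u8 *cmdbuf;
        int ret;

        cmdbuf = kmemdup(&cmd, 1, GFP_KERNEL);  /* heap copy is safe to DMA-map */
        if (!cmdbuf)
            return -ENOMEM;

        ret = dbi->command(dbi, cmdbuf, data, len);
        kfree(cmdbuf);
        return ret;
    }
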
2728 diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
2729 index 2a85fa68ffea..2a4c6187e675 100644
2730 --- a/drivers/gpu/drm/v3d/v3d_drv.c
2731 +++ b/drivers/gpu/drm/v3d/v3d_drv.c
2732 @@ -305,14 +305,18 @@ static int v3d_platform_drm_probe(struct platform_device *pdev)
2733 if (ret)
2734 goto dev_destroy;
2735
2736 - v3d_irq_init(v3d);
2737 + ret = v3d_irq_init(v3d);
2738 + if (ret)
2739 + goto gem_destroy;
2740
2741 ret = drm_dev_register(drm, 0);
2742 if (ret)
2743 - goto gem_destroy;
2744 + goto irq_disable;
2745
2746 return 0;
2747
2748 +irq_disable:
2749 + v3d_irq_disable(v3d);
2750 gem_destroy:
2751 v3d_gem_destroy(drm);
2752 dev_destroy:
2753 diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
2754 index e6fed696ad86..0ad73f4b7509 100644
2755 --- a/drivers/gpu/drm/v3d/v3d_drv.h
2756 +++ b/drivers/gpu/drm/v3d/v3d_drv.h
2757 @@ -284,7 +284,7 @@ void v3d_invalidate_caches(struct v3d_dev *v3d);
2758 void v3d_flush_caches(struct v3d_dev *v3d);
2759
2760 /* v3d_irq.c */
2761 -void v3d_irq_init(struct v3d_dev *v3d);
2762 +int v3d_irq_init(struct v3d_dev *v3d);
2763 void v3d_irq_enable(struct v3d_dev *v3d);
2764 void v3d_irq_disable(struct v3d_dev *v3d);
2765 void v3d_irq_reset(struct v3d_dev *v3d);
2766 diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
2767 index e07514eb11b5..22be0f2dff99 100644
2768 --- a/drivers/gpu/drm/v3d/v3d_irq.c
2769 +++ b/drivers/gpu/drm/v3d/v3d_irq.c
2770 @@ -137,7 +137,7 @@ v3d_hub_irq(int irq, void *arg)
2771 return status;
2772 }
2773
2774 -void
2775 +int
2776 v3d_irq_init(struct v3d_dev *v3d)
2777 {
2778 int ret, core;
2779 @@ -154,13 +154,22 @@ v3d_irq_init(struct v3d_dev *v3d)
2780 ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 0),
2781 v3d_hub_irq, IRQF_SHARED,
2782 "v3d_hub", v3d);
2783 + if (ret)
2784 + goto fail;
2785 +
2786 ret = devm_request_irq(v3d->dev, platform_get_irq(v3d->pdev, 1),
2787 v3d_irq, IRQF_SHARED,
2788 "v3d_core0", v3d);
2789 if (ret)
2790 - dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
2791 + goto fail;
2792
2793 v3d_irq_enable(v3d);
2794 + return 0;
2795 +
2796 +fail:
2797 + if (ret != -EPROBE_DEFER)
2798 + dev_err(v3d->dev, "IRQ setup failed: %d\n", ret);
2799 + return ret;
2800 }
2801
2802 void
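
Note on the v3d hunks above: v3d_irq_init() now returns an error instead of continuing after a failed IRQ request, and probe deferral is not logged as an error. A minimal sketch of that error handling, with the example_* names as assumptions:

    #include <linux/device.h>
    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static int example_irq_init(struct device *dev, struct platform_device *pdev,
                                irq_handler_t hub_handler, irq_handler_t core_handler,
                                void *priv)
    {
        int ret;

        ret = devm_request_irq(dev, platform_get_irq(pdev, 0),
                               hub_handler, IRQF_SHARED, "example_hub", priv);
        if (ret)
            goto fail;

        ret = devm_request_irq(dev, platform_get_irq(pdev, 1),
                               core_handler, IRQF_SHARED, "example_core0", priv);
        if (ret)
            goto fail;

        return 0;

    fail:
        if (ret != -EPROBE_DEFER)           /* deferral is expected; don't log it as an error */
            dev_err(dev, "IRQ setup failed: %d\n", ret);
        return ret;
    }

The probe path can then unwind (disabling IRQs before tearing down the GEM state) rather than registering a device whose interrupts never came up.
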
2803 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
2804 index 44564f61e9cc..861375561156 100644
2805 --- a/drivers/hid/hid-core.c
2806 +++ b/drivers/hid/hid-core.c
2807 @@ -215,13 +215,14 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
2808 * Add a usage to the temporary parser table.
2809 */
2810
2811 -static int hid_add_usage(struct hid_parser *parser, unsigned usage)
2812 +static int hid_add_usage(struct hid_parser *parser, unsigned usage, u8 size)
2813 {
2814 if (parser->local.usage_index >= HID_MAX_USAGES) {
2815 hid_err(parser->device, "usage index exceeded\n");
2816 return -1;
2817 }
2818 parser->local.usage[parser->local.usage_index] = usage;
2819 + parser->local.usage_size[parser->local.usage_index] = size;
2820 parser->local.collection_index[parser->local.usage_index] =
2821 parser->collection_stack_ptr ?
2822 parser->collection_stack[parser->collection_stack_ptr - 1] : 0;
2823 @@ -482,10 +483,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
2824 return 0;
2825 }
2826
2827 - if (item->size <= 2)
2828 - data = (parser->global.usage_page << 16) + data;
2829 -
2830 - return hid_add_usage(parser, data);
2831 + return hid_add_usage(parser, data, item->size);
2832
2833 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
2834
2835 @@ -494,9 +492,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
2836 return 0;
2837 }
2838
2839 - if (item->size <= 2)
2840 - data = (parser->global.usage_page << 16) + data;
2841 -
2842 parser->local.usage_minimum = data;
2843 return 0;
2844
2845 @@ -507,9 +502,6 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
2846 return 0;
2847 }
2848
2849 - if (item->size <= 2)
2850 - data = (parser->global.usage_page << 16) + data;
2851 -
2852 count = data - parser->local.usage_minimum;
2853 if (count + parser->local.usage_index >= HID_MAX_USAGES) {
2854 /*
2855 @@ -529,7 +521,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
2856 }
2857
2858 for (n = parser->local.usage_minimum; n <= data; n++)
2859 - if (hid_add_usage(parser, n)) {
2860 + if (hid_add_usage(parser, n, item->size)) {
2861 dbg_hid("hid_add_usage failed\n");
2862 return -1;
2863 }
2864 @@ -543,6 +535,22 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
2865 return 0;
2866 }
2867
2868 +/*
2869 + * Concatenate Usage Pages into Usages where relevant:
2870 + * As per specification, 6.2.2.8: "When the parser encounters a main item it
2871 + * concatenates the last declared Usage Page with a Usage to form a complete
2872 + * usage value."
2873 + */
2874 +
2875 +static void hid_concatenate_usage_page(struct hid_parser *parser)
2876 +{
2877 + int i;
2878 +
2879 + for (i = 0; i < parser->local.usage_index; i++)
2880 + if (parser->local.usage_size[i] <= 2)
2881 + parser->local.usage[i] += parser->global.usage_page << 16;
2882 +}
2883 +
2884 /*
2885 * Process a main item.
2886 */
2887 @@ -552,6 +560,8 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
2888 __u32 data;
2889 int ret;
2890
2891 + hid_concatenate_usage_page(parser);
2892 +
2893 data = item_udata(item);
2894
2895 switch (item->tag) {
2896 @@ -761,6 +771,8 @@ static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
2897 __u32 data;
2898 int i;
2899
2900 + hid_concatenate_usage_page(parser);
2901 +
2902 data = item_udata(item);
2903
2904 switch (item->tag) {
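
Note on the hid-core hunks above: combining the Usage Page with Usage items is deferred until a Main item is processed, and applies only to usages declared with a 1- or 2-byte item; 4-byte usages already carry their page. A small illustration of that rule, written against kernel types:

    #include <linux/types.h>

    /* Extend a short Usage with the last declared Usage Page (HID 1.11, 6.2.2.8). */
    static u32 make_full_usage(u16 usage_page, u32 usage, u8 item_size)
    {
        if (item_size <= 2)                 /* 1- or 2-byte Usage item: page is implicit */
            usage += (u32)usage_page << 16;
        return usage;                       /* 4-byte Usage items already encode the page */
    }

    /*
     * make_full_usage(0x0001, 0x0002, 1)      -> 0x00010002 (Generic Desktop / Mouse)
     * make_full_usage(0x0001, 0x000d0004, 4)  -> 0x000d0004 (page comes from the item itself)
     */
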
2905 diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
2906 index 8425d3548a41..e642cfaf303b 100644
2907 --- a/drivers/hid/hid-logitech-hidpp.c
2908 +++ b/drivers/hid/hid-logitech-hidpp.c
2909 @@ -725,13 +725,16 @@ static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
2910
2911 static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
2912 {
2913 + const u8 ping_byte = 0x5a;
2914 + u8 ping_data[3] = { 0, 0, ping_byte };
2915 struct hidpp_report response;
2916 int ret;
2917
2918 - ret = hidpp_send_fap_command_sync(hidpp,
2919 + ret = hidpp_send_rap_command_sync(hidpp,
2920 + REPORT_ID_HIDPP_SHORT,
2921 HIDPP_PAGE_ROOT_IDX,
2922 CMD_ROOT_GET_PROTOCOL_VERSION,
2923 - NULL, 0, &response);
2924 + ping_data, sizeof(ping_data), &response);
2925
2926 if (ret == HIDPP_ERROR_INVALID_SUBID) {
2927 hidpp->protocol_major = 1;
2928 @@ -751,8 +754,14 @@ static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
2929 if (ret)
2930 return ret;
2931
2932 - hidpp->protocol_major = response.fap.params[0];
2933 - hidpp->protocol_minor = response.fap.params[1];
2934 + if (response.rap.params[2] != ping_byte) {
2935 + hid_err(hidpp->hid_dev, "%s: ping mismatch 0x%02x != 0x%02x\n",
2936 + __func__, response.rap.params[2], ping_byte);
2937 + return -EPROTO;
2938 + }
2939 +
2940 + hidpp->protocol_major = response.rap.params[0];
2941 + hidpp->protocol_minor = response.rap.params[1];
2942
2943 return ret;
2944 }
2945 @@ -901,7 +910,11 @@ static int hidpp_map_battery_level(int capacity)
2946 {
2947 if (capacity < 11)
2948 return POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
2949 - else if (capacity < 31)
2950 + /*
2951 + * The spec says this should be < 31 but some devices report 30
2952 + * with brand new batteries and Windows reports 30 as "Good".
2953 + */
2954 + else if (capacity < 30)
2955 return POWER_SUPPLY_CAPACITY_LEVEL_LOW;
2956 else if (capacity < 81)
2957 return POWER_SUPPLY_CAPACITY_LEVEL_NORMAL;
2958 diff --git a/drivers/hwmon/f71805f.c b/drivers/hwmon/f71805f.c
2959 index 73c681162653..623736d2a7c1 100644
2960 --- a/drivers/hwmon/f71805f.c
2961 +++ b/drivers/hwmon/f71805f.c
2962 @@ -96,17 +96,23 @@ superio_select(int base, int ld)
2963 outb(ld, base + 1);
2964 }
2965
2966 -static inline void
2967 +static inline int
2968 superio_enter(int base)
2969 {
2970 + if (!request_muxed_region(base, 2, DRVNAME))
2971 + return -EBUSY;
2972 +
2973 outb(0x87, base);
2974 outb(0x87, base);
2975 +
2976 + return 0;
2977 }
2978
2979 static inline void
2980 superio_exit(int base)
2981 {
2982 outb(0xaa, base);
2983 + release_region(base, 2);
2984 }
2985
2986 /*
2987 @@ -1561,7 +1567,7 @@ exit:
2988 static int __init f71805f_find(int sioaddr, unsigned short *address,
2989 struct f71805f_sio_data *sio_data)
2990 {
2991 - int err = -ENODEV;
2992 + int err;
2993 u16 devid;
2994
2995 static const char * const names[] = {
2996 @@ -1569,8 +1575,11 @@ static int __init f71805f_find(int sioaddr, unsigned short *address,
2997 "F71872F/FG or F71806F/FG",
2998 };
2999
3000 - superio_enter(sioaddr);
3001 + err = superio_enter(sioaddr);
3002 + if (err)
3003 + return err;
3004
3005 + err = -ENODEV;
3006 devid = superio_inw(sioaddr, SIO_REG_MANID);
3007 if (devid != SIO_FINTEK_ID)
3008 goto exit;
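
Note on the f71805f hunk above (the pc87427, smsc47b397, smsc47m1 and vt1211 hunks that follow apply the same fix): the Super-I/O configuration ports are shared between drivers, so superio_enter() must claim them with request_muxed_region() and report failure, and superio_exit() must release them. A minimal sketch of the pattern, with the driver name string as an assumption:

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/ioport.h>

    #define EXAMPLE_SIO_DRVNAME "example-sio"

    static int superio_enter(int base)
    {
        /* Wait for / claim the shared config ports instead of racing for them. */
        if (!request_muxed_region(base, 2, EXAMPLE_SIO_DRVNAME))
            return -EBUSY;

        outb(0x87, base);                   /* vendor "enter configuration mode" key */
        outb(0x87, base);
        return 0;
    }

    static void superio_exit(int base)
    {
        outb(0xaa, base);                   /* leave configuration mode */
        release_region(base, 2);            /* pairs with request_muxed_region() */
    }

Callers now have to check the return value of superio_enter() before touching any configuration register, which is what the *_find() changes in these hunks do.
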
3009 diff --git a/drivers/hwmon/pc87427.c b/drivers/hwmon/pc87427.c
3010 index dc5a9d5ada51..81a05cd1a512 100644
3011 --- a/drivers/hwmon/pc87427.c
3012 +++ b/drivers/hwmon/pc87427.c
3013 @@ -106,6 +106,13 @@ static const char *logdev_str[2] = { DRVNAME " FMC", DRVNAME " HMC" };
3014 #define LD_IN 1
3015 #define LD_TEMP 1
3016
3017 +static inline int superio_enter(int sioaddr)
3018 +{
3019 + if (!request_muxed_region(sioaddr, 2, DRVNAME))
3020 + return -EBUSY;
3021 + return 0;
3022 +}
3023 +
3024 static inline void superio_outb(int sioaddr, int reg, int val)
3025 {
3026 outb(reg, sioaddr);
3027 @@ -122,6 +129,7 @@ static inline void superio_exit(int sioaddr)
3028 {
3029 outb(0x02, sioaddr);
3030 outb(0x02, sioaddr + 1);
3031 + release_region(sioaddr, 2);
3032 }
3033
3034 /*
3035 @@ -1220,7 +1228,11 @@ static int __init pc87427_find(int sioaddr, struct pc87427_sio_data *sio_data)
3036 {
3037 u16 val;
3038 u8 cfg, cfg_b;
3039 - int i, err = 0;
3040 + int i, err;
3041 +
3042 + err = superio_enter(sioaddr);
3043 + if (err)
3044 + return err;
3045
3046 /* Identify device */
3047 val = force_id ? force_id : superio_inb(sioaddr, SIOREG_DEVID);
3048 diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
3049 index 6bd200756560..cbdb5c4991ae 100644
3050 --- a/drivers/hwmon/smsc47b397.c
3051 +++ b/drivers/hwmon/smsc47b397.c
3052 @@ -72,14 +72,19 @@ static inline void superio_select(int ld)
3053 superio_outb(0x07, ld);
3054 }
3055
3056 -static inline void superio_enter(void)
3057 +static inline int superio_enter(void)
3058 {
3059 + if (!request_muxed_region(REG, 2, DRVNAME))
3060 + return -EBUSY;
3061 +
3062 outb(0x55, REG);
3063 + return 0;
3064 }
3065
3066 static inline void superio_exit(void)
3067 {
3068 outb(0xAA, REG);
3069 + release_region(REG, 2);
3070 }
3071
3072 #define SUPERIO_REG_DEVID 0x20
3073 @@ -300,8 +305,12 @@ static int __init smsc47b397_find(void)
3074 u8 id, rev;
3075 char *name;
3076 unsigned short addr;
3077 + int err;
3078 +
3079 + err = superio_enter();
3080 + if (err)
3081 + return err;
3082
3083 - superio_enter();
3084 id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
3085
3086 switch (id) {
3087 diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
3088 index c7b6a425e2c0..5eeac9853d0a 100644
3089 --- a/drivers/hwmon/smsc47m1.c
3090 +++ b/drivers/hwmon/smsc47m1.c
3091 @@ -73,16 +73,21 @@ superio_inb(int reg)
3092 /* logical device for fans is 0x0A */
3093 #define superio_select() superio_outb(0x07, 0x0A)
3094
3095 -static inline void
3096 +static inline int
3097 superio_enter(void)
3098 {
3099 + if (!request_muxed_region(REG, 2, DRVNAME))
3100 + return -EBUSY;
3101 +
3102 outb(0x55, REG);
3103 + return 0;
3104 }
3105
3106 static inline void
3107 superio_exit(void)
3108 {
3109 outb(0xAA, REG);
3110 + release_region(REG, 2);
3111 }
3112
3113 #define SUPERIO_REG_ACT 0x30
3114 @@ -531,8 +536,12 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
3115 {
3116 u8 val;
3117 unsigned short addr;
3118 + int err;
3119 +
3120 + err = superio_enter();
3121 + if (err)
3122 + return err;
3123
3124 - superio_enter();
3125 val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
3126
3127 /*
3128 @@ -608,13 +617,14 @@ static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
3129 static void smsc47m1_restore(const struct smsc47m1_sio_data *sio_data)
3130 {
3131 if ((sio_data->activate & 0x01) == 0) {
3132 - superio_enter();
3133 - superio_select();
3134 -
3135 - pr_info("Disabling device\n");
3136 - superio_outb(SUPERIO_REG_ACT, sio_data->activate);
3137 -
3138 - superio_exit();
3139 + if (!superio_enter()) {
3140 + superio_select();
3141 + pr_info("Disabling device\n");
3142 + superio_outb(SUPERIO_REG_ACT, sio_data->activate);
3143 + superio_exit();
3144 + } else {
3145 + pr_warn("Failed to disable device\n");
3146 + }
3147 }
3148 }
3149
3150 diff --git a/drivers/hwmon/vt1211.c b/drivers/hwmon/vt1211.c
3151 index 3a6bfa51cb94..95d5e8ec8b7f 100644
3152 --- a/drivers/hwmon/vt1211.c
3153 +++ b/drivers/hwmon/vt1211.c
3154 @@ -226,15 +226,21 @@ static inline void superio_select(int sio_cip, int ldn)
3155 outb(ldn, sio_cip + 1);
3156 }
3157
3158 -static inline void superio_enter(int sio_cip)
3159 +static inline int superio_enter(int sio_cip)
3160 {
3161 + if (!request_muxed_region(sio_cip, 2, DRVNAME))
3162 + return -EBUSY;
3163 +
3164 outb(0x87, sio_cip);
3165 outb(0x87, sio_cip);
3166 +
3167 + return 0;
3168 }
3169
3170 static inline void superio_exit(int sio_cip)
3171 {
3172 outb(0xaa, sio_cip);
3173 + release_region(sio_cip, 2);
3174 }
3175
3176 /* ---------------------------------------------------------------------
3177 @@ -1282,11 +1288,14 @@ EXIT:
3178
3179 static int __init vt1211_find(int sio_cip, unsigned short *address)
3180 {
3181 - int err = -ENODEV;
3182 + int err;
3183 int devid;
3184
3185 - superio_enter(sio_cip);
3186 + err = superio_enter(sio_cip);
3187 + if (err)
3188 + return err;
3189
3190 + err = -ENODEV;
3191 devid = force_id ? force_id : superio_inb(sio_cip, SIO_VT1211_DEVID);
3192 if (devid != SIO_VT1211_ID)
3193 goto EXIT;
3194 diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
3195 index 4a754921fb6f..9421c1ec86f7 100644
3196 --- a/drivers/iio/adc/Kconfig
3197 +++ b/drivers/iio/adc/Kconfig
3198 @@ -696,6 +696,7 @@ config STM32_DFSDM_ADC
3199 depends on (ARCH_STM32 && OF) || COMPILE_TEST
3200 select STM32_DFSDM_CORE
3201 select REGMAP_MMIO
3202 + select IIO_BUFFER
3203 select IIO_BUFFER_HW_CONSUMER
3204 help
3205 Select this option to support ADCSigma delta modulator for
3206 diff --git a/drivers/iio/adc/ad_sigma_delta.c b/drivers/iio/adc/ad_sigma_delta.c
3207 index ae2a5097f449..25af4c76b57f 100644
3208 --- a/drivers/iio/adc/ad_sigma_delta.c
3209 +++ b/drivers/iio/adc/ad_sigma_delta.c
3210 @@ -62,7 +62,7 @@ int ad_sd_write_reg(struct ad_sigma_delta *sigma_delta, unsigned int reg,
3211 struct spi_transfer t = {
3212 .tx_buf = data,
3213 .len = size + 1,
3214 - .cs_change = sigma_delta->bus_locked,
3215 + .cs_change = sigma_delta->keep_cs_asserted,
3216 };
3217 struct spi_message m;
3218 int ret;
3219 @@ -218,6 +218,7 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
3220
3221 spi_bus_lock(sigma_delta->spi->master);
3222 sigma_delta->bus_locked = true;
3223 + sigma_delta->keep_cs_asserted = true;
3224 reinit_completion(&sigma_delta->completion);
3225
3226 ret = ad_sigma_delta_set_mode(sigma_delta, mode);
3227 @@ -235,9 +236,10 @@ static int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
3228 ret = 0;
3229 }
3230 out:
3231 + sigma_delta->keep_cs_asserted = false;
3232 + ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
3233 sigma_delta->bus_locked = false;
3234 spi_bus_unlock(sigma_delta->spi->master);
3235 - ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
3236
3237 return ret;
3238 }
3239 @@ -289,6 +291,7 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
3240
3241 spi_bus_lock(sigma_delta->spi->master);
3242 sigma_delta->bus_locked = true;
3243 + sigma_delta->keep_cs_asserted = true;
3244 reinit_completion(&sigma_delta->completion);
3245
3246 ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_SINGLE);
3247 @@ -298,9 +301,6 @@ int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
3248 ret = wait_for_completion_interruptible_timeout(
3249 &sigma_delta->completion, HZ);
3250
3251 - sigma_delta->bus_locked = false;
3252 - spi_bus_unlock(sigma_delta->spi->master);
3253 -
3254 if (ret == 0)
3255 ret = -EIO;
3256 if (ret < 0)
3257 @@ -316,7 +316,10 @@ out:
3258 sigma_delta->irq_dis = true;
3259 }
3260
3261 + sigma_delta->keep_cs_asserted = false;
3262 ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
3263 + sigma_delta->bus_locked = false;
3264 + spi_bus_unlock(sigma_delta->spi->master);
3265 mutex_unlock(&indio_dev->mlock);
3266
3267 if (ret)
3268 @@ -353,6 +356,8 @@ static int ad_sd_buffer_postenable(struct iio_dev *indio_dev)
3269
3270 spi_bus_lock(sigma_delta->spi->master);
3271 sigma_delta->bus_locked = true;
3272 + sigma_delta->keep_cs_asserted = true;
3273 +
3274 ret = ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_CONTINUOUS);
3275 if (ret)
3276 goto err_unlock;
3277 @@ -381,6 +386,7 @@ static int ad_sd_buffer_postdisable(struct iio_dev *indio_dev)
3278 sigma_delta->irq_dis = true;
3279 }
3280
3281 + sigma_delta->keep_cs_asserted = false;
3282 ad_sigma_delta_set_mode(sigma_delta, AD_SD_MODE_IDLE);
3283
3284 sigma_delta->bus_locked = false;
3285 diff --git a/drivers/iio/adc/ti-ads7950.c b/drivers/iio/adc/ti-ads7950.c
3286 index a5bd5944bc66..c9cd7e5c1b61 100644
3287 --- a/drivers/iio/adc/ti-ads7950.c
3288 +++ b/drivers/iio/adc/ti-ads7950.c
3289 @@ -56,6 +56,9 @@ struct ti_ads7950_state {
3290 struct spi_message ring_msg;
3291 struct spi_message scan_single_msg;
3292
3293 + /* Lock to protect the spi xfer buffers */
3294 + struct mutex slock;
3295 +
3296 struct regulator *reg;
3297 unsigned int vref_mv;
3298
3299 @@ -277,6 +280,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
3300 struct ti_ads7950_state *st = iio_priv(indio_dev);
3301 int ret;
3302
3303 + mutex_lock(&st->slock);
3304 ret = spi_sync(st->spi, &st->ring_msg);
3305 if (ret < 0)
3306 goto out;
3307 @@ -285,6 +289,7 @@ static irqreturn_t ti_ads7950_trigger_handler(int irq, void *p)
3308 iio_get_time_ns(indio_dev));
3309
3310 out:
3311 + mutex_unlock(&st->slock);
3312 iio_trigger_notify_done(indio_dev->trig);
3313
3314 return IRQ_HANDLED;
3315 @@ -295,7 +300,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
3316 struct ti_ads7950_state *st = iio_priv(indio_dev);
3317 int ret, cmd;
3318
3319 - mutex_lock(&indio_dev->mlock);
3320 + mutex_lock(&st->slock);
3321
3322 cmd = TI_ADS7950_CR_WRITE | TI_ADS7950_CR_CHAN(ch) | st->settings;
3323 st->single_tx = cpu_to_be16(cmd);
3324 @@ -307,7 +312,7 @@ static int ti_ads7950_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
3325 ret = be16_to_cpu(st->single_rx);
3326
3327 out:
3328 - mutex_unlock(&indio_dev->mlock);
3329 + mutex_unlock(&st->slock);
3330
3331 return ret;
3332 }
3333 @@ -423,16 +428,19 @@ static int ti_ads7950_probe(struct spi_device *spi)
3334 if (ACPI_COMPANION(&spi->dev))
3335 st->vref_mv = TI_ADS7950_VA_MV_ACPI_DEFAULT;
3336
3337 + mutex_init(&st->slock);
3338 +
3339 st->reg = devm_regulator_get(&spi->dev, "vref");
3340 if (IS_ERR(st->reg)) {
3341 dev_err(&spi->dev, "Failed get get regulator \"vref\"\n");
3342 - return PTR_ERR(st->reg);
3343 + ret = PTR_ERR(st->reg);
3344 + goto error_destroy_mutex;
3345 }
3346
3347 ret = regulator_enable(st->reg);
3348 if (ret) {
3349 dev_err(&spi->dev, "Failed to enable regulator \"vref\"\n");
3350 - return ret;
3351 + goto error_destroy_mutex;
3352 }
3353
3354 ret = iio_triggered_buffer_setup(indio_dev, NULL,
3355 @@ -454,6 +462,8 @@ error_cleanup_ring:
3356 iio_triggered_buffer_cleanup(indio_dev);
3357 error_disable_reg:
3358 regulator_disable(st->reg);
3359 +error_destroy_mutex:
3360 + mutex_destroy(&st->slock);
3361
3362 return ret;
3363 }
3364 @@ -466,6 +476,7 @@ static int ti_ads7950_remove(struct spi_device *spi)
3365 iio_device_unregister(indio_dev);
3366 iio_triggered_buffer_cleanup(indio_dev);
3367 regulator_disable(st->reg);
3368 + mutex_destroy(&st->slock);
3369
3370 return 0;
3371 }
3372 diff --git a/drivers/iio/common/ssp_sensors/ssp_iio.c b/drivers/iio/common/ssp_sensors/ssp_iio.c
3373 index 645f2e3975db..e38f704d88b7 100644
3374 --- a/drivers/iio/common/ssp_sensors/ssp_iio.c
3375 +++ b/drivers/iio/common/ssp_sensors/ssp_iio.c
3376 @@ -81,7 +81,7 @@ int ssp_common_process_data(struct iio_dev *indio_dev, void *buf,
3377 unsigned int len, int64_t timestamp)
3378 {
3379 __le32 time;
3380 - int64_t calculated_time;
3381 + int64_t calculated_time = 0;
3382 struct ssp_sensor_data *spd = iio_priv(indio_dev);
3383
3384 if (indio_dev->scan_bytes == 0)
3385 diff --git a/drivers/iio/magnetometer/hmc5843_i2c.c b/drivers/iio/magnetometer/hmc5843_i2c.c
3386 index 3de7f4426ac4..86abba5827a2 100644
3387 --- a/drivers/iio/magnetometer/hmc5843_i2c.c
3388 +++ b/drivers/iio/magnetometer/hmc5843_i2c.c
3389 @@ -58,8 +58,13 @@ static const struct regmap_config hmc5843_i2c_regmap_config = {
3390 static int hmc5843_i2c_probe(struct i2c_client *cli,
3391 const struct i2c_device_id *id)
3392 {
3393 + struct regmap *regmap = devm_regmap_init_i2c(cli,
3394 + &hmc5843_i2c_regmap_config);
3395 + if (IS_ERR(regmap))
3396 + return PTR_ERR(regmap);
3397 +
3398 return hmc5843_common_probe(&cli->dev,
3399 - devm_regmap_init_i2c(cli, &hmc5843_i2c_regmap_config),
3400 + regmap,
3401 id->driver_data, id->name);
3402 }
3403
3404 diff --git a/drivers/iio/magnetometer/hmc5843_spi.c b/drivers/iio/magnetometer/hmc5843_spi.c
3405 index 535f03a70d63..79b2b707f90e 100644
3406 --- a/drivers/iio/magnetometer/hmc5843_spi.c
3407 +++ b/drivers/iio/magnetometer/hmc5843_spi.c
3408 @@ -58,6 +58,7 @@ static const struct regmap_config hmc5843_spi_regmap_config = {
3409 static int hmc5843_spi_probe(struct spi_device *spi)
3410 {
3411 int ret;
3412 + struct regmap *regmap;
3413 const struct spi_device_id *id = spi_get_device_id(spi);
3414
3415 spi->mode = SPI_MODE_3;
3416 @@ -67,8 +68,12 @@ static int hmc5843_spi_probe(struct spi_device *spi)
3417 if (ret)
3418 return ret;
3419
3420 + regmap = devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config);
3421 + if (IS_ERR(regmap))
3422 + return PTR_ERR(regmap);
3423 +
3424 return hmc5843_common_probe(&spi->dev,
3425 - devm_regmap_init_spi(spi, &hmc5843_spi_regmap_config),
3426 + regmap,
3427 id->driver_data, id->name);
3428 }
3429
3430 diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
3431 index 6f5be7802476..39dc7be56884 100644
3432 --- a/drivers/infiniband/core/cma.c
3433 +++ b/drivers/infiniband/core/cma.c
3434 @@ -1078,18 +1078,31 @@ static inline bool cma_any_addr(const struct sockaddr *addr)
3435 return cma_zero_addr(addr) || cma_loopback_addr(addr);
3436 }
3437
3438 -static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
3439 +static int cma_addr_cmp(const struct sockaddr *src, const struct sockaddr *dst)
3440 {
3441 if (src->sa_family != dst->sa_family)
3442 return -1;
3443
3444 switch (src->sa_family) {
3445 case AF_INET:
3446 - return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
3447 - ((struct sockaddr_in *) dst)->sin_addr.s_addr;
3448 - case AF_INET6:
3449 - return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
3450 - &((struct sockaddr_in6 *) dst)->sin6_addr);
3451 + return ((struct sockaddr_in *)src)->sin_addr.s_addr !=
3452 + ((struct sockaddr_in *)dst)->sin_addr.s_addr;
3453 + case AF_INET6: {
3454 + struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *)src;
3455 + struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *)dst;
3456 + bool link_local;
3457 +
3458 + if (ipv6_addr_cmp(&src_addr6->sin6_addr,
3459 + &dst_addr6->sin6_addr))
3460 + return 1;
3461 + link_local = ipv6_addr_type(&dst_addr6->sin6_addr) &
3462 + IPV6_ADDR_LINKLOCAL;
3463 + /* Link local must match their scope_ids */
3464 + return link_local ? (src_addr6->sin6_scope_id !=
3465 + dst_addr6->sin6_scope_id) :
3466 + 0;
3467 + }
3468 +
3469 default:
3470 return ib_addr_cmp(&((struct sockaddr_ib *) src)->sib_addr,
3471 &((struct sockaddr_ib *) dst)->sib_addr);
3472 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
3473 index a68569ec86bf..3be6405d9855 100644
3474 --- a/drivers/infiniband/hw/cxgb4/cm.c
3475 +++ b/drivers/infiniband/hw/cxgb4/cm.c
3476 @@ -458,6 +458,8 @@ static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
3477 skb_reset_transport_header(skb);
3478 } else {
3479 skb = alloc_skb(len, gfp);
3480 + if (!skb)
3481 + return NULL;
3482 }
3483 t4_set_arp_err_handler(skb, NULL, NULL);
3484 return skb;
3485 diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
3486 index da786eb18558..368f4f08b686 100644
3487 --- a/drivers/infiniband/hw/hfi1/init.c
3488 +++ b/drivers/infiniband/hw/hfi1/init.c
3489 @@ -798,7 +798,8 @@ static int create_workqueues(struct hfi1_devdata *dd)
3490 ppd->hfi1_wq =
3491 alloc_workqueue(
3492 "hfi%d_%d",
3493 - WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE,
3494 + WQ_SYSFS | WQ_HIGHPRI | WQ_CPU_INTENSIVE |
3495 + WQ_MEM_RECLAIM,
3496 HFI1_MAX_ACTIVE_WORKQUEUE_ENTRIES,
3497 dd->unit, pidx);
3498 if (!ppd->hfi1_wq)
3499 diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c
3500 index 0d96c5bb38cd..d2d4ab9ab071 100644
3501 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c
3502 +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c
3503 @@ -66,7 +66,7 @@ struct ib_ah *hns_roce_create_ah(struct ib_pd *ibpd,
3504 HNS_ROCE_VLAN_SL_BIT_MASK) <<
3505 HNS_ROCE_VLAN_SL_SHIFT;
3506
3507 - ah->av.port_pd = cpu_to_be32(to_hr_pd(ibpd)->pdn |
3508 + ah->av.port_pd = cpu_to_le32(to_hr_pd(ibpd)->pdn |
3509 (rdma_ah_get_port_num(ah_attr) <<
3510 HNS_ROCE_PORT_NUM_SHIFT));
3511 ah->av.gid_index = grh->sgid_index;
3512 diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
3513 index 7a28232d868b..de85b3af3b39 100644
3514 --- a/drivers/md/bcache/alloc.c
3515 +++ b/drivers/md/bcache/alloc.c
3516 @@ -327,10 +327,11 @@ static int bch_allocator_thread(void *arg)
3517 * possibly issue discards to them, then we add the bucket to
3518 * the free list:
3519 */
3520 - while (!fifo_empty(&ca->free_inc)) {
3521 + while (1) {
3522 long bucket;
3523
3524 - fifo_pop(&ca->free_inc, bucket);
3525 + if (!fifo_pop(&ca->free_inc, bucket))
3526 + break;
3527
3528 if (ca->discard) {
3529 mutex_unlock(&ca->set->bucket_lock);
3530 diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
3531 index 772258ee1f51..f880e5eba8dd 100644
3532 --- a/drivers/md/bcache/journal.c
3533 +++ b/drivers/md/bcache/journal.c
3534 @@ -317,6 +317,18 @@ void bch_journal_mark(struct cache_set *c, struct list_head *list)
3535 }
3536 }
3537
3538 +bool is_discard_enabled(struct cache_set *s)
3539 +{
3540 + struct cache *ca;
3541 + unsigned int i;
3542 +
3543 + for_each_cache(ca, s, i)
3544 + if (ca->discard)
3545 + return true;
3546 +
3547 + return false;
3548 +}
3549 +
3550 int bch_journal_replay(struct cache_set *s, struct list_head *list)
3551 {
3552 int ret = 0, keys = 0, entries = 0;
3553 @@ -330,9 +342,17 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
3554 list_for_each_entry(i, list, list) {
3555 BUG_ON(i->pin && atomic_read(i->pin) != 1);
3556
3557 - cache_set_err_on(n != i->j.seq, s,
3558 -"bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
3559 - n, i->j.seq - 1, start, end);
3560 + if (n != i->j.seq) {
3561 + if (n == start && is_discard_enabled(s))
3562 + pr_info("bcache: journal entries %llu-%llu may be discarded! (replaying %llu-%llu)",
3563 + n, i->j.seq - 1, start, end);
3564 + else {
3565 + pr_err("bcache: journal entries %llu-%llu missing! (replaying %llu-%llu)",
3566 + n, i->j.seq - 1, start, end);
3567 + ret = -EIO;
3568 + goto err;
3569 + }
3570 + }
3571
3572 for (k = i->j.start;
3573 k < bset_bkey_last(&i->j);
3574 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
3575 index 2c0d35c882ed..2409507d7bff 100644
3576 --- a/drivers/md/bcache/super.c
3577 +++ b/drivers/md/bcache/super.c
3578 @@ -1770,13 +1770,15 @@ err:
3579 return NULL;
3580 }
3581
3582 -static void run_cache_set(struct cache_set *c)
3583 +static int run_cache_set(struct cache_set *c)
3584 {
3585 const char *err = "cannot allocate memory";
3586 struct cached_dev *dc, *t;
3587 struct cache *ca;
3588 struct closure cl;
3589 unsigned int i;
3590 + LIST_HEAD(journal);
3591 + struct journal_replay *l;
3592
3593 closure_init_stack(&cl);
3594
3595 @@ -1864,7 +1866,9 @@ static void run_cache_set(struct cache_set *c)
3596 if (j->version < BCACHE_JSET_VERSION_UUID)
3597 __uuid_write(c);
3598
3599 - bch_journal_replay(c, &journal);
3600 + err = "bcache: replay journal failed";
3601 + if (bch_journal_replay(c, &journal))
3602 + goto err;
3603 } else {
3604 pr_notice("invalidating existing data");
3605
3606 @@ -1932,11 +1936,19 @@ static void run_cache_set(struct cache_set *c)
3607 flash_devs_run(c);
3608
3609 set_bit(CACHE_SET_RUNNING, &c->flags);
3610 - return;
3611 + return 0;
3612 err:
3613 + while (!list_empty(&journal)) {
3614 + l = list_first_entry(&journal, struct journal_replay, list);
3615 + list_del(&l->list);
3616 + kfree(l);
3617 + }
3618 +
3619 closure_sync(&cl);
3620 /* XXX: test this, it's broken */
3621 bch_cache_set_error(c, "%s", err);
3622 +
3623 + return -EIO;
3624 }
3625
3626 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
3627 @@ -2000,8 +2012,11 @@ found:
3628 ca->set->cache[ca->sb.nr_this_dev] = ca;
3629 c->cache_by_alloc[c->caches_loaded++] = ca;
3630
3631 - if (c->caches_loaded == c->sb.nr_in_set)
3632 - run_cache_set(c);
3633 + if (c->caches_loaded == c->sb.nr_in_set) {
3634 + err = "failed to run cache set";
3635 + if (run_cache_set(c) < 0)
3636 + goto err;
3637 + }
3638
3639 return NULL;
3640 err:
3641 diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
3642 index 6889c25c62cb..9226dca44e90 100644
3643 --- a/drivers/media/common/videobuf2/videobuf2-core.c
3644 +++ b/drivers/media/common/videobuf2/videobuf2-core.c
3645 @@ -668,6 +668,11 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
3646 return -EBUSY;
3647 }
3648
3649 + if (q->waiting_in_dqbuf && *count) {
3650 + dprintk(1, "another dup()ped fd is waiting for a buffer\n");
3651 + return -EBUSY;
3652 + }
3653 +
3654 if (*count == 0 || q->num_buffers != 0 ||
3655 (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) {
3656 /*
3657 @@ -797,6 +802,10 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
3658 }
3659
3660 if (!q->num_buffers) {
3661 + if (q->waiting_in_dqbuf && *count) {
3662 + dprintk(1, "another dup()ped fd is waiting for a buffer\n");
3663 + return -EBUSY;
3664 + }
3665 memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
3666 q->memory = memory;
3667 q->waiting_for_buffers = !q->is_output;
3668 @@ -1466,6 +1475,11 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
3669 for (;;) {
3670 int ret;
3671
3672 + if (q->waiting_in_dqbuf) {
3673 + dprintk(1, "another dup()ped fd is waiting for a buffer\n");
3674 + return -EBUSY;
3675 + }
3676 +
3677 if (!q->streaming) {
3678 dprintk(1, "streaming off, will not wait for buffers\n");
3679 return -EINVAL;
3680 @@ -1493,6 +1507,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
3681 return -EAGAIN;
3682 }
3683
3684 + q->waiting_in_dqbuf = 1;
3685 /*
3686 * We are streaming and blocking, wait for another buffer to
3687 * become ready or for streamoff. Driver's lock is released to
3688 @@ -1513,6 +1528,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
3689 * the locks or return an error if one occurred.
3690 */
3691 call_void_qop(q, wait_finish, q);
3692 + q->waiting_in_dqbuf = 0;
3693 if (ret) {
3694 dprintk(1, "sleep was interrupted\n");
3695 return ret;
3696 @@ -2361,6 +2377,12 @@ static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_
3697 if (!data)
3698 return -EINVAL;
3699
3700 + if (q->waiting_in_dqbuf) {
3701 + dprintk(3, "another dup()ped fd is %s\n",
3702 + read ? "reading" : "writing");
3703 + return -EBUSY;
3704 + }
3705 +
3706 /*
3707 * Initialize emulator on first call.
3708 */
3709 diff --git a/drivers/media/dvb-frontends/m88ds3103.c b/drivers/media/dvb-frontends/m88ds3103.c
3710 index dffd2d4bf1c8..c25c92797408 100644
3711 --- a/drivers/media/dvb-frontends/m88ds3103.c
3712 +++ b/drivers/media/dvb-frontends/m88ds3103.c
3713 @@ -309,6 +309,9 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
3714 u16 u16tmp;
3715 u32 tuner_frequency_khz, target_mclk;
3716 s32 s32tmp;
3717 + static const struct reg_sequence reset_buf[] = {
3718 + {0x07, 0x80}, {0x07, 0x00}
3719 + };
3720
3721 dev_dbg(&client->dev,
3722 "delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
3723 @@ -321,11 +324,7 @@ static int m88ds3103_set_frontend(struct dvb_frontend *fe)
3724 }
3725
3726 /* reset */
3727 - ret = regmap_write(dev->regmap, 0x07, 0x80);
3728 - if (ret)
3729 - goto err;
3730 -
3731 - ret = regmap_write(dev->regmap, 0x07, 0x00);
3732 + ret = regmap_multi_reg_write(dev->regmap, reset_buf, 2);
3733 if (ret)
3734 goto err;
3735
3736 diff --git a/drivers/media/dvb-frontends/si2165.c b/drivers/media/dvb-frontends/si2165.c
3737 index feacd8da421d..d55d8f169dca 100644
3738 --- a/drivers/media/dvb-frontends/si2165.c
3739 +++ b/drivers/media/dvb-frontends/si2165.c
3740 @@ -275,18 +275,20 @@ static u32 si2165_get_fe_clk(struct si2165_state *state)
3741
3742 static int si2165_wait_init_done(struct si2165_state *state)
3743 {
3744 - int ret = -EINVAL;
3745 + int ret;
3746 u8 val = 0;
3747 int i;
3748
3749 for (i = 0; i < 3; ++i) {
3750 - si2165_readreg8(state, REG_INIT_DONE, &val);
3751 + ret = si2165_readreg8(state, REG_INIT_DONE, &val);
3752 + if (ret < 0)
3753 + return ret;
3754 if (val == 0x01)
3755 return 0;
3756 usleep_range(1000, 50000);
3757 }
3758 dev_err(&state->client->dev, "init_done was not set\n");
3759 - return ret;
3760 + return -EINVAL;
3761 }
3762
3763 static int si2165_upload_firmware_block(struct si2165_state *state,
3764 diff --git a/drivers/media/i2c/ov2659.c b/drivers/media/i2c/ov2659.c
3765 index 4715edc8ca33..e6a8b5669b9c 100644
3766 --- a/drivers/media/i2c/ov2659.c
3767 +++ b/drivers/media/i2c/ov2659.c
3768 @@ -1117,8 +1117,10 @@ static int ov2659_set_fmt(struct v4l2_subdev *sd,
3769 if (ov2659_formats[index].code == mf->code)
3770 break;
3771
3772 - if (index < 0)
3773 - return -EINVAL;
3774 + if (index < 0) {
3775 + index = 0;
3776 + mf->code = ov2659_formats[index].code;
3777 + }
3778
3779 mf->colorspace = V4L2_COLORSPACE_SRGB;
3780 mf->field = V4L2_FIELD_NONE;
3781 diff --git a/drivers/media/i2c/ov6650.c b/drivers/media/i2c/ov6650.c
3782 index a9264d515e54..edded869d792 100644
3783 --- a/drivers/media/i2c/ov6650.c
3784 +++ b/drivers/media/i2c/ov6650.c
3785 @@ -811,9 +811,16 @@ static int ov6650_video_probe(struct i2c_client *client)
3786 u8 pidh, pidl, midh, midl;
3787 int ret;
3788
3789 + priv->clk = v4l2_clk_get(&client->dev, NULL);
3790 + if (IS_ERR(priv->clk)) {
3791 + ret = PTR_ERR(priv->clk);
3792 + dev_err(&client->dev, "v4l2_clk request err: %d\n", ret);
3793 + return ret;
3794 + }
3795 +
3796 ret = ov6650_s_power(&priv->subdev, 1);
3797 if (ret < 0)
3798 - return ret;
3799 + goto eclkput;
3800
3801 msleep(20);
3802
3803 @@ -850,6 +857,11 @@ static int ov6650_video_probe(struct i2c_client *client)
3804
3805 done:
3806 ov6650_s_power(&priv->subdev, 0);
3807 + if (!ret)
3808 + return 0;
3809 +eclkput:
3810 + v4l2_clk_put(priv->clk);
3811 +
3812 return ret;
3813 }
3814
3815 @@ -992,18 +1004,9 @@ static int ov6650_probe(struct i2c_client *client,
3816 priv->code = MEDIA_BUS_FMT_YUYV8_2X8;
3817 priv->colorspace = V4L2_COLORSPACE_JPEG;
3818
3819 - priv->clk = v4l2_clk_get(&client->dev, NULL);
3820 - if (IS_ERR(priv->clk)) {
3821 - ret = PTR_ERR(priv->clk);
3822 - goto eclkget;
3823 - }
3824 -
3825 ret = ov6650_video_probe(client);
3826 - if (ret) {
3827 - v4l2_clk_put(priv->clk);
3828 -eclkget:
3829 + if (ret)
3830 v4l2_ctrl_handler_free(&priv->hdl);
3831 - }
3832
3833 return ret;
3834 }
3835 diff --git a/drivers/media/pci/saa7146/hexium_gemini.c b/drivers/media/pci/saa7146/hexium_gemini.c
3836 index 5817d9cde4d0..6d8e4afe9673 100644
3837 --- a/drivers/media/pci/saa7146/hexium_gemini.c
3838 +++ b/drivers/media/pci/saa7146/hexium_gemini.c
3839 @@ -270,9 +270,8 @@ static int hexium_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_d
3840 /* enable i2c-port pins */
3841 saa7146_write(dev, MC1, (MASK_08 | MASK_24 | MASK_10 | MASK_26));
3842
3843 - hexium->i2c_adapter = (struct i2c_adapter) {
3844 - .name = "hexium gemini",
3845 - };
3846 + strscpy(hexium->i2c_adapter.name, "hexium gemini",
3847 + sizeof(hexium->i2c_adapter.name));
3848 saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
3849 if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
3850 DEB_S("cannot register i2c-device. skipping.\n");
3851 diff --git a/drivers/media/pci/saa7146/hexium_orion.c b/drivers/media/pci/saa7146/hexium_orion.c
3852 index 0a05176c18ab..a794f9e5f990 100644
3853 --- a/drivers/media/pci/saa7146/hexium_orion.c
3854 +++ b/drivers/media/pci/saa7146/hexium_orion.c
3855 @@ -231,9 +231,8 @@ static int hexium_probe(struct saa7146_dev *dev)
3856 saa7146_write(dev, DD1_STREAM_B, 0x00000000);
3857 saa7146_write(dev, MC2, (MASK_09 | MASK_25 | MASK_10 | MASK_26));
3858
3859 - hexium->i2c_adapter = (struct i2c_adapter) {
3860 - .name = "hexium orion",
3861 - };
3862 + strscpy(hexium->i2c_adapter.name, "hexium orion",
3863 + sizeof(hexium->i2c_adapter.name));
3864 saa7146_i2c_adapter_prepare(dev, &hexium->i2c_adapter, SAA7146_I2C_BUS_BIT_RATE_480);
3865 if (i2c_add_adapter(&hexium->i2c_adapter) < 0) {
3866 DEB_S("cannot register i2c-device. skipping.\n");
3867 diff --git a/drivers/media/platform/coda/coda-bit.c b/drivers/media/platform/coda/coda-bit.c
3868 index d20d3df5778b..a3cfefdbee12 100644
3869 --- a/drivers/media/platform/coda/coda-bit.c
3870 +++ b/drivers/media/platform/coda/coda-bit.c
3871 @@ -1999,6 +1999,9 @@ static int coda_prepare_decode(struct coda_ctx *ctx)
3872 /* Clear decode success flag */
3873 coda_write(dev, 0, CODA_RET_DEC_PIC_SUCCESS);
3874
3875 + /* Clear error return value */
3876 + coda_write(dev, 0, CODA_RET_DEC_PIC_ERR_MB);
3877 +
3878 trace_coda_dec_pic_run(ctx, meta);
3879
3880 coda_command_async(ctx, CODA_COMMAND_PIC_RUN);
3881 diff --git a/drivers/media/platform/stm32/stm32-dcmi.c b/drivers/media/platform/stm32/stm32-dcmi.c
3882 index 721564176d8c..d38682265892 100644
3883 --- a/drivers/media/platform/stm32/stm32-dcmi.c
3884 +++ b/drivers/media/platform/stm32/stm32-dcmi.c
3885 @@ -808,6 +808,9 @@ static int dcmi_try_fmt(struct stm32_dcmi *dcmi, struct v4l2_format *f,
3886
3887 sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
3888 if (!sd_fmt) {
3889 + if (!dcmi->num_of_sd_formats)
3890 + return -ENODATA;
3891 +
3892 sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
3893 pix->pixelformat = sd_fmt->fourcc;
3894 }
3895 @@ -986,6 +989,9 @@ static int dcmi_set_sensor_format(struct stm32_dcmi *dcmi,
3896
3897 sd_fmt = find_format_by_fourcc(dcmi, pix->pixelformat);
3898 if (!sd_fmt) {
3899 + if (!dcmi->num_of_sd_formats)
3900 + return -ENODATA;
3901 +
3902 sd_fmt = dcmi->sd_formats[dcmi->num_of_sd_formats - 1];
3903 pix->pixelformat = sd_fmt->fourcc;
3904 }
3905 @@ -1645,7 +1651,7 @@ static int dcmi_probe(struct platform_device *pdev)
3906 dcmi->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
3907 if (IS_ERR(dcmi->rstc)) {
3908 dev_err(&pdev->dev, "Could not get reset control\n");
3909 - return -ENODEV;
3910 + return PTR_ERR(dcmi->rstc);
3911 }
3912
3913 /* Get bus characteristics from devicetree */
3914 @@ -1660,7 +1666,7 @@ static int dcmi_probe(struct platform_device *pdev)
3915 of_node_put(np);
3916 if (ret) {
3917 dev_err(&pdev->dev, "Could not parse the endpoint\n");
3918 - return -ENODEV;
3919 + return ret;
3920 }
3921
3922 if (ep.bus_type == V4L2_MBUS_CSI2) {
3923 @@ -1673,8 +1679,9 @@ static int dcmi_probe(struct platform_device *pdev)
3924
3925 irq = platform_get_irq(pdev, 0);
3926 if (irq <= 0) {
3927 - dev_err(&pdev->dev, "Could not get irq\n");
3928 - return -ENODEV;
3929 + if (irq != -EPROBE_DEFER)
3930 + dev_err(&pdev->dev, "Could not get irq\n");
3931 + return irq;
3932 }
3933
3934 dcmi->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3935 @@ -1694,12 +1701,13 @@ static int dcmi_probe(struct platform_device *pdev)
3936 dev_name(&pdev->dev), dcmi);
3937 if (ret) {
3938 dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
3939 - return -ENODEV;
3940 + return ret;
3941 }
3942
3943 mclk = devm_clk_get(&pdev->dev, "mclk");
3944 if (IS_ERR(mclk)) {
3945 - dev_err(&pdev->dev, "Unable to get mclk\n");
3946 + if (PTR_ERR(mclk) != -EPROBE_DEFER)
3947 + dev_err(&pdev->dev, "Unable to get mclk\n");
3948 return PTR_ERR(mclk);
3949 }
3950
3951 diff --git a/drivers/media/platform/video-mux.c b/drivers/media/platform/video-mux.c
3952 index c01e1592ad0a..c8ffe7bff77f 100644
3953 --- a/drivers/media/platform/video-mux.c
3954 +++ b/drivers/media/platform/video-mux.c
3955 @@ -365,9 +365,14 @@ static int video_mux_probe(struct platform_device *pdev)
3956 vmux->active = -1;
3957 vmux->pads = devm_kcalloc(dev, num_pads, sizeof(*vmux->pads),
3958 GFP_KERNEL);
3959 + if (!vmux->pads)
3960 + return -ENOMEM;
3961 +
3962 vmux->format_mbus = devm_kcalloc(dev, num_pads,
3963 sizeof(*vmux->format_mbus),
3964 GFP_KERNEL);
3965 + if (!vmux->format_mbus)
3966 + return -ENOMEM;
3967
3968 for (i = 0; i < num_pads; i++) {
3969 vmux->pads[i].flags = (i < num_pads - 1) ? MEDIA_PAD_FL_SINK
3970 diff --git a/drivers/media/platform/vimc/vimc-core.c b/drivers/media/platform/vimc/vimc-core.c
3971 index 9246f265de31..27db8835c241 100644
3972 --- a/drivers/media/platform/vimc/vimc-core.c
3973 +++ b/drivers/media/platform/vimc/vimc-core.c
3974 @@ -303,6 +303,8 @@ static int vimc_probe(struct platform_device *pdev)
3975
3976 dev_dbg(&pdev->dev, "probe");
3977
3978 + memset(&vimc->mdev, 0, sizeof(vimc->mdev));
3979 +
3980 /* Create platform_device for each entity in the topology*/
3981 vimc->subdevs = devm_kcalloc(&vimc->pdev.dev, vimc->pipe_cfg->num_ents,
3982 sizeof(*vimc->subdevs), GFP_KERNEL);
3983 diff --git a/drivers/media/platform/vimc/vimc-streamer.c b/drivers/media/platform/vimc/vimc-streamer.c
3984 index fcc897fb247b..392754c18046 100644
3985 --- a/drivers/media/platform/vimc/vimc-streamer.c
3986 +++ b/drivers/media/platform/vimc/vimc-streamer.c
3987 @@ -120,7 +120,6 @@ static int vimc_streamer_thread(void *data)
3988 int i;
3989
3990 set_freezable();
3991 - set_current_state(TASK_UNINTERRUPTIBLE);
3992
3993 for (;;) {
3994 try_to_freeze();
3995 @@ -137,6 +136,7 @@ static int vimc_streamer_thread(void *data)
3996 break;
3997 }
3998 //wait for 60hz
3999 + set_current_state(TASK_UNINTERRUPTIBLE);
4000 schedule_timeout(HZ / 60);
4001 }
4002
4003 diff --git a/drivers/media/platform/vivid/vivid-vid-cap.c b/drivers/media/platform/vivid/vivid-vid-cap.c
4004 index baa7c83ee6e0..3b09ffceefd5 100644
4005 --- a/drivers/media/platform/vivid/vivid-vid-cap.c
4006 +++ b/drivers/media/platform/vivid/vivid-vid-cap.c
4007 @@ -992,7 +992,7 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection
4008 v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
4009 if (dev->bitmap_cap && (compose->width != s->r.width ||
4010 compose->height != s->r.height)) {
4011 - kfree(dev->bitmap_cap);
4012 + vfree(dev->bitmap_cap);
4013 dev->bitmap_cap = NULL;
4014 }
4015 *compose = s->r;
4016 diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
4017 index 800d69c3f80b..1cf4019689a5 100644
4018 --- a/drivers/media/radio/wl128x/fmdrv_common.c
4019 +++ b/drivers/media/radio/wl128x/fmdrv_common.c
4020 @@ -489,7 +489,8 @@ int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
4021 return -EIO;
4022 }
4023 /* Send response data to caller */
4024 - if (response != NULL && response_len != NULL && evt_hdr->dlen) {
4025 + if (response != NULL && response_len != NULL && evt_hdr->dlen &&
4026 + evt_hdr->dlen <= payload_len) {
4027 /* Skip header info and copy only response data */
4028 skb_pull(skb, sizeof(struct fm_event_msg_hdr));
4029 memcpy(response, skb->data, evt_hdr->dlen);
4030 @@ -583,6 +584,8 @@ static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
4031 return;
4032
4033 fm_evt_hdr = (void *)skb->data;
4034 + if (fm_evt_hdr->dlen > sizeof(fmdev->irq_info.flag))
4035 + return;
4036
4037 /* Skip header info and copy only response data */
4038 skb_pull(skb, sizeof(struct fm_event_msg_hdr));
4039 @@ -1308,7 +1311,7 @@ static int load_default_rx_configuration(struct fmdev *fmdev)
4040 static int fm_power_up(struct fmdev *fmdev, u8 mode)
4041 {
4042 u16 payload;
4043 - __be16 asic_id, asic_ver;
4044 + __be16 asic_id = 0, asic_ver = 0;
4045 int resp_len, ret;
4046 u8 fw_name[50];
4047
4048 diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c
4049 index 8bf5637b3a69..e613c0175591 100644
4050 --- a/drivers/media/rc/serial_ir.c
4051 +++ b/drivers/media/rc/serial_ir.c
4052 @@ -773,8 +773,6 @@ static void serial_ir_exit(void)
4053
4054 static int __init serial_ir_init_module(void)
4055 {
4056 - int result;
4057 -
4058 switch (type) {
4059 case IR_HOMEBREW:
4060 case IR_IRDEO:
4061 @@ -802,12 +800,7 @@ static int __init serial_ir_init_module(void)
4062 if (sense != -1)
4063 sense = !!sense;
4064
4065 - result = serial_ir_init();
4066 - if (!result)
4067 - return 0;
4068 -
4069 - serial_ir_exit();
4070 - return result;
4071 + return serial_ir_init();
4072 }
4073
4074 static void __exit serial_ir_exit_module(void)
4075 diff --git a/drivers/media/usb/au0828/au0828-video.c b/drivers/media/usb/au0828/au0828-video.c
4076 index 62b45062b1e6..3e111f7f56df 100644
4077 --- a/drivers/media/usb/au0828/au0828-video.c
4078 +++ b/drivers/media/usb/au0828/au0828-video.c
4079 @@ -758,6 +758,9 @@ static int au0828_analog_stream_enable(struct au0828_dev *d)
4080
4081 dprintk(1, "au0828_analog_stream_enable called\n");
4082
4083 + if (test_bit(DEV_DISCONNECTED, &d->dev_state))
4084 + return -ENODEV;
4085 +
4086 iface = usb_ifnum_to_if(d->usbdev, 0);
4087 if (iface && iface->cur_altsetting->desc.bAlternateSetting != 5) {
4088 dprintk(1, "Changing intf#0 to alt 5\n");
4089 @@ -839,9 +842,9 @@ int au0828_start_analog_streaming(struct vb2_queue *vq, unsigned int count)
4090 return rc;
4091 }
4092
4093 + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 1);
4094 +
4095 if (vq->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
4096 - v4l2_device_call_all(&dev->v4l2_dev, 0, video,
4097 - s_stream, 1);
4098 dev->vid_timeout_running = 1;
4099 mod_timer(&dev->vid_timeout, jiffies + (HZ / 10));
4100 } else if (vq->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
4101 @@ -861,10 +864,11 @@ static void au0828_stop_streaming(struct vb2_queue *vq)
4102
4103 dprintk(1, "au0828_stop_streaming called %d\n", dev->streaming_users);
4104
4105 - if (dev->streaming_users-- == 1)
4106 + if (dev->streaming_users-- == 1) {
4107 au0828_uninit_isoc(dev);
4108 + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
4109 + }
4110
4111 - v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
4112 dev->vid_timeout_running = 0;
4113 del_timer_sync(&dev->vid_timeout);
4114
4115 @@ -893,8 +897,10 @@ void au0828_stop_vbi_streaming(struct vb2_queue *vq)
4116 dprintk(1, "au0828_stop_vbi_streaming called %d\n",
4117 dev->streaming_users);
4118
4119 - if (dev->streaming_users-- == 1)
4120 + if (dev->streaming_users-- == 1) {
4121 au0828_uninit_isoc(dev);
4122 + v4l2_device_call_all(&dev->v4l2_dev, 0, video, s_stream, 0);
4123 + }
4124
4125 spin_lock_irqsave(&dev->slock, flags);
4126 if (dev->isoc_ctl.vbi_buf != NULL) {
4127 diff --git a/drivers/media/usb/cpia2/cpia2_v4l.c b/drivers/media/usb/cpia2/cpia2_v4l.c
4128 index 99f106b13280..d47318958fe5 100644
4129 --- a/drivers/media/usb/cpia2/cpia2_v4l.c
4130 +++ b/drivers/media/usb/cpia2/cpia2_v4l.c
4131 @@ -1244,8 +1244,7 @@ static int __init cpia2_init(void)
4132 LOG("%s v%s\n",
4133 ABOUT, CPIA_VERSION);
4134 check_parameters();
4135 - cpia2_usb_init();
4136 - return 0;
4137 + return cpia2_usb_init();
4138 }
4139
4140
4141 diff --git a/drivers/media/usb/dvb-usb-v2/dvbsky.c b/drivers/media/usb/dvb-usb-v2/dvbsky.c
4142 index e28bd8836751..ae0814dd202a 100644
4143 --- a/drivers/media/usb/dvb-usb-v2/dvbsky.c
4144 +++ b/drivers/media/usb/dvb-usb-v2/dvbsky.c
4145 @@ -615,16 +615,18 @@ static int dvbsky_init(struct dvb_usb_device *d)
4146 return 0;
4147 }
4148
4149 -static void dvbsky_exit(struct dvb_usb_device *d)
4150 +static int dvbsky_frontend_detach(struct dvb_usb_adapter *adap)
4151 {
4152 + struct dvb_usb_device *d = adap_to_d(adap);
4153 struct dvbsky_state *state = d_to_priv(d);
4154 - struct dvb_usb_adapter *adap = &d->adapter[0];
4155 +
4156 + dev_dbg(&d->udev->dev, "%s: adap=%d\n", __func__, adap->id);
4157
4158 dvb_module_release(state->i2c_client_tuner);
4159 dvb_module_release(state->i2c_client_demod);
4160 dvb_module_release(state->i2c_client_ci);
4161
4162 - adap->fe[0] = NULL;
4163 + return 0;
4164 }
4165
4166 /* DVB USB Driver stuff */
4167 @@ -640,11 +642,11 @@ static struct dvb_usb_device_properties dvbsky_s960_props = {
4168
4169 .i2c_algo = &dvbsky_i2c_algo,
4170 .frontend_attach = dvbsky_s960_attach,
4171 + .frontend_detach = dvbsky_frontend_detach,
4172 .init = dvbsky_init,
4173 .get_rc_config = dvbsky_get_rc_config,
4174 .streaming_ctrl = dvbsky_streaming_ctrl,
4175 .identify_state = dvbsky_identify_state,
4176 - .exit = dvbsky_exit,
4177 .read_mac_address = dvbsky_read_mac_addr,
4178
4179 .num_adapters = 1,
4180 @@ -667,11 +669,11 @@ static struct dvb_usb_device_properties dvbsky_s960c_props = {
4181
4182 .i2c_algo = &dvbsky_i2c_algo,
4183 .frontend_attach = dvbsky_s960c_attach,
4184 + .frontend_detach = dvbsky_frontend_detach,
4185 .init = dvbsky_init,
4186 .get_rc_config = dvbsky_get_rc_config,
4187 .streaming_ctrl = dvbsky_streaming_ctrl,
4188 .identify_state = dvbsky_identify_state,
4189 - .exit = dvbsky_exit,
4190 .read_mac_address = dvbsky_read_mac_addr,
4191
4192 .num_adapters = 1,
4193 @@ -694,11 +696,11 @@ static struct dvb_usb_device_properties dvbsky_t680c_props = {
4194
4195 .i2c_algo = &dvbsky_i2c_algo,
4196 .frontend_attach = dvbsky_t680c_attach,
4197 + .frontend_detach = dvbsky_frontend_detach,
4198 .init = dvbsky_init,
4199 .get_rc_config = dvbsky_get_rc_config,
4200 .streaming_ctrl = dvbsky_streaming_ctrl,
4201 .identify_state = dvbsky_identify_state,
4202 - .exit = dvbsky_exit,
4203 .read_mac_address = dvbsky_read_mac_addr,
4204
4205 .num_adapters = 1,
4206 @@ -721,11 +723,11 @@ static struct dvb_usb_device_properties dvbsky_t330_props = {
4207
4208 .i2c_algo = &dvbsky_i2c_algo,
4209 .frontend_attach = dvbsky_t330_attach,
4210 + .frontend_detach = dvbsky_frontend_detach,
4211 .init = dvbsky_init,
4212 .get_rc_config = dvbsky_get_rc_config,
4213 .streaming_ctrl = dvbsky_streaming_ctrl,
4214 .identify_state = dvbsky_identify_state,
4215 - .exit = dvbsky_exit,
4216 .read_mac_address = dvbsky_read_mac_addr,
4217
4218 .num_adapters = 1,
4219 @@ -748,11 +750,11 @@ static struct dvb_usb_device_properties mygica_t230c_props = {
4220
4221 .i2c_algo = &dvbsky_i2c_algo,
4222 .frontend_attach = dvbsky_mygica_t230c_attach,
4223 + .frontend_detach = dvbsky_frontend_detach,
4224 .init = dvbsky_init,
4225 .get_rc_config = dvbsky_get_rc_config,
4226 .streaming_ctrl = dvbsky_streaming_ctrl,
4227 .identify_state = dvbsky_identify_state,
4228 - .exit = dvbsky_exit,
4229
4230 .num_adapters = 1,
4231 .adapter = {
4232 diff --git a/drivers/media/usb/go7007/go7007-fw.c b/drivers/media/usb/go7007/go7007-fw.c
4233 index 24f5b615dc7a..dfa9f899d0c2 100644
4234 --- a/drivers/media/usb/go7007/go7007-fw.c
4235 +++ b/drivers/media/usb/go7007/go7007-fw.c
4236 @@ -1499,8 +1499,8 @@ static int modet_to_package(struct go7007 *go, __le16 *code, int space)
4237 return cnt;
4238 }
4239
4240 -static int do_special(struct go7007 *go, u16 type, __le16 *code, int space,
4241 - int *framelen)
4242 +static noinline_for_stack int do_special(struct go7007 *go, u16 type,
4243 + __le16 *code, int space, int *framelen)
4244 {
4245 switch (type) {
4246 case SPECIAL_FRM_HEAD:
4247 diff --git a/drivers/media/usb/gspca/gspca.c b/drivers/media/usb/gspca/gspca.c
4248 index 405a6a76d820..b12356c533a6 100644
4249 --- a/drivers/media/usb/gspca/gspca.c
4250 +++ b/drivers/media/usb/gspca/gspca.c
4251 @@ -294,7 +294,7 @@ static void fill_frame(struct gspca_dev *gspca_dev,
4252 /* check the packet status and length */
4253 st = urb->iso_frame_desc[i].status;
4254 if (st) {
4255 - pr_err("ISOC data error: [%d] len=%d, status=%d\n",
4256 + gspca_dbg(gspca_dev, D_PACK, "ISOC data error: [%d] len=%d, status=%d\n",
4257 i, len, st);
4258 gspca_dev->last_packet_type = DISCARD_PACKET;
4259 continue;
4260 @@ -314,6 +314,8 @@ static void fill_frame(struct gspca_dev *gspca_dev,
4261 }
4262
4263 resubmit:
4264 + if (!gspca_dev->streaming)
4265 + return;
4266 /* resubmit the URB */
4267 st = usb_submit_urb(urb, GFP_ATOMIC);
4268 if (st < 0)
4269 @@ -330,7 +332,7 @@ static void isoc_irq(struct urb *urb)
4270 struct gspca_dev *gspca_dev = (struct gspca_dev *) urb->context;
4271
4272 gspca_dbg(gspca_dev, D_PACK, "isoc irq\n");
4273 - if (!vb2_start_streaming_called(&gspca_dev->queue))
4274 + if (!gspca_dev->streaming)
4275 return;
4276 fill_frame(gspca_dev, urb);
4277 }
4278 @@ -344,7 +346,7 @@ static void bulk_irq(struct urb *urb)
4279 int st;
4280
4281 gspca_dbg(gspca_dev, D_PACK, "bulk irq\n");
4282 - if (!vb2_start_streaming_called(&gspca_dev->queue))
4283 + if (!gspca_dev->streaming)
4284 return;
4285 switch (urb->status) {
4286 case 0:
4287 @@ -367,6 +369,8 @@ static void bulk_irq(struct urb *urb)
4288 urb->actual_length);
4289
4290 resubmit:
4291 + if (!gspca_dev->streaming)
4292 + return;
4293 /* resubmit the URB */
4294 if (gspca_dev->cam.bulk_nurbs != 0) {
4295 st = usb_submit_urb(urb, GFP_ATOMIC);
4296 @@ -1630,6 +1634,8 @@ void gspca_disconnect(struct usb_interface *intf)
4297
4298 mutex_lock(&gspca_dev->usb_lock);
4299 gspca_dev->present = false;
4300 + destroy_urbs(gspca_dev);
4301 + gspca_input_destroy_urb(gspca_dev);
4302
4303 vb2_queue_error(&gspca_dev->queue);
4304
4305 diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
4306 index a8519da0020b..673fdca8d2da 100644
4307 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
4308 +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c
4309 @@ -666,6 +666,8 @@ static int ctrl_get_input(struct pvr2_ctrl *cptr,int *vp)
4310
4311 static int ctrl_check_input(struct pvr2_ctrl *cptr,int v)
4312 {
4313 + if (v < 0 || v > PVR2_CVAL_INPUT_MAX)
4314 + return 0;
4315 return ((1 << v) & cptr->hdw->input_allowed_mask) != 0;
4316 }
4317
4318 diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
4319 index 25648add77e5..bd2b7a67b732 100644
4320 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
4321 +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.h
4322 @@ -50,6 +50,7 @@
4323 #define PVR2_CVAL_INPUT_COMPOSITE 2
4324 #define PVR2_CVAL_INPUT_SVIDEO 3
4325 #define PVR2_CVAL_INPUT_RADIO 4
4326 +#define PVR2_CVAL_INPUT_MAX PVR2_CVAL_INPUT_RADIO
4327
4328 enum pvr2_config {
4329 pvr2_config_empty, /* No configuration */
4330 diff --git a/drivers/mmc/core/pwrseq_emmc.c b/drivers/mmc/core/pwrseq_emmc.c
4331 index efb8a7965dd4..154f4204d58c 100644
4332 --- a/drivers/mmc/core/pwrseq_emmc.c
4333 +++ b/drivers/mmc/core/pwrseq_emmc.c
4334 @@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
4335
4336 #define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
4337
4338 -static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
4339 -{
4340 - gpiod_set_value(pwrseq->reset_gpio, 1);
4341 - udelay(1);
4342 - gpiod_set_value(pwrseq->reset_gpio, 0);
4343 - udelay(200);
4344 -}
4345 -
4346 static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
4347 {
4348 struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
4349
4350 - __mmc_pwrseq_emmc_reset(pwrseq);
4351 + gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
4352 + udelay(1);
4353 + gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
4354 + udelay(200);
4355 }
4356
4357 static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
4358 @@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
4359 {
4360 struct mmc_pwrseq_emmc *pwrseq = container_of(this,
4361 struct mmc_pwrseq_emmc, reset_nb);
4362 + gpiod_set_value(pwrseq->reset_gpio, 1);
4363 + udelay(1);
4364 + gpiod_set_value(pwrseq->reset_gpio, 0);
4365 + udelay(200);
4366
4367 - __mmc_pwrseq_emmc_reset(pwrseq);
4368 return NOTIFY_DONE;
4369 }
4370
4371 @@ -72,14 +70,18 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
4372 if (IS_ERR(pwrseq->reset_gpio))
4373 return PTR_ERR(pwrseq->reset_gpio);
4374
4375 - /*
4376 - * register reset handler to ensure emmc reset also from
4377 - * emergency_reboot(), priority 255 is the highest priority
4378 - * so it will be executed before any system reboot handler.
4379 - */
4380 - pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
4381 - pwrseq->reset_nb.priority = 255;
4382 - register_restart_handler(&pwrseq->reset_nb);
4383 + if (!gpiod_cansleep(pwrseq->reset_gpio)) {
4384 + /*
4385 + * register reset handler to ensure emmc reset also from
4386 + * emergency_reboot(), priority 255 is the highest priority
4387 + * so it will be executed before any system reboot handler.
4388 + */
4389 + pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
4390 + pwrseq->reset_nb.priority = 255;
4391 + register_restart_handler(&pwrseq->reset_nb);
4392 + } else {
4393 + dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
4394 + }
4395
4396 pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
4397 pwrseq->pwrseq.dev = dev;
4398 diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
4399 index d0d9f90e7cdf..cfb8ee24eaba 100644
4400 --- a/drivers/mmc/core/sd.c
4401 +++ b/drivers/mmc/core/sd.c
4402 @@ -216,6 +216,14 @@ static int mmc_decode_scr(struct mmc_card *card)
4403
4404 if (scr->sda_spec3)
4405 scr->cmds = UNSTUFF_BITS(resp, 32, 2);
4406 +
4407 + /* SD Spec says: any SD Card shall set at least bits 0 and 2 */
4408 + if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
4409 + !(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
4410 + pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
4411 + return -EINVAL;
4412 + }
4413 +
4414 return 0;
4415 }
4416
4417 diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
4418 index 67f6bd24a9d0..ea254d00541f 100644
4419 --- a/drivers/mmc/host/mmc_spi.c
4420 +++ b/drivers/mmc/host/mmc_spi.c
4421 @@ -819,6 +819,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
4422 }
4423
4424 status = spi_sync_locked(spi, &host->m);
4425 + if (status < 0) {
4426 + dev_dbg(&spi->dev, "read error %d\n", status);
4427 + return status;
4428 + }
4429
4430 if (host->dma_dev) {
4431 dma_sync_single_for_cpu(host->dma_dev,
4432 diff --git a/drivers/mmc/host/sdhci-iproc.c b/drivers/mmc/host/sdhci-iproc.c
4433 index 94eeed2a1b53..f903ab96aa21 100644
4434 --- a/drivers/mmc/host/sdhci-iproc.c
4435 +++ b/drivers/mmc/host/sdhci-iproc.c
4436 @@ -185,7 +185,8 @@ static const struct sdhci_ops sdhci_iproc_32only_ops = {
4437 };
4438
4439 static const struct sdhci_pltfm_data sdhci_iproc_cygnus_pltfm_data = {
4440 - .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK,
4441 + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
4442 + SDHCI_QUIRK_NO_HISPD_BIT,
4443 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN | SDHCI_QUIRK2_HOST_OFF_CARD_ON,
4444 .ops = &sdhci_iproc_32only_ops,
4445 };
4446 @@ -208,7 +209,8 @@ static const struct sdhci_iproc_data iproc_cygnus_data = {
4447
4448 static const struct sdhci_pltfm_data sdhci_iproc_pltfm_data = {
4449 .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |
4450 - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12,
4451 + SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12 |
4452 + SDHCI_QUIRK_NO_HISPD_BIT,
4453 .quirks2 = SDHCI_QUIRK2_ACMD23_BROKEN,
4454 .ops = &sdhci_iproc_ops,
4455 };
4456 diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c
4457 index a7bf8515116f..e5c598ae5f24 100644
4458 --- a/drivers/mmc/host/sdhci-of-esdhc.c
4459 +++ b/drivers/mmc/host/sdhci-of-esdhc.c
4460 @@ -643,6 +643,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
4461 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
4462 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
4463
4464 + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
4465 + mdelay(5);
4466 +
4467 if (mask & SDHCI_RESET_ALL) {
4468 val = sdhci_readl(host, ESDHC_TBCTL);
4469 val &= ~ESDHC_TB_EN;
4470 @@ -917,6 +920,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
4471 if (esdhc->vendor_ver > VENDOR_V_22)
4472 host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
4473
4474 + if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
4475 + host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
4476 + host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
4477 + }
4478 +
4479 if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
4480 of_device_is_compatible(np, "fsl,p5020-esdhc") ||
4481 of_device_is_compatible(np, "fsl,p4080-esdhc") ||
4482 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
4483 index 1b5f591cf0a2..b5d72815776c 100644
4484 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
4485 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
4486 @@ -2223,7 +2223,7 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev)
4487
4488 host_info->os_type = ENA_ADMIN_OS_LINUX;
4489 host_info->kernel_ver = LINUX_VERSION_CODE;
4490 - strncpy(host_info->kernel_ver_str, utsname()->version,
4491 + strlcpy(host_info->kernel_ver_str, utsname()->version,
4492 sizeof(host_info->kernel_ver_str) - 1);
4493 host_info->os_dist = 0;
4494 strncpy(host_info->os_dist_str, utsname()->release,
4495 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
4496 index c2fd323c4078..ea75f275023f 100644
4497 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
4498 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
4499 @@ -75,8 +75,8 @@ struct l2t_data {
4500 struct l2t_entry *rover; /* starting point for next allocation */
4501 atomic_t nfree; /* number of free entries */
4502 rwlock_t lock;
4503 - struct l2t_entry l2tab[0];
4504 struct rcu_head rcu_head; /* to handle rcu cleanup */
4505 + struct l2t_entry l2tab[];
4506 };
4507
4508 typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
4509 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4510 index 961e3087d1d3..bb04c695ab9f 100644
4511 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4512 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
4513 @@ -6010,15 +6010,24 @@ static int __init cxgb4_init_module(void)
4514
4515 ret = pci_register_driver(&cxgb4_driver);
4516 if (ret < 0)
4517 - debugfs_remove(cxgb4_debugfs_root);
4518 + goto err_pci;
4519
4520 #if IS_ENABLED(CONFIG_IPV6)
4521 if (!inet6addr_registered) {
4522 - register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
4523 - inet6addr_registered = true;
4524 + ret = register_inet6addr_notifier(&cxgb4_inet6addr_notifier);
4525 + if (ret)
4526 + pci_unregister_driver(&cxgb4_driver);
4527 + else
4528 + inet6addr_registered = true;
4529 }
4530 #endif
4531
4532 + if (ret == 0)
4533 + return ret;
4534 +
4535 +err_pci:
4536 + debugfs_remove(cxgb4_debugfs_root);
4537 +
4538 return ret;
4539 }
4540
4541 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
4542 index 8b11682ebba2..8cd339c92c1a 100644
4543 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
4544 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
4545 @@ -7329,7 +7329,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4546
4547 dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
4548
4549 - if (pci_dev_run_wake(pdev))
4550 + if (pci_dev_run_wake(pdev) && hw->mac.type < e1000_pch_cnp)
4551 pm_runtime_put_noidle(&pdev->dev);
4552
4553 return 0;
4554 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
4555 index f81ad0aa8b09..df8808cd7e11 100644
4556 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
4557 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
4558 @@ -2654,6 +2654,10 @@ void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
4559 struct i40e_vsi_context ctxt;
4560 i40e_status ret;
4561
4562 + /* Don't modify stripping options if a port VLAN is active */
4563 + if (vsi->info.pvid)
4564 + return;
4565 +
4566 if ((vsi->info.valid_sections &
4567 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
4568 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
4569 @@ -2684,6 +2688,10 @@ void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
4570 struct i40e_vsi_context ctxt;
4571 i40e_status ret;
4572
4573 + /* Don't modify stripping options if a port VLAN is active */
4574 + if (vsi->info.pvid)
4575 + return;
4576 +
4577 if ((vsi->info.valid_sections &
4578 cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
4579 ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
4580 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4581 index c6d24eaede18..d86f3fa7aa6a 100644
4582 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4583 +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
4584 @@ -2399,8 +2399,10 @@ error_param:
4585 (u8 *)&stats, sizeof(stats));
4586 }
4587
4588 -/* If the VF is not trusted restrict the number of MAC/VLAN it can program */
4589 -#define I40E_VC_MAX_MAC_ADDR_PER_VF 12
4590 +/* If the VF is not trusted restrict the number of MAC/VLAN it can program
4591 + * MAC filters: 16 for multicast, 1 for MAC, 1 for broadcast
4592 + */
4593 +#define I40E_VC_MAX_MAC_ADDR_PER_VF (16 + 1 + 1)
4594 #define I40E_VC_MAX_VLAN_PER_VF 8
4595
4596 /**
4597 diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
4598 index aa39a068858e..5aa083d9a6c9 100644
4599 --- a/drivers/net/ethernet/intel/igb/igb_main.c
4600 +++ b/drivers/net/ethernet/intel/igb/igb_main.c
4601 @@ -3468,6 +3468,9 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4602 break;
4603 }
4604 }
4605 +
4606 + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
4607 +
4608 pm_runtime_put_noidle(&pdev->dev);
4609 return 0;
4610
4611 diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
4612 index fb12b63439c6..35413041dcf8 100644
4613 --- a/drivers/net/hyperv/netvsc.c
4614 +++ b/drivers/net/hyperv/netvsc.c
4615 @@ -872,12 +872,6 @@ static inline int netvsc_send_pkt(
4616 } else if (ret == -EAGAIN) {
4617 netif_tx_stop_queue(txq);
4618 ndev_ctx->eth_stats.stop_queue++;
4619 - if (atomic_read(&nvchan->queue_sends) < 1 &&
4620 - !net_device->tx_disable) {
4621 - netif_tx_wake_queue(txq);
4622 - ndev_ctx->eth_stats.wake_queue++;
4623 - ret = -ENOSPC;
4624 - }
4625 } else {
4626 netdev_err(ndev,
4627 "Unable to send packet pages %u len %u, ret %d\n",
4628 @@ -885,6 +879,15 @@ static inline int netvsc_send_pkt(
4629 ret);
4630 }
4631
4632 + if (netif_tx_queue_stopped(txq) &&
4633 + atomic_read(&nvchan->queue_sends) < 1 &&
4634 + !net_device->tx_disable) {
4635 + netif_tx_wake_queue(txq);
4636 + ndev_ctx->eth_stats.wake_queue++;
4637 + if (ret == -EAGAIN)
4638 + ret = -ENOSPC;
4639 + }
4640 +
4641 return ret;
4642 }
4643
4644 diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
4645 index 366217263d70..d9a6699abe59 100644
4646 --- a/drivers/net/usb/qmi_wwan.c
4647 +++ b/drivers/net/usb/qmi_wwan.c
4648 @@ -63,6 +63,7 @@ enum qmi_wwan_flags {
4649
4650 enum qmi_wwan_quirks {
4651 QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
4652 + QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
4653 };
4654
4655 struct qmimux_hdr {
4656 @@ -845,6 +846,16 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
4657 .data = QMI_WWAN_QUIRK_DTR,
4658 };
4659
4660 +static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
4661 + .description = "WWAN/QMI device",
4662 + .flags = FLAG_WWAN | FLAG_SEND_ZLP,
4663 + .bind = qmi_wwan_bind,
4664 + .unbind = qmi_wwan_unbind,
4665 + .manage_power = qmi_wwan_manage_power,
4666 + .rx_fixup = qmi_wwan_rx_fixup,
4667 + .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
4668 +};
4669 +
4670 #define HUAWEI_VENDOR_ID 0x12D1
4671
4672 /* map QMI/wwan function by a fixed interface number */
4673 @@ -865,6 +876,15 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
4674 #define QMI_GOBI_DEVICE(vend, prod) \
4675 QMI_FIXED_INTF(vend, prod, 0)
4676
4677 +/* Quectel does not use fixed interface numbers on at least some of their
4678 + * devices. We need to check the number of endpoints to ensure that we bind to
4679 + * the correct interface.
4680 + */
4681 +#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
4682 + USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
4683 + USB_SUBCLASS_VENDOR_SPEC, 0xff), \
4684 + .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
4685 +
4686 static const struct usb_device_id products[] = {
4687 /* 1. CDC ECM like devices match on the control interface */
4688 { /* Huawei E392, E398 and possibly others sharing both device id and more... */
4689 @@ -969,20 +989,9 @@ static const struct usb_device_id products[] = {
4690 USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
4691 .driver_info = (unsigned long)&qmi_wwan_info,
4692 },
4693 - { /* Quectel EP06/EG06/EM06 */
4694 - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306,
4695 - USB_CLASS_VENDOR_SPEC,
4696 - USB_SUBCLASS_VENDOR_SPEC,
4697 - 0xff),
4698 - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
4699 - },
4700 - { /* Quectel EG12/EM12 */
4701 - USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
4702 - USB_CLASS_VENDOR_SPEC,
4703 - USB_SUBCLASS_VENDOR_SPEC,
4704 - 0xff),
4705 - .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
4706 - },
4707 + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
4708 + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
4709 + {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
4710
4711 /* 3. Combined interface devices matching on interface number */
4712 {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
4713 @@ -1283,7 +1292,6 @@ static const struct usb_device_id products[] = {
4714 {QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
4715 {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
4716 {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
4717 - {QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
4718 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
4719 {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
4720 {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
4721 @@ -1363,27 +1371,12 @@ static bool quectel_ec20_detected(struct usb_interface *intf)
4722 return false;
4723 }
4724
4725 -static bool quectel_diag_detected(struct usb_interface *intf)
4726 -{
4727 - struct usb_device *dev = interface_to_usbdev(intf);
4728 - struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
4729 - u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
4730 - u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
4731 -
4732 - if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
4733 - return false;
4734 -
4735 - if (id_product == 0x0306 || id_product == 0x0512)
4736 - return true;
4737 - else
4738 - return false;
4739 -}
4740 -
4741 static int qmi_wwan_probe(struct usb_interface *intf,
4742 const struct usb_device_id *prod)
4743 {
4744 struct usb_device_id *id = (struct usb_device_id *)prod;
4745 struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
4746 + const struct driver_info *info;
4747
4748 /* Workaround to enable dynamic IDs. This disables usbnet
4749 * blacklisting functionality. Which, if required, can be
4750 @@ -1417,10 +1410,14 @@ static int qmi_wwan_probe(struct usb_interface *intf,
4751 * we need to match on class/subclass/protocol. These values are
4752 * identical for the diagnostic- and QMI-interface, but bNumEndpoints is
4753 * different. Ignore the current interface if the number of endpoints
4754 - * the number for the diag interface (two).
4755 + * equals the number for the diag interface (two).
4756 */
4757 - if (quectel_diag_detected(intf))
4758 - return -ENODEV;
4759 + info = (void *)&id->driver_info;
4760 +
4761 + if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
4762 + if (desc->bNumEndpoints == 2)
4763 + return -ENODEV;
4764 + }
4765
4766 return usbnet_probe(intf, id);
4767 }
4768 diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
4769 index 2daf33342b23..1fc2bf66845c 100644
4770 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c
4771 +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
4772 @@ -1131,7 +1131,12 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
4773 params->wait);
4774
4775 out:
4776 + /* when the sent packet was not acked by receiver(ACK=0), rc will
4777 + * be -EAGAIN. In this case this function needs to return success,
4778 + * the ACK=0 will be reflected in tx_status.
4779 + */
4780 tx_status = (rc == 0);
4781 + rc = (rc == -EAGAIN) ? 0 : rc;
4782 cfg80211_mgmt_tx_status(wdev, cookie ? *cookie : 0, buf, len,
4783 tx_status, GFP_KERNEL);
4784
4785 diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
4786 index 42c02a20ec97..6e3b3031f29b 100644
4787 --- a/drivers/net/wireless/ath/wil6210/wmi.c
4788 +++ b/drivers/net/wireless/ath/wil6210/wmi.c
4789 @@ -3107,8 +3107,9 @@ int wmi_mgmt_tx(struct wil6210_vif *vif, const u8 *buf, size_t len)
4790 rc = wmi_call(wil, WMI_SW_TX_REQ_CMDID, vif->mid, cmd, total,
4791 WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
4792 if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
4793 - wil_err(wil, "mgmt_tx failed with status %d\n", evt.evt.status);
4794 - rc = -EINVAL;
4795 + wil_dbg_wmi(wil, "mgmt_tx failed with status %d\n",
4796 + evt.evt.status);
4797 + rc = -EAGAIN;
4798 }
4799
4800 kfree(cmd);
4801 @@ -3160,9 +3161,9 @@ int wmi_mgmt_tx_ext(struct wil6210_vif *vif, const u8 *buf, size_t len,
4802 rc = wmi_call(wil, WMI_SW_TX_REQ_EXT_CMDID, vif->mid, cmd, total,
4803 WMI_SW_TX_COMPLETE_EVENTID, &evt, sizeof(evt), 2000);
4804 if (!rc && evt.evt.status != WMI_FW_STATUS_SUCCESS) {
4805 - wil_err(wil, "mgmt_tx_ext failed with status %d\n",
4806 - evt.evt.status);
4807 - rc = -EINVAL;
4808 + wil_dbg_wmi(wil, "mgmt_tx_ext failed with status %d\n",
4809 + evt.evt.status);
4810 + rc = -EAGAIN;
4811 }
4812
4813 kfree(cmd);
4814 diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c
4815 index e99e766a3028..1cabae424839 100644
4816 --- a/drivers/net/wireless/atmel/at76c50x-usb.c
4817 +++ b/drivers/net/wireless/atmel/at76c50x-usb.c
4818 @@ -2585,8 +2585,8 @@ static int __init at76_mod_init(void)
4819 if (result < 0)
4820 printk(KERN_ERR DRIVER_NAME
4821 ": usb_register failed (status %d)\n", result);
4822 -
4823 - led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
4824 + else
4825 + led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
4826 return result;
4827 }
4828
4829 diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c
4830 index 6922cbb99a04..5a0699fb4b9a 100644
4831 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c
4832 +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c
4833 @@ -1834,7 +1834,7 @@ static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
4834 static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
4835 {
4836 struct b43_phy_lp *lpphy = dev->phy.lp;
4837 - struct lpphy_tx_gains gains, oldgains;
4838 + struct lpphy_tx_gains oldgains;
4839 int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
4840
4841 lpphy_read_tx_pctl_mode_from_hardware(dev);
4842 @@ -1848,9 +1848,9 @@ static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
4843 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
4844
4845 if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
4846 - lpphy_papd_cal(dev, gains, 0, 1, 30);
4847 + lpphy_papd_cal(dev, oldgains, 0, 1, 30);
4848 else
4849 - lpphy_papd_cal(dev, gains, 0, 1, 65);
4850 + lpphy_papd_cal(dev, oldgains, 0, 1, 65);
4851
4852 if (old_afe_ovr)
4853 lpphy_set_tx_gains(dev, oldgains);
4854 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
4855 index 6f3faaf1b1cb..c7c520f327f2 100644
4856 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
4857 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
4858 @@ -3466,6 +3466,8 @@ brcmf_wowl_nd_results(struct brcmf_if *ifp, const struct brcmf_event_msg *e,
4859 }
4860
4861 netinfo = brcmf_get_netinfo_array(pfn_result);
4862 + if (netinfo->SSID_len > IEEE80211_MAX_SSID_LEN)
4863 + netinfo->SSID_len = IEEE80211_MAX_SSID_LEN;
4864 memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
4865 cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
4866 cfg->wowl.nd->n_channels = 1;
4867 @@ -5366,6 +5368,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
4868 conn_info->req_ie =
4869 kmemdup(cfg->extra_buf, conn_info->req_ie_len,
4870 GFP_KERNEL);
4871 + if (!conn_info->req_ie)
4872 + conn_info->req_ie_len = 0;
4873 } else {
4874 conn_info->req_ie_len = 0;
4875 conn_info->req_ie = NULL;
4876 @@ -5382,6 +5386,8 @@ static s32 brcmf_get_assoc_ies(struct brcmf_cfg80211_info *cfg,
4877 conn_info->resp_ie =
4878 kmemdup(cfg->extra_buf, conn_info->resp_ie_len,
4879 GFP_KERNEL);
4880 + if (!conn_info->resp_ie)
4881 + conn_info->resp_ie_len = 0;
4882 } else {
4883 conn_info->resp_ie_len = 0;
4884 conn_info->resp_ie = NULL;
4885 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
4886 index 860a4372cb56..36a04c1144e5 100644
4887 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
4888 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c
4889 @@ -464,7 +464,8 @@ void brcmf_rx_frame(struct device *dev, struct sk_buff *skb, bool handle_event)
4890 } else {
4891 /* Process special event packets */
4892 if (handle_event)
4893 - brcmf_fweh_process_skb(ifp->drvr, skb);
4894 + brcmf_fweh_process_skb(ifp->drvr, skb,
4895 + BCMILCP_SUBTYPE_VENDOR_LONG);
4896
4897 brcmf_netif_rx(ifp, skb);
4898 }
4899 @@ -481,7 +482,7 @@ void brcmf_rx_event(struct device *dev, struct sk_buff *skb)
4900 if (brcmf_rx_hdrpull(drvr, skb, &ifp))
4901 return;
4902
4903 - brcmf_fweh_process_skb(ifp->drvr, skb);
4904 + brcmf_fweh_process_skb(ifp->drvr, skb, 0);
4905 brcmu_pkt_buf_free_skb(skb);
4906 }
4907
4908 @@ -783,17 +784,17 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
4909 bool rtnl_locked)
4910 {
4911 struct brcmf_if *ifp;
4912 + int ifidx;
4913
4914 ifp = drvr->iflist[bsscfgidx];
4915 - drvr->iflist[bsscfgidx] = NULL;
4916 if (!ifp) {
4917 brcmf_err("Null interface, bsscfgidx=%d\n", bsscfgidx);
4918 return;
4919 }
4920 brcmf_dbg(TRACE, "Enter, bsscfgidx=%d, ifidx=%d\n", bsscfgidx,
4921 ifp->ifidx);
4922 - if (drvr->if2bss[ifp->ifidx] == bsscfgidx)
4923 - drvr->if2bss[ifp->ifidx] = BRCMF_BSSIDX_INVALID;
4924 + ifidx = ifp->ifidx;
4925 +
4926 if (ifp->ndev) {
4927 if (bsscfgidx == 0) {
4928 if (ifp->ndev->netdev_ops == &brcmf_netdev_ops_pri) {
4929 @@ -821,6 +822,10 @@ static void brcmf_del_if(struct brcmf_pub *drvr, s32 bsscfgidx,
4930 brcmf_p2p_ifp_removed(ifp, rtnl_locked);
4931 kfree(ifp);
4932 }
4933 +
4934 + drvr->iflist[bsscfgidx] = NULL;
4935 + if (drvr->if2bss[ifidx] == bsscfgidx)
4936 + drvr->if2bss[ifidx] = BRCMF_BSSIDX_INVALID;
4937 }
4938
4939 void brcmf_remove_interface(struct brcmf_if *ifp, bool rtnl_locked)
4940 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
4941 index 816f80ea925b..ebd66fe0d949 100644
4942 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
4943 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.h
4944 @@ -211,7 +211,7 @@ enum brcmf_fweh_event_code {
4945 */
4946 #define BRCM_OUI "\x00\x10\x18"
4947 #define BCMILCP_BCM_SUBTYPE_EVENT 1
4948 -
4949 +#define BCMILCP_SUBTYPE_VENDOR_LONG 32769
4950
4951 /**
4952 * struct brcm_ethhdr - broadcom specific ether header.
4953 @@ -334,10 +334,10 @@ void brcmf_fweh_process_event(struct brcmf_pub *drvr,
4954 void brcmf_fweh_p2pdev_setup(struct brcmf_if *ifp, bool ongoing);
4955
4956 static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
4957 - struct sk_buff *skb)
4958 + struct sk_buff *skb, u16 stype)
4959 {
4960 struct brcmf_event *event_packet;
4961 - u16 usr_stype;
4962 + u16 subtype, usr_stype;
4963
4964 /* only process events when protocol matches */
4965 if (skb->protocol != cpu_to_be16(ETH_P_LINK_CTL))
4966 @@ -346,8 +346,16 @@ static inline void brcmf_fweh_process_skb(struct brcmf_pub *drvr,
4967 if ((skb->len + ETH_HLEN) < sizeof(*event_packet))
4968 return;
4969
4970 - /* check for BRCM oui match */
4971 event_packet = (struct brcmf_event *)skb_mac_header(skb);
4972 +
4973 + /* check subtype if needed */
4974 + if (unlikely(stype)) {
4975 + subtype = get_unaligned_be16(&event_packet->hdr.subtype);
4976 + if (subtype != stype)
4977 + return;
4978 + }
4979 +
4980 + /* check for BRCM oui match */
4981 if (memcmp(BRCM_OUI, &event_packet->hdr.oui[0],
4982 sizeof(event_packet->hdr.oui)))
4983 return;
4984 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4985 index f3cbf78c8899..5a0a29c4cdea 100644
4986 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4987 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c
4988 @@ -579,24 +579,6 @@ static bool brcmf_fws_ifidx_match(struct sk_buff *skb, void *arg)
4989 return ifidx == *(int *)arg;
4990 }
4991
4992 -static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
4993 - int ifidx)
4994 -{
4995 - bool (*matchfn)(struct sk_buff *, void *) = NULL;
4996 - struct sk_buff *skb;
4997 - int prec;
4998 -
4999 - if (ifidx != -1)
5000 - matchfn = brcmf_fws_ifidx_match;
5001 - for (prec = 0; prec < q->num_prec; prec++) {
5002 - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
5003 - while (skb) {
5004 - brcmu_pkt_buf_free_skb(skb);
5005 - skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
5006 - }
5007 - }
5008 -}
5009 -
5010 static void brcmf_fws_hanger_init(struct brcmf_fws_hanger *hanger)
5011 {
5012 int i;
5013 @@ -668,6 +650,28 @@ static inline int brcmf_fws_hanger_poppkt(struct brcmf_fws_hanger *h,
5014 return 0;
5015 }
5016
5017 +static void brcmf_fws_psq_flush(struct brcmf_fws_info *fws, struct pktq *q,
5018 + int ifidx)
5019 +{
5020 + bool (*matchfn)(struct sk_buff *, void *) = NULL;
5021 + struct sk_buff *skb;
5022 + int prec;
5023 + u32 hslot;
5024 +
5025 + if (ifidx != -1)
5026 + matchfn = brcmf_fws_ifidx_match;
5027 + for (prec = 0; prec < q->num_prec; prec++) {
5028 + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
5029 + while (skb) {
5030 + hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT);
5031 + brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb,
5032 + true);
5033 + brcmu_pkt_buf_free_skb(skb);
5034 + skb = brcmu_pktq_pdeq_match(q, prec, matchfn, &ifidx);
5035 + }
5036 + }
5037 +}
5038 +
5039 static int brcmf_fws_hanger_mark_suppressed(struct brcmf_fws_hanger *h,
5040 u32 slot_id)
5041 {
5042 @@ -2168,6 +2172,8 @@ void brcmf_fws_del_interface(struct brcmf_if *ifp)
5043 brcmf_fws_lock(fws);
5044 ifp->fws_desc = NULL;
5045 brcmf_dbg(TRACE, "deleting %s\n", entry->name);
5046 + brcmf_fws_macdesc_cleanup(fws, &fws->desc.iface[ifp->ifidx],
5047 + ifp->ifidx);
5048 brcmf_fws_macdesc_deinit(entry);
5049 brcmf_fws_cleanup(fws, ifp->ifidx);
5050 brcmf_fws_unlock(fws);
5051 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
5052 index 4e8397a0cbc8..ee922b052561 100644
5053 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
5054 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c
5055 @@ -1116,7 +1116,7 @@ static void brcmf_msgbuf_process_event(struct brcmf_msgbuf *msgbuf, void *buf)
5056
5057 skb->protocol = eth_type_trans(skb, ifp->ndev);
5058
5059 - brcmf_fweh_process_skb(ifp->drvr, skb);
5060 + brcmf_fweh_process_skb(ifp->drvr, skb, 0);
5061
5062 exit:
5063 brcmu_pkt_buf_free_skb(skb);
5064 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
5065 index a4308c6e72d7..44ead0fea7c6 100644
5066 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
5067 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c
5068 @@ -160,7 +160,7 @@ struct brcmf_usbdev_info {
5069
5070 struct usb_device *usbdev;
5071 struct device *dev;
5072 - struct mutex dev_init_lock;
5073 + struct completion dev_init_done;
5074
5075 int ctl_in_pipe, ctl_out_pipe;
5076 struct urb *ctl_urb; /* URB for control endpoint */
5077 @@ -684,12 +684,18 @@ static int brcmf_usb_up(struct device *dev)
5078
5079 static void brcmf_cancel_all_urbs(struct brcmf_usbdev_info *devinfo)
5080 {
5081 + int i;
5082 +
5083 if (devinfo->ctl_urb)
5084 usb_kill_urb(devinfo->ctl_urb);
5085 if (devinfo->bulk_urb)
5086 usb_kill_urb(devinfo->bulk_urb);
5087 - brcmf_usb_free_q(&devinfo->tx_postq, true);
5088 - brcmf_usb_free_q(&devinfo->rx_postq, true);
5089 + if (devinfo->tx_reqs)
5090 + for (i = 0; i < devinfo->bus_pub.ntxq; i++)
5091 + usb_kill_urb(devinfo->tx_reqs[i].urb);
5092 + if (devinfo->rx_reqs)
5093 + for (i = 0; i < devinfo->bus_pub.nrxq; i++)
5094 + usb_kill_urb(devinfo->rx_reqs[i].urb);
5095 }
5096
5097 static void brcmf_usb_down(struct device *dev)
5098 @@ -1195,11 +1201,11 @@ static void brcmf_usb_probe_phase2(struct device *dev, int ret,
5099 if (ret)
5100 goto error;
5101
5102 - mutex_unlock(&devinfo->dev_init_lock);
5103 + complete(&devinfo->dev_init_done);
5104 return;
5105 error:
5106 brcmf_dbg(TRACE, "failed: dev=%s, err=%d\n", dev_name(dev), ret);
5107 - mutex_unlock(&devinfo->dev_init_lock);
5108 + complete(&devinfo->dev_init_done);
5109 device_release_driver(dev);
5110 }
5111
5112 @@ -1267,7 +1273,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
5113 if (ret)
5114 goto fail;
5115 /* we are done */
5116 - mutex_unlock(&devinfo->dev_init_lock);
5117 + complete(&devinfo->dev_init_done);
5118 return 0;
5119 }
5120 bus->chip = bus_pub->devid;
5121 @@ -1327,11 +1333,10 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
5122
5123 devinfo->usbdev = usb;
5124 devinfo->dev = &usb->dev;
5125 - /* Take an init lock, to protect for disconnect while still loading.
5126 + /* Init completion, to protect for disconnect while still loading.
5127 * Necessary because of the asynchronous firmware load construction
5128 */
5129 - mutex_init(&devinfo->dev_init_lock);
5130 - mutex_lock(&devinfo->dev_init_lock);
5131 + init_completion(&devinfo->dev_init_done);
5132
5133 usb_set_intfdata(intf, devinfo);
5134
5135 @@ -1409,7 +1414,7 @@ brcmf_usb_probe(struct usb_interface *intf, const struct usb_device_id *id)
5136 return 0;
5137
5138 fail:
5139 - mutex_unlock(&devinfo->dev_init_lock);
5140 + complete(&devinfo->dev_init_done);
5141 kfree(devinfo);
5142 usb_set_intfdata(intf, NULL);
5143 return ret;
5144 @@ -1424,7 +1429,7 @@ brcmf_usb_disconnect(struct usb_interface *intf)
5145 devinfo = (struct brcmf_usbdev_info *)usb_get_intfdata(intf);
5146
5147 if (devinfo) {
5148 - mutex_lock(&devinfo->dev_init_lock);
5149 + wait_for_completion(&devinfo->dev_init_done);
5150 /* Make sure that devinfo still exists. Firmware probe routines
5151 * may have released the device and cleared the intfdata.
5152 */
5153 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
5154 index 8eff2753abad..d493021f6031 100644
5155 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
5156 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/vendor.c
5157 @@ -35,9 +35,10 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
5158 struct brcmf_if *ifp;
5159 const struct brcmf_vndr_dcmd_hdr *cmdhdr = data;
5160 struct sk_buff *reply;
5161 - int ret, payload, ret_len;
5162 + unsigned int payload, ret_len;
5163 void *dcmd_buf = NULL, *wr_pointer;
5164 u16 msglen, maxmsglen = PAGE_SIZE - 0x100;
5165 + int ret;
5166
5167 if (len < sizeof(*cmdhdr)) {
5168 brcmf_err("vendor command too short: %d\n", len);
5169 @@ -65,7 +66,7 @@ static int brcmf_cfg80211_vndr_cmds_dcmd_handler(struct wiphy *wiphy,
5170 brcmf_err("oversize return buffer %d\n", ret_len);
5171 ret_len = BRCMF_DCMD_MAXLEN;
5172 }
5173 - payload = max(ret_len, len) + 1;
5174 + payload = max_t(unsigned int, ret_len, len) + 1;
5175 dcmd_buf = vzalloc(payload);
5176 if (NULL == dcmd_buf)
5177 return -ENOMEM;
5178 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
5179 index b2905f01b7df..6dcd5374d9b4 100644
5180 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
5181 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
5182 @@ -1388,10 +1388,15 @@ out_err:
5183 static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
5184 {
5185 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
5186 - struct iwl_rxq *rxq = &trans_pcie->rxq[queue];
5187 + struct iwl_rxq *rxq;
5188 u32 r, i, count = 0;
5189 bool emergency = false;
5190
5191 + if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
5192 + return;
5193 +
5194 + rxq = &trans_pcie->rxq[queue];
5195 +
5196 restart:
5197 spin_lock(&rxq->lock);
5198 /* uCode's read index (stored in shared DRAM) indicates the last Rx
5199 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5200 index 2d87ebbfa4da..47ec5293c045 100644
5201 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5202 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
5203 @@ -4045,16 +4045,20 @@ static int mwifiex_tm_cmd(struct wiphy *wiphy, struct wireless_dev *wdev,
5204
5205 if (mwifiex_send_cmd(priv, 0, 0, 0, hostcmd, true)) {
5206 dev_err(priv->adapter->dev, "Failed to process hostcmd\n");
5207 + kfree(hostcmd);
5208 return -EFAULT;
5209 }
5210
5211 /* process hostcmd response*/
5212 skb = cfg80211_testmode_alloc_reply_skb(wiphy, hostcmd->len);
5213 - if (!skb)
5214 + if (!skb) {
5215 + kfree(hostcmd);
5216 return -ENOMEM;
5217 + }
5218 err = nla_put(skb, MWIFIEX_TM_ATTR_DATA,
5219 hostcmd->len, hostcmd->cmd);
5220 if (err) {
5221 + kfree(hostcmd);
5222 kfree_skb(skb);
5223 return -EMSGSIZE;
5224 }
5225 diff --git a/drivers/net/wireless/marvell/mwifiex/cfp.c b/drivers/net/wireless/marvell/mwifiex/cfp.c
5226 index bfe84e55df77..f1522fb1c1e8 100644
5227 --- a/drivers/net/wireless/marvell/mwifiex/cfp.c
5228 +++ b/drivers/net/wireless/marvell/mwifiex/cfp.c
5229 @@ -531,5 +531,8 @@ u8 mwifiex_adjust_data_rate(struct mwifiex_private *priv,
5230 rate_index = (rx_rate > MWIFIEX_RATE_INDEX_OFDM0) ?
5231 rx_rate - 1 : rx_rate;
5232
5233 + if (rate_index >= MWIFIEX_MAX_AC_RX_RATES)
5234 + rate_index = MWIFIEX_MAX_AC_RX_RATES - 1;
5235 +
5236 return rate_index;
5237 }
5238 diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
5239 index ef9b502ce576..a3189294ecb8 100644
5240 --- a/drivers/net/wireless/realtek/rtlwifi/base.c
5241 +++ b/drivers/net/wireless/realtek/rtlwifi/base.c
5242 @@ -469,6 +469,11 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
5243 /* <2> work queue */
5244 rtlpriv->works.hw = hw;
5245 rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
5246 + if (unlikely(!rtlpriv->works.rtl_wq)) {
5247 + pr_err("Failed to allocate work queue\n");
5248 + return;
5249 + }
5250 +
5251 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
5252 (void *)rtl_watchdog_wq_callback);
5253 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
5254 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
5255 index 63874512598b..b5f91c994c79 100644
5256 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
5257 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/fw.c
5258 @@ -622,6 +622,8 @@ void rtl88e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
5259 u1rsvdpageloc, 3);
5260
5261 skb = dev_alloc_skb(totalpacketlen);
5262 + if (!skb)
5263 + return;
5264 skb_put_data(skb, &reserved_page_packet, totalpacketlen);
5265
5266 rtstatus = rtl_cmd_send_packet(hw, skb);
5267 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
5268 index f3bff66e85d0..81ec0e6e07c1 100644
5269 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
5270 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192c/fw_common.c
5271 @@ -646,6 +646,8 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
5272
5273
5274 skb = dev_alloc_skb(totalpacketlen);
5275 + if (!skb)
5276 + return;
5277 skb_put_data(skb, &reserved_page_packet, totalpacketlen);
5278
5279 if (cmd_send_packet)
5280 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
5281 index 84a0d0eb72e1..a933490928ba 100644
5282 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
5283 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/fw.c
5284 @@ -766,6 +766,8 @@ void rtl92ee_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
5285 u1rsvdpageloc, 3);
5286
5287 skb = dev_alloc_skb(totalpacketlen);
5288 + if (!skb)
5289 + return;
5290 skb_put_data(skb, &reserved_page_packet, totalpacketlen);
5291
5292 rtstatus = rtl_cmd_send_packet(hw, skb);
5293 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
5294 index bf9859f74b6f..52f108744e96 100644
5295 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
5296 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/fw.c
5297 @@ -470,6 +470,8 @@ void rtl8723e_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
5298 u1rsvdpageloc, 3);
5299
5300 skb = dev_alloc_skb(totalpacketlen);
5301 + if (!skb)
5302 + return;
5303 skb_put_data(skb, &reserved_page_packet, totalpacketlen);
5304
5305 rtstatus = rtl_cmd_send_packet(hw, skb);
5306 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
5307 index f2441fbb92f1..307c2bd77f06 100644
5308 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
5309 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/fw.c
5310 @@ -584,6 +584,8 @@ void rtl8723be_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
5311 u1rsvdpageloc, sizeof(u1rsvdpageloc));
5312
5313 skb = dev_alloc_skb(totalpacketlen);
5314 + if (!skb)
5315 + return;
5316 skb_put_data(skb, &reserved_page_packet, totalpacketlen);
5317
5318 rtstatus = rtl_cmd_send_packet(hw, skb);
5319 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
5320 index d868a034659f..d7235f6165fd 100644
5321 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
5322 +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/fw.c
5323 @@ -1645,6 +1645,8 @@ out:
5324 &reserved_page_packet_8812[0], totalpacketlen);
5325
5326 skb = dev_alloc_skb(totalpacketlen);
5327 + if (!skb)
5328 + return;
5329 skb_put_data(skb, &reserved_page_packet_8812, totalpacketlen);
5330
5331 rtstatus = rtl_cmd_send_packet(hw, skb);
5332 @@ -1781,6 +1783,8 @@ out:
5333 &reserved_page_packet_8821[0], totalpacketlen);
5334
5335 skb = dev_alloc_skb(totalpacketlen);
5336 + if (!skb)
5337 + return;
5338 skb_put_data(skb, &reserved_page_packet_8821, totalpacketlen);
5339
5340 rtstatus = rtl_cmd_send_packet(hw, skb);
5341 diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
5342 index 4e510cbe0a89..be59d66585d6 100644
5343 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
5344 +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
5345 @@ -188,27 +188,27 @@ bool rsi_is_cipher_wep(struct rsi_common *common)
5346 * @adapter: Pointer to the adapter structure.
5347 * @band: Operating band to be set.
5348 *
5349 - * Return: None.
5350 + * Return: int - 0 on success, negative error on failure.
5351 */
5352 -static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
5353 +static int rsi_register_rates_channels(struct rsi_hw *adapter, int band)
5354 {
5355 struct ieee80211_supported_band *sbands = &adapter->sbands[band];
5356 void *channels = NULL;
5357
5358 if (band == NL80211_BAND_2GHZ) {
5359 - channels = kmalloc(sizeof(rsi_2ghz_channels), GFP_KERNEL);
5360 - memcpy(channels,
5361 - rsi_2ghz_channels,
5362 - sizeof(rsi_2ghz_channels));
5363 + channels = kmemdup(rsi_2ghz_channels, sizeof(rsi_2ghz_channels),
5364 + GFP_KERNEL);
5365 + if (!channels)
5366 + return -ENOMEM;
5367 sbands->band = NL80211_BAND_2GHZ;
5368 sbands->n_channels = ARRAY_SIZE(rsi_2ghz_channels);
5369 sbands->bitrates = rsi_rates;
5370 sbands->n_bitrates = ARRAY_SIZE(rsi_rates);
5371 } else {
5372 - channels = kmalloc(sizeof(rsi_5ghz_channels), GFP_KERNEL);
5373 - memcpy(channels,
5374 - rsi_5ghz_channels,
5375 - sizeof(rsi_5ghz_channels));
5376 + channels = kmemdup(rsi_5ghz_channels, sizeof(rsi_5ghz_channels),
5377 + GFP_KERNEL);
5378 + if (!channels)
5379 + return -ENOMEM;
5380 sbands->band = NL80211_BAND_5GHZ;
5381 sbands->n_channels = ARRAY_SIZE(rsi_5ghz_channels);
5382 sbands->bitrates = &rsi_rates[4];
5383 @@ -227,6 +227,7 @@ static void rsi_register_rates_channels(struct rsi_hw *adapter, int band)
5384 sbands->ht_cap.mcs.rx_mask[0] = 0xff;
5385 sbands->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
5386 /* sbands->ht_cap.mcs.rx_highest = 0x82; */
5387 + return 0;
5388 }
5389
5390 /**
5391 @@ -1985,11 +1986,16 @@ int rsi_mac80211_attach(struct rsi_common *common)
5392 wiphy->available_antennas_rx = 1;
5393 wiphy->available_antennas_tx = 1;
5394
5395 - rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
5396 + status = rsi_register_rates_channels(adapter, NL80211_BAND_2GHZ);
5397 + if (status)
5398 + return status;
5399 wiphy->bands[NL80211_BAND_2GHZ] =
5400 &adapter->sbands[NL80211_BAND_2GHZ];
5401 if (common->num_supp_bands > 1) {
5402 - rsi_register_rates_channels(adapter, NL80211_BAND_5GHZ);
5403 + status = rsi_register_rates_channels(adapter,
5404 + NL80211_BAND_5GHZ);
5405 + if (status)
5406 + return status;
5407 wiphy->bands[NL80211_BAND_5GHZ] =
5408 &adapter->sbands[NL80211_BAND_5GHZ];
5409 }
5410 diff --git a/drivers/net/wireless/st/cw1200/main.c b/drivers/net/wireless/st/cw1200/main.c
5411 index 90dc979f260b..c1608f0bf6d0 100644
5412 --- a/drivers/net/wireless/st/cw1200/main.c
5413 +++ b/drivers/net/wireless/st/cw1200/main.c
5414 @@ -345,6 +345,11 @@ static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr,
5415 mutex_init(&priv->wsm_cmd_mux);
5416 mutex_init(&priv->conf_mutex);
5417 priv->workqueue = create_singlethread_workqueue("cw1200_wq");
5418 + if (!priv->workqueue) {
5419 + ieee80211_free_hw(hw);
5420 + return NULL;
5421 + }
5422 +
5423 sema_init(&priv->scan.lock, 1);
5424 INIT_WORK(&priv->scan.work, cw1200_scan_work);
5425 INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work);
5426 diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
5427 index cff027fc2676..a7ce2f1761a2 100644
5428 --- a/drivers/nvdimm/pmem.c
5429 +++ b/drivers/nvdimm/pmem.c
5430 @@ -281,16 +281,22 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
5431 return __pmem_direct_access(pmem, pgoff, nr_pages, kaddr, pfn);
5432 }
5433
5434 +/*
5435 + * Use the 'no check' versions of copy_from_iter_flushcache() and
5436 + * copy_to_iter_mcsafe() to bypass HARDENED_USERCOPY overhead. Bounds
5437 + * checking, both file offset and device offset, is handled by
5438 + * dax_iomap_actor()
5439 + */
5440 static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
5441 void *addr, size_t bytes, struct iov_iter *i)
5442 {
5443 - return copy_from_iter_flushcache(addr, bytes, i);
5444 + return _copy_from_iter_flushcache(addr, bytes, i);
5445 }
5446
5447 static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
5448 void *addr, size_t bytes, struct iov_iter *i)
5449 {
5450 - return copy_to_iter_mcsafe(addr, bytes, i);
5451 + return _copy_to_iter_mcsafe(addr, bytes, i);
5452 }
5453
5454 static const struct dax_operations pmem_dax_ops = {
5455 diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
5456 index 2cdb3032ca0f..abfb46378cc1 100644
5457 --- a/drivers/nvme/host/core.c
5458 +++ b/drivers/nvme/host/core.c
5459 @@ -1480,6 +1480,10 @@ static void nvme_update_disk_info(struct gendisk *disk,
5460 sector_t capacity = le64_to_cpup(&id->nsze) << (ns->lba_shift - 9);
5461 unsigned short bs = 1 << ns->lba_shift;
5462
5463 + if (ns->lba_shift > PAGE_SHIFT) {
5464 + /* unsupported block size, set capacity to 0 later */
5465 + bs = (1 << 9);
5466 + }
5467 blk_mq_freeze_queue(disk->queue);
5468 blk_integrity_unregister(disk);
5469
5470 @@ -1490,7 +1494,8 @@ static void nvme_update_disk_info(struct gendisk *disk,
5471 if (ns->ms && !ns->ext &&
5472 (ns->ctrl->ops->flags & NVME_F_METADATA_SUPPORTED))
5473 nvme_init_integrity(disk, ns->ms, ns->pi_type);
5474 - if (ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk))
5475 + if ((ns->ms && !nvme_ns_has_pi(ns) && !blk_get_integrity(disk)) ||
5476 + ns->lba_shift > PAGE_SHIFT)
5477 capacity = 0;
5478
5479 set_capacity(disk, capacity);
5480 diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
5481 index 0939a4e178fb..e4f167e35353 100644
5482 --- a/drivers/nvme/host/rdma.c
5483 +++ b/drivers/nvme/host/rdma.c
5484 @@ -880,8 +880,9 @@ static void nvme_rdma_teardown_admin_queue(struct nvme_rdma_ctrl *ctrl,
5485 {
5486 blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
5487 nvme_rdma_stop_queue(&ctrl->queues[0]);
5488 - blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request,
5489 - &ctrl->ctrl);
5490 + if (ctrl->ctrl.admin_tagset)
5491 + blk_mq_tagset_busy_iter(ctrl->ctrl.admin_tagset,
5492 + nvme_cancel_request, &ctrl->ctrl);
5493 blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
5494 nvme_rdma_destroy_admin_queue(ctrl, remove);
5495 }
5496 @@ -892,8 +893,9 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl,
5497 if (ctrl->ctrl.queue_count > 1) {
5498 nvme_stop_queues(&ctrl->ctrl);
5499 nvme_rdma_stop_io_queues(ctrl);
5500 - blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request,
5501 - &ctrl->ctrl);
5502 + if (ctrl->ctrl.tagset)
5503 + blk_mq_tagset_busy_iter(ctrl->ctrl.tagset,
5504 + nvme_cancel_request, &ctrl->ctrl);
5505 if (remove)
5506 nvme_start_queues(&ctrl->ctrl);
5507 nvme_rdma_destroy_io_queues(ctrl, remove);
5508 diff --git a/drivers/perf/arm-cci.c b/drivers/perf/arm-cci.c
5509 index 1bfeb160c5b1..14a541c453e5 100644
5510 --- a/drivers/perf/arm-cci.c
5511 +++ b/drivers/perf/arm-cci.c
5512 @@ -1692,21 +1692,24 @@ static int cci_pmu_probe(struct platform_device *pdev)
5513 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock);
5514 mutex_init(&cci_pmu->reserve_mutex);
5515 atomic_set(&cci_pmu->active_events, 0);
5516 - cci_pmu->cpu = get_cpu();
5517 -
5518 - ret = cci_pmu_init(cci_pmu, pdev);
5519 - if (ret) {
5520 - put_cpu();
5521 - return ret;
5522 - }
5523
5524 + cci_pmu->cpu = raw_smp_processor_id();
5525 + g_cci_pmu = cci_pmu;
5526 cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE,
5527 "perf/arm/cci:online", NULL,
5528 cci_pmu_offline_cpu);
5529 - put_cpu();
5530 - g_cci_pmu = cci_pmu;
5531 +
5532 + ret = cci_pmu_init(cci_pmu, pdev);
5533 + if (ret)
5534 + goto error_pmu_init;
5535 +
5536 pr_info("ARM %s PMU driver probed", cci_pmu->model->name);
5537 return 0;
5538 +
5539 +error_pmu_init:
5540 + cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE);
5541 + g_cci_pmu = NULL;
5542 + return ret;
5543 }
5544
5545 static int cci_pmu_remove(struct platform_device *pdev)
5546 diff --git a/drivers/phy/allwinner/phy-sun4i-usb.c b/drivers/phy/allwinner/phy-sun4i-usb.c
5547 index 15c8fc2abf01..1f8809bab002 100644
5548 --- a/drivers/phy/allwinner/phy-sun4i-usb.c
5549 +++ b/drivers/phy/allwinner/phy-sun4i-usb.c
5550 @@ -550,6 +550,7 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
5551 struct sun4i_usb_phy_data *data =
5552 container_of(work, struct sun4i_usb_phy_data, detect.work);
5553 struct phy *phy0 = data->phys[0].phy;
5554 + struct sun4i_usb_phy *phy = phy_get_drvdata(phy0);
5555 bool force_session_end, id_notify = false, vbus_notify = false;
5556 int id_det, vbus_det;
5557
5558 @@ -606,6 +607,9 @@ static void sun4i_usb_phy0_id_vbus_det_scan(struct work_struct *work)
5559 mutex_unlock(&phy0->mutex);
5560 }
5561
5562 + /* Enable PHY0 passby for host mode only. */
5563 + sun4i_usb_phy_passby(phy, !id_det);
5564 +
5565 /* Re-route PHY0 if necessary */
5566 if (data->cfg->phy0_dual_route)
5567 sun4i_usb_phy0_reroute(data, id_det);
5568 diff --git a/drivers/phy/motorola/Kconfig b/drivers/phy/motorola/Kconfig
5569 index 82651524ffb9..718f8729701d 100644
5570 --- a/drivers/phy/motorola/Kconfig
5571 +++ b/drivers/phy/motorola/Kconfig
5572 @@ -13,7 +13,7 @@ config PHY_CPCAP_USB
5573
5574 config PHY_MAPPHONE_MDM6600
5575 tristate "Motorola Mapphone MDM6600 modem USB PHY driver"
5576 - depends on OF && USB_SUPPORT
5577 + depends on OF && USB_SUPPORT && GPIOLIB
5578 select GENERIC_PHY
5579 help
5580 Enable this for MDM6600 USB modem to work on Motorola phones
5581 diff --git a/drivers/pinctrl/pinctrl-pistachio.c b/drivers/pinctrl/pinctrl-pistachio.c
5582 index 302190d1558d..0d7d379e9bb8 100644
5583 --- a/drivers/pinctrl/pinctrl-pistachio.c
5584 +++ b/drivers/pinctrl/pinctrl-pistachio.c
5585 @@ -1368,6 +1368,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
5586 if (!of_find_property(child, "gpio-controller", NULL)) {
5587 dev_err(pctl->dev,
5588 "No gpio-controller property for bank %u\n", i);
5589 + of_node_put(child);
5590 ret = -ENODEV;
5591 goto err;
5592 }
5593 @@ -1375,6 +1376,7 @@ static int pistachio_gpio_register(struct pistachio_pinctrl *pctl)
5594 irq = irq_of_parse_and_map(child, 0);
5595 if (irq < 0) {
5596 dev_err(pctl->dev, "No IRQ for bank %u: %d\n", i, irq);
5597 + of_node_put(child);
5598 ret = irq;
5599 goto err;
5600 }
5601 diff --git a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
5602 index 44c6b753f692..85ddf49a5188 100644
5603 --- a/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
5604 +++ b/drivers/pinctrl/samsung/pinctrl-exynos-arm.c
5605 @@ -71,6 +71,7 @@ s5pv210_retention_init(struct samsung_pinctrl_drv_data *drvdata,
5606 }
5607
5608 clk_base = of_iomap(np, 0);
5609 + of_node_put(np);
5610 if (!clk_base) {
5611 pr_err("%s: failed to map clock registers\n", __func__);
5612 return ERR_PTR(-EINVAL);
5613 diff --git a/drivers/pinctrl/zte/pinctrl-zx.c b/drivers/pinctrl/zte/pinctrl-zx.c
5614 index caa44dd2880a..3cb69309912b 100644
5615 --- a/drivers/pinctrl/zte/pinctrl-zx.c
5616 +++ b/drivers/pinctrl/zte/pinctrl-zx.c
5617 @@ -411,6 +411,7 @@ int zx_pinctrl_init(struct platform_device *pdev,
5618 }
5619
5620 zpctl->aux_base = of_iomap(np, 0);
5621 + of_node_put(np);
5622 if (!zpctl->aux_base)
5623 return -ENOMEM;
5624
5625 diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c
5626 index 01ffc0ef8033..fbcf13bbbd8d 100644
5627 --- a/drivers/rtc/rtc-88pm860x.c
5628 +++ b/drivers/rtc/rtc-88pm860x.c
5629 @@ -414,7 +414,7 @@ static int pm860x_rtc_remove(struct platform_device *pdev)
5630 struct pm860x_rtc_info *info = platform_get_drvdata(pdev);
5631
5632 #ifdef VRTC_CALIBRATION
5633 - flush_scheduled_work();
5634 + cancel_delayed_work_sync(&info->calib_work);
5635 /* disable measurement */
5636 pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
5637 #endif /* VRTC_CALIBRATION */
5638 diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c
5639 index c5908cfea234..8e6c9b3bcc29 100644
5640 --- a/drivers/rtc/rtc-stm32.c
5641 +++ b/drivers/rtc/rtc-stm32.c
5642 @@ -788,11 +788,14 @@ static int stm32_rtc_probe(struct platform_device *pdev)
5643 ret = device_init_wakeup(&pdev->dev, true);
5644 if (rtc->data->has_wakeirq) {
5645 rtc->wakeirq_alarm = platform_get_irq(pdev, 1);
5646 - if (rtc->wakeirq_alarm <= 0)
5647 - ret = rtc->wakeirq_alarm;
5648 - else
5649 + if (rtc->wakeirq_alarm > 0) {
5650 ret = dev_pm_set_dedicated_wake_irq(&pdev->dev,
5651 rtc->wakeirq_alarm);
5652 + } else {
5653 + ret = rtc->wakeirq_alarm;
5654 + if (rtc->wakeirq_alarm == -EPROBE_DEFER)
5655 + goto err;
5656 + }
5657 }
5658 if (ret)
5659 dev_warn(&pdev->dev, "alarm can't wake up the system: %d", ret);
5660 diff --git a/drivers/rtc/rtc-xgene.c b/drivers/rtc/rtc-xgene.c
5661 index 153820876a82..2f741f455c30 100644
5662 --- a/drivers/rtc/rtc-xgene.c
5663 +++ b/drivers/rtc/rtc-xgene.c
5664 @@ -168,6 +168,10 @@ static int xgene_rtc_probe(struct platform_device *pdev)
5665 if (IS_ERR(pdata->csr_base))
5666 return PTR_ERR(pdata->csr_base);
5667
5668 + pdata->rtc = devm_rtc_allocate_device(&pdev->dev);
5669 + if (IS_ERR(pdata->rtc))
5670 + return PTR_ERR(pdata->rtc);
5671 +
5672 irq = platform_get_irq(pdev, 0);
5673 if (irq < 0) {
5674 dev_err(&pdev->dev, "No IRQ resource\n");
5675 @@ -198,15 +202,15 @@ static int xgene_rtc_probe(struct platform_device *pdev)
5676 return ret;
5677 }
5678
5679 - pdata->rtc = devm_rtc_device_register(&pdev->dev, pdev->name,
5680 - &xgene_rtc_ops, THIS_MODULE);
5681 - if (IS_ERR(pdata->rtc)) {
5682 - clk_disable_unprepare(pdata->clk);
5683 - return PTR_ERR(pdata->rtc);
5684 - }
5685 -
5686 /* HW does not support update faster than 1 seconds */
5687 pdata->rtc->uie_unsupported = 1;
5688 + pdata->rtc->ops = &xgene_rtc_ops;
5689 +
5690 + ret = rtc_register_device(pdata->rtc);
5691 + if (ret) {
5692 + clk_disable_unprepare(pdata->clk);
5693 + return ret;
5694 + }
5695
5696 return 0;
5697 }
5698 diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
5699 index 9811fd8a0c73..92eabbb5f18d 100644
5700 --- a/drivers/s390/cio/cio.h
5701 +++ b/drivers/s390/cio/cio.h
5702 @@ -115,7 +115,7 @@ struct subchannel {
5703 struct schib_config config;
5704 } __attribute__ ((aligned(8)));
5705
5706 -DECLARE_PER_CPU(struct irb, cio_irb);
5707 +DECLARE_PER_CPU_ALIGNED(struct irb, cio_irb);
5708
5709 #define to_subchannel(n) container_of(n, struct subchannel, dev)
5710
5711 diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
5712 index fabd9798e4c4..7a06cdff6572 100644
5713 --- a/drivers/s390/cio/vfio_ccw_drv.c
5714 +++ b/drivers/s390/cio/vfio_ccw_drv.c
5715 @@ -40,26 +40,30 @@ int vfio_ccw_sch_quiesce(struct subchannel *sch)
5716 if (ret != -EBUSY)
5717 goto out_unlock;
5718
5719 + iretry = 255;
5720 do {
5721 - iretry = 255;
5722
5723 ret = cio_cancel_halt_clear(sch, &iretry);
5724 - while (ret == -EBUSY) {
5725 - /*
5726 - * Flush all I/O and wait for
5727 - * cancel/halt/clear completion.
5728 - */
5729 - private->completion = &completion;
5730 - spin_unlock_irq(sch->lock);
5731
5732 - wait_for_completion_timeout(&completion, 3*HZ);
5733 + if (ret == -EIO) {
5734 + pr_err("vfio_ccw: could not quiesce subchannel 0.%x.%04x!\n",
5735 + sch->schid.ssid, sch->schid.sch_no);
5736 + break;
5737 + }
5738 +
5739 + /*
5740 + * Flush all I/O and wait for
5741 + * cancel/halt/clear completion.
5742 + */
5743 + private->completion = &completion;
5744 + spin_unlock_irq(sch->lock);
5745
5746 - spin_lock_irq(sch->lock);
5747 - private->completion = NULL;
5748 - flush_workqueue(vfio_ccw_work_q);
5749 - ret = cio_cancel_halt_clear(sch, &iretry);
5750 - };
5751 + if (ret == -EBUSY)
5752 + wait_for_completion_timeout(&completion, 3*HZ);
5753
5754 + private->completion = NULL;
5755 + flush_workqueue(vfio_ccw_work_q);
5756 + spin_lock_irq(sch->lock);
5757 ret = cio_disable_subchannel(sch);
5758 } while (ret == -EBUSY);
5759 out_unlock:
5760 diff --git a/drivers/s390/cio/vfio_ccw_ops.c b/drivers/s390/cio/vfio_ccw_ops.c
5761 index f673e106c041..dc5ff47de3fe 100644
5762 --- a/drivers/s390/cio/vfio_ccw_ops.c
5763 +++ b/drivers/s390/cio/vfio_ccw_ops.c
5764 @@ -130,11 +130,12 @@ static int vfio_ccw_mdev_remove(struct mdev_device *mdev)
5765
5766 if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
5767 (private->state != VFIO_CCW_STATE_STANDBY)) {
5768 - if (!vfio_ccw_mdev_reset(mdev))
5769 + if (!vfio_ccw_sch_quiesce(private->sch))
5770 private->state = VFIO_CCW_STATE_STANDBY;
5771 /* The state will be NOT_OPER on error. */
5772 }
5773
5774 + cp_free(&private->cp);
5775 private->mdev = NULL;
5776 atomic_inc(&private->avail);
5777
5778 @@ -158,6 +159,14 @@ static void vfio_ccw_mdev_release(struct mdev_device *mdev)
5779 struct vfio_ccw_private *private =
5780 dev_get_drvdata(mdev_parent_dev(mdev));
5781
5782 + if ((private->state != VFIO_CCW_STATE_NOT_OPER) &&
5783 + (private->state != VFIO_CCW_STATE_STANDBY)) {
5784 + if (!vfio_ccw_mdev_reset(mdev))
5785 + private->state = VFIO_CCW_STATE_STANDBY;
5786 + /* The state will be NOT_OPER on error. */
5787 + }
5788 +
5789 + cp_free(&private->cp);
5790 vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
5791 &private->nb);
5792 }
5793 diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
5794 index e6854127b434..b2737bfeb8bb 100644
5795 --- a/drivers/s390/crypto/zcrypt_api.c
5796 +++ b/drivers/s390/crypto/zcrypt_api.c
5797 @@ -224,6 +224,7 @@ static long zcrypt_rsa_modexpo(struct ica_rsa_modexpo *mex)
5798 trace_s390_zcrypt_req(mex, TP_ICARSAMODEXPO);
5799
5800 if (mex->outputdatalength < mex->inputdatalength) {
5801 + func_code = 0;
5802 rc = -EINVAL;
5803 goto out;
5804 }
5805 @@ -298,6 +299,7 @@ static long zcrypt_rsa_crt(struct ica_rsa_modexpo_crt *crt)
5806 trace_s390_zcrypt_req(crt, TP_ICARSACRT);
5807
5808 if (crt->outputdatalength < crt->inputdatalength) {
5809 + func_code = 0;
5810 rc = -EINVAL;
5811 goto out;
5812 }
5813 @@ -483,6 +485,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
5814
5815 targets = kcalloc(target_num, sizeof(*targets), GFP_KERNEL);
5816 if (!targets) {
5817 + func_code = 0;
5818 rc = -ENOMEM;
5819 goto out;
5820 }
5821 @@ -490,6 +493,7 @@ static long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
5822 uptr = (struct ep11_target_dev __force __user *) xcrb->targets;
5823 if (copy_from_user(targets, uptr,
5824 target_num * sizeof(*targets))) {
5825 + func_code = 0;
5826 rc = -EFAULT;
5827 goto out_free;
5828 }
5829 diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
5830 index 2d1f6a583641..b2657582cfcf 100644
5831 --- a/drivers/s390/net/qeth_core.h
5832 +++ b/drivers/s390/net/qeth_core.h
5833 @@ -201,6 +201,12 @@ struct qeth_vnicc_info {
5834 bool rx_bcast_enabled;
5835 };
5836
5837 +static inline int qeth_is_adp_supported(struct qeth_ipa_info *ipa,
5838 + enum qeth_ipa_setadp_cmd func)
5839 +{
5840 + return (ipa->supported_funcs & func);
5841 +}
5842 +
5843 static inline int qeth_is_ipa_supported(struct qeth_ipa_info *ipa,
5844 enum qeth_ipa_funcs func)
5845 {
5846 @@ -214,9 +220,7 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
5847 }
5848
5849 #define qeth_adp_supported(c, f) \
5850 - qeth_is_ipa_supported(&c->options.adp, f)
5851 -#define qeth_adp_enabled(c, f) \
5852 - qeth_is_ipa_enabled(&c->options.adp, f)
5853 + qeth_is_adp_supported(&c->options.adp, f)
5854 #define qeth_is_supported(c, f) \
5855 qeth_is_ipa_supported(&c->options.ipa4, f)
5856 #define qeth_is_enabled(c, f) \
5857 diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
5858 index 56aacf32f71b..461afc276db7 100644
5859 --- a/drivers/s390/net/qeth_core_main.c
5860 +++ b/drivers/s390/net/qeth_core_main.c
5861 @@ -1370,7 +1370,7 @@ static void qeth_set_multiple_write_queues(struct qeth_card *card)
5862 card->qdio.no_out_queues = 4;
5863 }
5864
5865 -static void qeth_update_from_chp_desc(struct qeth_card *card)
5866 +static int qeth_update_from_chp_desc(struct qeth_card *card)
5867 {
5868 struct ccw_device *ccwdev;
5869 struct channel_path_desc_fmt0 *chp_dsc;
5870 @@ -1380,7 +1380,7 @@ static void qeth_update_from_chp_desc(struct qeth_card *card)
5871 ccwdev = card->data.ccwdev;
5872 chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
5873 if (!chp_dsc)
5874 - goto out;
5875 + return -ENOMEM;
5876
5877 card->info.func_level = 0x4100 + chp_dsc->desc;
5878 if (card->info.type == QETH_CARD_TYPE_IQD)
5879 @@ -1395,6 +1395,7 @@ out:
5880 kfree(chp_dsc);
5881 QETH_DBF_TEXT_(SETUP, 2, "nr:%x", card->qdio.no_out_queues);
5882 QETH_DBF_TEXT_(SETUP, 2, "lvl:%02x", card->info.func_level);
5883 + return 0;
5884 }
5885
5886 static void qeth_init_qdio_info(struct qeth_card *card)
5887 @@ -5090,7 +5091,9 @@ int qeth_core_hardsetup_card(struct qeth_card *card)
5888
5889 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
5890 atomic_set(&card->force_alloc_skb, 0);
5891 - qeth_update_from_chp_desc(card);
5892 + rc = qeth_update_from_chp_desc(card);
5893 + if (rc)
5894 + return rc;
5895 retry:
5896 if (retries < 3)
5897 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
5898 @@ -5768,7 +5771,9 @@ static int qeth_core_probe_device(struct ccwgroup_device *gdev)
5899 gdev->cdev[2]->handler = qeth_irq;
5900
5901 qeth_setup_card(card);
5902 - qeth_update_from_chp_desc(card);
5903 + rc = qeth_update_from_chp_desc(card);
5904 + if (rc)
5905 + goto err_chp_desc;
5906
5907 card->dev = qeth_alloc_netdev(card);
5908 if (!card->dev) {
5909 @@ -5806,6 +5811,7 @@ err_disc:
5910 qeth_core_free_discipline(card);
5911 err_load:
5912 free_netdev(card->dev);
5913 +err_chp_desc:
5914 err_card:
5915 qeth_core_free_card(card);
5916 err_dev:
5917 diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
5918 index e9ecc667e3fb..231eb79efa32 100644
5919 --- a/drivers/scsi/libsas/sas_expander.c
5920 +++ b/drivers/scsi/libsas/sas_expander.c
5921 @@ -2040,6 +2040,11 @@ static int sas_rediscover_dev(struct domain_device *dev, int phy_id, bool last)
5922 if ((SAS_ADDR(sas_addr) == 0) || (res == -ECOMM)) {
5923 phy->phy_state = PHY_EMPTY;
5924 sas_unregister_devs_sas_addr(dev, phy_id, last);
5925 + /*
5926 + * Even though the PHY is empty, for convenience we discover
5927 + * the PHY to update the PHY info, like negotiated linkrate.
5928 + */
5929 + sas_ex_phy_discover(dev, phy_id);
5930 return res;
5931 } else if (SAS_ADDR(sas_addr) == SAS_ADDR(phy->attached_sas_addr) &&
5932 dev_type_flutter(type, phy->attached_dev_type)) {
5933 diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
5934 index 1a964e71582f..d909d90035bb 100644
5935 --- a/drivers/scsi/lpfc/lpfc_ct.c
5936 +++ b/drivers/scsi/lpfc/lpfc_ct.c
5937 @@ -1762,6 +1762,9 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
5938 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5939 memset(ae, 0, 256);
5940
5941 + /* This string MUST be consistent with other FC platforms
5942 + * supported by Broadcom.
5943 + */
5944 strncpy(ae->un.AttrString,
5945 "Emulex Corporation",
5946 sizeof(ae->un.AttrString));
5947 @@ -2117,10 +2120,11 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
5948 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5949 memset(ae, 0, 32);
5950
5951 - ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
5952 - ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
5953 - ae->un.AttrTypes[6] = 0x01; /* Type 40 - NVME */
5954 - ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
5955 + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
5956 + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
5957 + if (vport->nvmei_support || vport->phba->nvmet_support)
5958 + ae->un.AttrTypes[6] = 0x01; /* Type 0x28 - NVME */
5959 + ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
5960 size = FOURBYTES + 32;
5961 ad->AttrLen = cpu_to_be16(size);
5962 ad->AttrType = cpu_to_be16(RPRT_SUPPORTED_FC4_TYPES);
5963 @@ -2425,9 +2429,11 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
5964 ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
5965 memset(ae, 0, 32);
5966
5967 - ae->un.AttrTypes[3] = 0x02; /* Type 1 - ELS */
5968 - ae->un.AttrTypes[2] = 0x01; /* Type 8 - FCP */
5969 - ae->un.AttrTypes[7] = 0x01; /* Type 32 - CT */
5970 + ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
5971 + ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
5972 + if (vport->phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
5973 + ae->un.AttrTypes[6] = 0x1; /* Type 0x28 - NVME */
5974 + ae->un.AttrTypes[7] = 0x01; /* Type 0x20 - CT */
5975 size = FOURBYTES + 32;
5976 ad->AttrLen = cpu_to_be16(size);
5977 ad->AttrType = cpu_to_be16(RPRT_ACTIVE_FC4_TYPES);
5978 diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
5979 index eb71877f12f8..ccdd82b1123f 100644
5980 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c
5981 +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
5982 @@ -921,7 +921,11 @@ lpfc_linkdown(struct lpfc_hba *phba)
5983 }
5984 }
5985 lpfc_destroy_vport_work_array(phba, vports);
5986 - /* Clean up any firmware default rpi's */
5987 +
5988 + /* Clean up any SLI3 firmware default rpi's */
5989 + if (phba->sli_rev > LPFC_SLI_REV3)
5990 + goto skip_unreg_did;
5991 +
5992 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5993 if (mb) {
5994 lpfc_unreg_did(phba, 0xffff, LPFC_UNREG_ALL_DFLT_RPIS, mb);
5995 @@ -933,6 +937,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
5996 }
5997 }
5998
5999 + skip_unreg_did:
6000 /* Setup myDID for link up if we are in pt2pt mode */
6001 if (phba->pport->fc_flag & FC_PT2PT) {
6002 mb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6003 @@ -4855,6 +4860,10 @@ lpfc_unreg_default_rpis(struct lpfc_vport *vport)
6004 LPFC_MBOXQ_t *mbox;
6005 int rc;
6006
6007 + /* Unreg DID is an SLI3 operation. */
6008 + if (phba->sli_rev > LPFC_SLI_REV3)
6009 + return;
6010 +
6011 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6012 if (mbox) {
6013 lpfc_unreg_did(phba, vport->vpi, LPFC_UNREG_ALL_DFLT_RPIS,
6014 diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
6015 index ca62117a2d13..099f70798fdd 100644
6016 --- a/drivers/scsi/lpfc/lpfc_nvme.c
6017 +++ b/drivers/scsi/lpfc/lpfc_nvme.c
6018 @@ -2482,15 +2482,15 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
6019 if (!cstat)
6020 return -ENOMEM;
6021
6022 + if (!IS_ENABLED(CONFIG_NVME_FC))
6023 + return ret;
6024 +
6025 /* localport is allocated from the stack, but the registration
6026 * call allocates heap memory as well as the private area.
6027 */
6028 -#if (IS_ENABLED(CONFIG_NVME_FC))
6029 +
6030 ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
6031 &vport->phba->pcidev->dev, &localport);
6032 -#else
6033 - ret = -ENOMEM;
6034 -#endif
6035 if (!ret) {
6036 lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
6037 "6005 Successfully registered local "
6038 diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
6039 index 6bbc38b1b465..a17c13846d1e 100644
6040 --- a/drivers/scsi/qedf/qedf_io.c
6041 +++ b/drivers/scsi/qedf/qedf_io.c
6042 @@ -902,6 +902,7 @@ int qedf_post_io_req(struct qedf_rport *fcport, struct qedf_ioreq *io_req)
6043 if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
6044 QEDF_ERR(&(qedf->dbg_ctx), "Session not offloaded yet.\n");
6045 kref_put(&io_req->refcount, qedf_release_cmd);
6046 + return -EINVAL;
6047 }
6048
6049 /* Obtain free SQE */
6050 diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
6051 index d4821b9dea45..4130b9117055 100644
6052 --- a/drivers/scsi/qedi/qedi_iscsi.c
6053 +++ b/drivers/scsi/qedi/qedi_iscsi.c
6054 @@ -1001,6 +1001,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
6055 qedi_ep = ep->dd_data;
6056 qedi = qedi_ep->qedi;
6057
6058 + if (qedi_ep->state == EP_STATE_OFLDCONN_START)
6059 + goto ep_exit_recover;
6060 +
6061 flush_work(&qedi_ep->offload_work);
6062
6063 if (qedi_ep->conn) {
6064 diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
6065 index 36cbb29c84f6..88d8acf86a2a 100644
6066 --- a/drivers/scsi/qla2xxx/qla_isr.c
6067 +++ b/drivers/scsi/qla2xxx/qla_isr.c
6068 @@ -3449,7 +3449,7 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
6069 ql_log(ql_log_fatal, vha, 0x00c8,
6070 "Failed to allocate memory for ha->msix_entries.\n");
6071 ret = -ENOMEM;
6072 - goto msix_out;
6073 + goto free_irqs;
6074 }
6075 ha->flags.msix_enabled = 1;
6076
6077 @@ -3532,6 +3532,10 @@ msix_register_fail:
6078
6079 msix_out:
6080 return ret;
6081 +
6082 +free_irqs:
6083 + pci_free_irq_vectors(ha->pdev);
6084 + goto msix_out;
6085 }
6086
6087 int
6088 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
6089 index a8c67cd17625..9d7feb005acf 100644
6090 --- a/drivers/scsi/qla2xxx/qla_target.c
6091 +++ b/drivers/scsi/qla2xxx/qla_target.c
6092 @@ -684,7 +684,6 @@ done:
6093 void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
6094 {
6095 fc_port_t *t;
6096 - unsigned long flags;
6097
6098 switch (e->u.nack.type) {
6099 case SRB_NACK_PRLI:
6100 @@ -694,10 +693,8 @@ void qla24xx_do_nack_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
6101 if (t) {
6102 ql_log(ql_log_info, vha, 0xd034,
6103 "%s create sess success %p", __func__, t);
6104 - spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6105 /* create sess has an extra kref */
6106 vha->hw->tgt.tgt_ops->put_sess(e->u.nack.fcport);
6107 - spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6108 }
6109 break;
6110 }
6111 @@ -709,9 +706,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
6112 {
6113 fc_port_t *fcport = container_of(work, struct fc_port, del_work);
6114 struct qla_hw_data *ha = fcport->vha->hw;
6115 - unsigned long flags;
6116 -
6117 - spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6118
6119 if (fcport->se_sess) {
6120 ha->tgt.tgt_ops->shutdown_sess(fcport);
6121 @@ -719,7 +713,6 @@ void qla24xx_delete_sess_fn(struct work_struct *work)
6122 } else {
6123 qlt_unreg_sess(fcport);
6124 }
6125 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6126 }
6127
6128 /*
6129 @@ -788,8 +781,9 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
6130 fcport->port_name, sess->loop_id);
6131 sess->local = 0;
6132 }
6133 - ha->tgt.tgt_ops->put_sess(sess);
6134 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6135 +
6136 + ha->tgt.tgt_ops->put_sess(sess);
6137 }
6138
6139 /*
6140 @@ -4135,9 +4129,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
6141 /*
6142 * Drop extra session reference from qla_tgt_handle_cmd_for_atio*(
6143 */
6144 - spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6145 ha->tgt.tgt_ops->put_sess(sess);
6146 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6147 return;
6148
6149 out_term:
6150 @@ -4154,9 +4146,7 @@ out_term:
6151 target_free_tag(sess->se_sess, &cmd->se_cmd);
6152 spin_unlock_irqrestore(qpair->qp_lock_ptr, flags);
6153
6154 - spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6155 ha->tgt.tgt_ops->put_sess(sess);
6156 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6157 }
6158
6159 static void qlt_do_work(struct work_struct *work)
6160 @@ -4365,9 +4355,7 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
6161 if (!cmd) {
6162 ql_dbg(ql_dbg_io, vha, 0x3062,
6163 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
6164 - spin_lock_irqsave(&ha->tgt.sess_lock, flags);
6165 ha->tgt.tgt_ops->put_sess(sess);
6166 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6167 return -EBUSY;
6168 }
6169
6170 @@ -6105,17 +6093,19 @@ static void qlt_abort_work(struct qla_tgt *tgt,
6171 }
6172
6173 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
6174 - ha->tgt.tgt_ops->put_sess(sess);
6175 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6176
6177 + ha->tgt.tgt_ops->put_sess(sess);
6178 +
6179 if (rc != 0)
6180 goto out_term;
6181 return;
6182
6183 out_term2:
6184 + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6185 +
6186 if (sess)
6187 ha->tgt.tgt_ops->put_sess(sess);
6188 - spin_unlock_irqrestore(&ha->tgt.sess_lock, flags2);
6189
6190 out_term:
6191 spin_lock_irqsave(&ha->hardware_lock, flags);
6192 @@ -6175,9 +6165,10 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
6193 scsilun_to_int((struct scsi_lun *)&a->u.isp24.fcp_cmnd.lun);
6194
6195 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
6196 - ha->tgt.tgt_ops->put_sess(sess);
6197 spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
6198
6199 + ha->tgt.tgt_ops->put_sess(sess);
6200 +
6201 if (rc != 0)
6202 goto out_term;
6203 return;
6204 diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
6205 index 64e2d859f633..b8c1a739dfbd 100644
6206 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
6207 +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
6208 @@ -350,7 +350,6 @@ static void tcm_qla2xxx_put_sess(struct fc_port *sess)
6209 if (!sess)
6210 return;
6211
6212 - assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
6213 kref_put(&sess->sess_kref, tcm_qla2xxx_release_session);
6214 }
6215
6216 @@ -365,8 +364,9 @@ static void tcm_qla2xxx_close_session(struct se_session *se_sess)
6217
6218 spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
6219 target_sess_cmd_list_set_waiting(se_sess);
6220 - tcm_qla2xxx_put_sess(sess);
6221 spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
6222 +
6223 + tcm_qla2xxx_put_sess(sess);
6224 }
6225
6226 static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
6227 @@ -390,6 +390,8 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
6228 cmd->se_cmd.transport_state,
6229 cmd->se_cmd.t_state,
6230 cmd->se_cmd.se_cmd_flags);
6231 + transport_generic_request_failure(&cmd->se_cmd,
6232 + TCM_CHECK_CONDITION_ABORT_CMD);
6233 return 0;
6234 }
6235 cmd->trc_flags |= TRC_XFR_RDY;
6236 @@ -829,7 +831,6 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct fc_port *sess)
6237
6238 static void tcm_qla2xxx_shutdown_sess(struct fc_port *sess)
6239 {
6240 - assert_spin_locked(&sess->vha->hw->tgt.sess_lock);
6241 target_sess_cmd_list_set_waiting(sess->se_sess);
6242 }
6243
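
The qla2xxx hunks above all apply one pattern: the final put_sess() call, a kref_put whose release work is not safe to run under the irq-disabled sess_lock, is moved outside the sess_lock critical section, and the assert_spin_locked() checks in tcm_qla2xxx.c go away because put_sess() no longer has to be called with that lock held. A minimal userspace sketch of the ordering being enforced, with a pthread mutex and a plain counter standing in for the kernel's spinlock and kref (none of this is qla2xxx code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct session {
		int refcount;
	};

	static pthread_mutex_t sess_lock = PTHREAD_MUTEX_INITIALIZER;

	static void session_release(struct session *s)
	{
		/* In the driver this path may sleep or take other locks,
		 * so it must run with no locks held. */
		printf("releasing session %p\n", (void *)s);
		free(s);
	}

	static void session_put(struct session *s)
	{
		int last;

		pthread_mutex_lock(&sess_lock);
		last = (--s->refcount == 0);
		pthread_mutex_unlock(&sess_lock);

		if (last)
			session_release(s);	/* no locks held here */
	}

	int main(void)
	{
		struct session *s = calloc(1, sizeof(*s));

		if (!s)
			return 1;
		s->refcount = 1;

		pthread_mutex_lock(&sess_lock);
		/* ... update per-session state under the lock ... */
		pthread_mutex_unlock(&sess_lock);

		session_put(s);		/* reference dropped after unlocking */
		return 0;
	}

Finishing the locked section first and only then dropping the reference is exactly the reordering visible in qla24xx_do_nack_work(), qlt_fc_port_added(), qlt_abort_work(), qlt_tmr_work() and tcm_qla2xxx_close_session() above.
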
6244 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
6245 index 5dd3e4e01b10..25c8ce54a976 100644
6246 --- a/drivers/scsi/qla4xxx/ql4_os.c
6247 +++ b/drivers/scsi/qla4xxx/ql4_os.c
6248 @@ -5935,7 +5935,7 @@ static int get_fw_boot_info(struct scsi_qla_host *ha, uint16_t ddb_index[])
6249 val = rd_nvram_byte(ha, sec_addr);
6250 if (val & BIT_7)
6251 ddb_index[1] = (val & 0x7f);
6252 -
6253 + goto exit_boot_info;
6254 } else if (is_qla80XX(ha)) {
6255 buf = dma_alloc_coherent(&ha->pdev->dev, size,
6256 &buf_dma, GFP_KERNEL);
6257 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
6258 index e925eda93191..77cb45ef55fc 100644
6259 --- a/drivers/scsi/sd.c
6260 +++ b/drivers/scsi/sd.c
6261 @@ -2605,7 +2605,6 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
6262 int res;
6263 struct scsi_device *sdp = sdkp->device;
6264 struct scsi_mode_data data;
6265 - int disk_ro = get_disk_ro(sdkp->disk);
6266 int old_wp = sdkp->write_prot;
6267
6268 set_disk_ro(sdkp->disk, 0);
6269 @@ -2646,7 +2645,7 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
6270 "Test WP failed, assume Write Enabled\n");
6271 } else {
6272 sdkp->write_prot = ((data.device_specific & 0x80) != 0);
6273 - set_disk_ro(sdkp->disk, sdkp->write_prot || disk_ro);
6274 + set_disk_ro(sdkp->disk, sdkp->write_prot);
6275 if (sdkp->first_scan || old_wp != sdkp->write_prot) {
6276 sd_printk(KERN_NOTICE, sdkp, "Write Protect is %s\n",
6277 sdkp->write_prot ? "on" : "off");
6278 diff --git a/drivers/scsi/ufs/ufs-hisi.c b/drivers/scsi/ufs/ufs-hisi.c
6279 index 452e19f8fb47..c2cee73a8560 100644
6280 --- a/drivers/scsi/ufs/ufs-hisi.c
6281 +++ b/drivers/scsi/ufs/ufs-hisi.c
6282 @@ -544,6 +544,10 @@ static int ufs_hisi_init_common(struct ufs_hba *hba)
6283 ufshcd_set_variant(hba, host);
6284
6285 host->rst = devm_reset_control_get(dev, "rst");
6286 + if (IS_ERR(host->rst)) {
6287 + dev_err(dev, "%s: failed to get reset control\n", __func__);
6288 + return PTR_ERR(host->rst);
6289 + }
6290
6291 ufs_hisi_set_pm_lvl(hba);
6292
6293 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
6294 index 6e80dfe4fa97..3183fa8c5857 100644
6295 --- a/drivers/scsi/ufs/ufshcd.c
6296 +++ b/drivers/scsi/ufs/ufshcd.c
6297 @@ -6130,19 +6130,19 @@ static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
6298 goto out;
6299 }
6300
6301 - if (hba->vreg_info.vcc)
6302 + if (hba->vreg_info.vcc && hba->vreg_info.vcc->max_uA)
6303 icc_level = ufshcd_get_max_icc_level(
6304 hba->vreg_info.vcc->max_uA,
6305 POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
6306 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
6307
6308 - if (hba->vreg_info.vccq)
6309 + if (hba->vreg_info.vccq && hba->vreg_info.vccq->max_uA)
6310 icc_level = ufshcd_get_max_icc_level(
6311 hba->vreg_info.vccq->max_uA,
6312 icc_level,
6313 &desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
6314
6315 - if (hba->vreg_info.vccq2)
6316 + if (hba->vreg_info.vccq2 && hba->vreg_info.vccq2->max_uA)
6317 icc_level = ufshcd_get_max_icc_level(
6318 hba->vreg_info.vccq2->max_uA,
6319 icc_level,
6320 @@ -6767,6 +6767,15 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
6321 if (!vreg)
6322 return 0;
6323
6324 + /*
6325 + * "set_load" operation shall be required on those regulators
6326 + * which specifically configured current limitation. Otherwise
6327 + * zero max_uA may cause unexpected behavior when regulator is
6328 + * enabled or set as high power mode.
6329 + */
6330 + if (!vreg->max_uA)
6331 + return 0;
6332 +
6333 ret = regulator_set_load(vreg->reg, ua);
6334 if (ret < 0) {
6335 dev_err(dev, "%s: %s set load (ua=%d) failed, err=%d\n",
6336 @@ -6813,12 +6822,15 @@ static int ufshcd_config_vreg(struct device *dev,
6337 name = vreg->name;
6338
6339 if (regulator_count_voltages(reg) > 0) {
6340 - min_uV = on ? vreg->min_uV : 0;
6341 - ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6342 - if (ret) {
6343 - dev_err(dev, "%s: %s set voltage failed, err=%d\n",
6344 + if (vreg->min_uV && vreg->max_uV) {
6345 + min_uV = on ? vreg->min_uV : 0;
6346 + ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
6347 + if (ret) {
6348 + dev_err(dev,
6349 + "%s: %s set voltage failed, err=%d\n",
6350 __func__, name, ret);
6351 - goto out;
6352 + goto out;
6353 + }
6354 }
6355
6356 uA_load = on ? vreg->max_uA : 0;
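
The three ufshcd hunks above add the same kind of guard: current-load and voltage programming are attempted only when limits were actually supplied (a non-zero max_uA, or both min_uV and max_uV), rather than programming zero values into the regulator framework. A compact sketch of the guard; the struct and callback below are illustrative stand-ins, not the real ufs_vreg or regulator API:

	#include <stdio.h>

	struct vreg {
		const char *name;
		int min_uV;
		int max_uV;
		int max_uA;	/* 0 means "no limit specified" */
	};

	static int set_load(const struct vreg *v, int ua)
	{
		printf("%s: set_load(%d uA)\n", v->name, ua);
		return 0;
	}

	static int config_vreg_load(const struct vreg *v, int ua)
	{
		if (!v)
			return 0;
		if (!v->max_uA)		/* no configured limit: skip */
			return 0;
		return set_load(v, ua);
	}

	int main(void)
	{
		struct vreg vcc  = { "vcc",  2700000, 3600000, 800000 };
		struct vreg vccq = { "vccq", 0, 0, 0 };	/* limits unset */

		config_vreg_load(&vcc,  vcc.max_uA);	/* programmed */
		config_vreg_load(&vccq, vccq.max_uA);	/* silently skipped */
		return 0;
	}
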
6357 diff --git a/drivers/slimbus/qcom-ngd-ctrl.c b/drivers/slimbus/qcom-ngd-ctrl.c
6358 index 14a9d18306cb..f63d1b8a0933 100644
6359 --- a/drivers/slimbus/qcom-ngd-ctrl.c
6360 +++ b/drivers/slimbus/qcom-ngd-ctrl.c
6361 @@ -1331,6 +1331,10 @@ static int of_qcom_slim_ngd_register(struct device *parent,
6362 return -ENOMEM;
6363
6364 ngd->pdev = platform_device_alloc(QCOM_SLIM_NGD_DRV_NAME, id);
6365 + if (!ngd->pdev) {
6366 + kfree(ngd);
6367 + return -ENOMEM;
6368 + }
6369 ngd->id = id;
6370 ngd->pdev->dev.parent = parent;
6371 ngd->pdev->driver_override = QCOM_SLIM_NGD_DRV_NAME;
6372 diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
6373 index 08dd3a31a3e5..5b6f3655c366 100644
6374 --- a/drivers/spi/spi-imx.c
6375 +++ b/drivers/spi/spi-imx.c
6376 @@ -1427,7 +1427,7 @@ static int spi_imx_transfer(struct spi_device *spi,
6377
6378 /* flush rxfifo before transfer */
6379 while (spi_imx->devtype_data->rx_available(spi_imx))
6380 - spi_imx->rx(spi_imx);
6381 + readl(spi_imx->base + MXC_CSPIRXDATA);
6382
6383 if (spi_imx->slave_mode)
6384 return spi_imx_pio_transfer_slave(spi, transfer);
6385 diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
6386 index b624f6fb04ce..729be74621e3 100644
6387 --- a/drivers/spi/spi-pxa2xx.c
6388 +++ b/drivers/spi/spi-pxa2xx.c
6389 @@ -876,10 +876,14 @@ static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
6390
6391 rate = min_t(int, ssp_clk, rate);
6392
6393 + /*
6394 + * Calculate the divisor for the SCR (Serial Clock Rate), avoiding
6395 + * that the SSP transmission rate can be greater than the device rate
6396 + */
6397 if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
6398 - return (ssp_clk / (2 * rate) - 1) & 0xff;
6399 + return (DIV_ROUND_UP(ssp_clk, 2 * rate) - 1) & 0xff;
6400 else
6401 - return (ssp_clk / rate - 1) & 0xfff;
6402 + return (DIV_ROUND_UP(ssp_clk, rate) - 1) & 0xfff;
6403 }
6404
6405 static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
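
The ssp_get_clk_div() change replaces truncating division with DIV_ROUND_UP() so the chosen SCR divisor can only lower the bus clock, never push it above the requested rate. A worked example of the difference; the 26 MHz input clock and 4 MHz request are made-up numbers, evaluated with the ssp_clk / (SCR + 1) relation of the non-PXA25x branch:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	int main(void)
	{
		unsigned int ssp_clk = 26000000;	/* controller input clock */
		unsigned int rate    = 4000000;		/* requested transfer rate */

		unsigned int scr_old = ssp_clk / rate - 1;		/* 5 */
		unsigned int scr_new = DIV_ROUND_UP(ssp_clk, rate) - 1;	/* 6 */

		/* old divisor: 26 MHz / 6 = 4.33 MHz, above the request */
		printf("old SCR=%u -> %u Hz\n", scr_old, ssp_clk / (scr_old + 1));
		/* new divisor: 26 MHz / 7 = 3.71 MHz, within the request */
		printf("new SCR=%u -> %u Hz\n", scr_new, ssp_clk / (scr_new + 1));
		return 0;
	}
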
6406 diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
6407 index b37de1d991d6..d61120822f02 100644
6408 --- a/drivers/spi/spi-rspi.c
6409 +++ b/drivers/spi/spi-rspi.c
6410 @@ -279,7 +279,8 @@ static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
6411 /* Sets parity, interrupt mask */
6412 rspi_write8(rspi, 0x00, RSPI_SPCR2);
6413
6414 - /* Sets SPCMD */
6415 + /* Resets sequencer */
6416 + rspi_write8(rspi, 0, RSPI_SPSCR);
6417 rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
6418 rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
6419
6420 @@ -323,7 +324,8 @@ static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
6421 rspi_write8(rspi, 0x00, RSPI_SSLND);
6422 rspi_write8(rspi, 0x00, RSPI_SPND);
6423
6424 - /* Sets SPCMD */
6425 + /* Resets sequencer */
6426 + rspi_write8(rspi, 0, RSPI_SPSCR);
6427 rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
6428 rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
6429
6430 @@ -374,7 +376,8 @@ static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
6431 /* Sets buffer to allow normal operation */
6432 rspi_write8(rspi, 0x00, QSPI_SPBFCR);
6433
6434 - /* Sets SPCMD */
6435 + /* Resets sequencer */
6436 + rspi_write8(rspi, 0, RSPI_SPSCR);
6437 rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
6438
6439 /* Sets RSPI mode */
6440 diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
6441 index a76acedd7e2f..a1888dc6a938 100644
6442 --- a/drivers/spi/spi-tegra114.c
6443 +++ b/drivers/spi/spi-tegra114.c
6444 @@ -1067,27 +1067,19 @@ static int tegra_spi_probe(struct platform_device *pdev)
6445
6446 spi_irq = platform_get_irq(pdev, 0);
6447 tspi->irq = spi_irq;
6448 - ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
6449 - tegra_spi_isr_thread, IRQF_ONESHOT,
6450 - dev_name(&pdev->dev), tspi);
6451 - if (ret < 0) {
6452 - dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
6453 - tspi->irq);
6454 - goto exit_free_master;
6455 - }
6456
6457 tspi->clk = devm_clk_get(&pdev->dev, "spi");
6458 if (IS_ERR(tspi->clk)) {
6459 dev_err(&pdev->dev, "can not get clock\n");
6460 ret = PTR_ERR(tspi->clk);
6461 - goto exit_free_irq;
6462 + goto exit_free_master;
6463 }
6464
6465 tspi->rst = devm_reset_control_get_exclusive(&pdev->dev, "spi");
6466 if (IS_ERR(tspi->rst)) {
6467 dev_err(&pdev->dev, "can not get reset\n");
6468 ret = PTR_ERR(tspi->rst);
6469 - goto exit_free_irq;
6470 + goto exit_free_master;
6471 }
6472
6473 tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
6474 @@ -1095,7 +1087,7 @@ static int tegra_spi_probe(struct platform_device *pdev)
6475
6476 ret = tegra_spi_init_dma_param(tspi, true);
6477 if (ret < 0)
6478 - goto exit_free_irq;
6479 + goto exit_free_master;
6480 ret = tegra_spi_init_dma_param(tspi, false);
6481 if (ret < 0)
6482 goto exit_rx_dma_free;
6483 @@ -1117,18 +1109,32 @@ static int tegra_spi_probe(struct platform_device *pdev)
6484 dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
6485 goto exit_pm_disable;
6486 }
6487 +
6488 + reset_control_assert(tspi->rst);
6489 + udelay(2);
6490 + reset_control_deassert(tspi->rst);
6491 tspi->def_command1_reg = SPI_M_S;
6492 tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
6493 pm_runtime_put(&pdev->dev);
6494 + ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
6495 + tegra_spi_isr_thread, IRQF_ONESHOT,
6496 + dev_name(&pdev->dev), tspi);
6497 + if (ret < 0) {
6498 + dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
6499 + tspi->irq);
6500 + goto exit_pm_disable;
6501 + }
6502
6503 master->dev.of_node = pdev->dev.of_node;
6504 ret = devm_spi_register_master(&pdev->dev, master);
6505 if (ret < 0) {
6506 dev_err(&pdev->dev, "can not register to master err %d\n", ret);
6507 - goto exit_pm_disable;
6508 + goto exit_free_irq;
6509 }
6510 return ret;
6511
6512 +exit_free_irq:
6513 + free_irq(spi_irq, tspi);
6514 exit_pm_disable:
6515 pm_runtime_disable(&pdev->dev);
6516 if (!pm_runtime_status_suspended(&pdev->dev))
6517 @@ -1136,8 +1142,6 @@ exit_pm_disable:
6518 tegra_spi_deinit_dma_param(tspi, false);
6519 exit_rx_dma_free:
6520 tegra_spi_deinit_dma_param(tspi, true);
6521 -exit_free_irq:
6522 - free_irq(spi_irq, tspi);
6523 exit_free_master:
6524 spi_master_put(master);
6525 return ret;
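
The tegra114 probe rework requests the IRQ only after the clocks are up, the controller has been reset and SPI_COMMAND1 holds a sane value, so an interrupt left pending (for example by a bootloader) cannot invoke the handler against a half-initialized device; the error labels are reshuffled to unwind in the reverse of the new init order. A schematic sketch of that ordering idea with stub functions, not the driver itself:

	#include <stdio.h>

	static int clocks_and_dma_up(void)	{ puts("clocks + DMA ready");    return 0; }
	static int controller_reset(void)	{ puts("controller reset");      return 0; }
	static int irq_install(void)		{ puts("IRQ handler installed"); return 0; }
	static void irq_remove(void)		{ puts("IRQ handler removed"); }
	static void clocks_and_dma_down(void)	{ puts("clocks + DMA torn down"); }

	static int probe(void)
	{
		int ret;

		ret = clocks_and_dma_up();
		if (ret)
			return ret;

		ret = controller_reset();	/* quiesce hardware first */
		if (ret)
			goto err_dma;

		ret = irq_install();		/* only now is an IRQ safe */
		if (ret)
			goto err_dma;

		return 0;

	err_dma:
		clocks_and_dma_down();
		return ret;
	}

	int main(void)
	{
		if (probe())
			return 1;
		irq_remove();			/* reverse order on teardown */
		clocks_and_dma_down();
		return 0;
	}
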
6526 diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
6527 index 97d137591b18..4389ab80c23e 100644
6528 --- a/drivers/spi/spi-topcliff-pch.c
6529 +++ b/drivers/spi/spi-topcliff-pch.c
6530 @@ -1294,18 +1294,27 @@ static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
6531 dma->rx_buf_virt, dma->rx_buf_dma);
6532 }
6533
6534 -static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
6535 +static int pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
6536 struct pch_spi_data *data)
6537 {
6538 struct pch_spi_dma_ctrl *dma;
6539 + int ret;
6540
6541 dma = &data->dma;
6542 + ret = 0;
6543 /* Get Consistent memory for Tx DMA */
6544 dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
6545 PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
6546 + if (!dma->tx_buf_virt)
6547 + ret = -ENOMEM;
6548 +
6549 /* Get Consistent memory for Rx DMA */
6550 dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
6551 PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
6552 + if (!dma->rx_buf_virt)
6553 + ret = -ENOMEM;
6554 +
6555 + return ret;
6556 }
6557
6558 static int pch_spi_pd_probe(struct platform_device *plat_dev)
6559 @@ -1382,7 +1391,9 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
6560
6561 if (use_dma) {
6562 dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
6563 - pch_alloc_dma_buf(board_dat, data);
6564 + ret = pch_alloc_dma_buf(board_dat, data);
6565 + if (ret)
6566 + goto err_spi_register_master;
6567 }
6568
6569 ret = spi_register_master(master);
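
pch_alloc_dma_buf() used to return void, so probing continued even when the coherent Tx/Rx buffers could not be allocated; the change makes it return -ENOMEM and lets probe bail out through its existing error label. The shape of the fix, sketched with an illustrative allocator in place of dma_alloc_coherent():

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct dma_bufs {
		void *tx;
		void *rx;
	};

	static void *alloc_coherent(size_t size)	/* illustrative stand-in */
	{
		return calloc(1, size);
	}

	static int alloc_dma_buf(struct dma_bufs *dma, size_t size)
	{
		int ret = 0;

		dma->tx = alloc_coherent(size);
		if (!dma->tx)
			ret = -ENOMEM;

		dma->rx = alloc_coherent(size);
		if (!dma->rx)
			ret = -ENOMEM;

		return ret;	/* caller unwinds instead of using NULL buffers */
	}

	int main(void)
	{
		struct dma_bufs dma;

		if (alloc_dma_buf(&dma, 4096)) {
			fprintf(stderr, "DMA buffer allocation failed\n");
			return 1;
		}
		puts("DMA buffers ready");
		free(dma.tx);
		free(dma.rx);
		return 0;
	}
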
6570 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
6571 index 9da0bc5a036c..88a8a8edd44b 100644
6572 --- a/drivers/spi/spi.c
6573 +++ b/drivers/spi/spi.c
6574 @@ -982,6 +982,8 @@ static int spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
6575 if (max_tx || max_rx) {
6576 list_for_each_entry(xfer, &msg->transfers,
6577 transfer_list) {
6578 + if (!xfer->len)
6579 + continue;
6580 if (!xfer->tx_buf)
6581 xfer->tx_buf = ctlr->dummy_tx;
6582 if (!xfer->rx_buf)
6583 diff --git a/drivers/ssb/bridge_pcmcia_80211.c b/drivers/ssb/bridge_pcmcia_80211.c
6584 index f51f150307df..ffa379efff83 100644
6585 --- a/drivers/ssb/bridge_pcmcia_80211.c
6586 +++ b/drivers/ssb/bridge_pcmcia_80211.c
6587 @@ -113,16 +113,21 @@ static struct pcmcia_driver ssb_host_pcmcia_driver = {
6588 .resume = ssb_host_pcmcia_resume,
6589 };
6590
6591 +static int pcmcia_init_failed;
6592 +
6593 /*
6594 * These are not module init/exit functions!
6595 * The module_pcmcia_driver() helper cannot be used here.
6596 */
6597 int ssb_host_pcmcia_init(void)
6598 {
6599 - return pcmcia_register_driver(&ssb_host_pcmcia_driver);
6600 + pcmcia_init_failed = pcmcia_register_driver(&ssb_host_pcmcia_driver);
6601 +
6602 + return pcmcia_init_failed;
6603 }
6604
6605 void ssb_host_pcmcia_exit(void)
6606 {
6607 - pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
6608 + if (!pcmcia_init_failed)
6609 + pcmcia_unregister_driver(&ssb_host_pcmcia_driver);
6610 }
6611 diff --git a/drivers/staging/media/davinci_vpfe/Kconfig b/drivers/staging/media/davinci_vpfe/Kconfig
6612 index aea449a8dbf8..76818cc48ddc 100644
6613 --- a/drivers/staging/media/davinci_vpfe/Kconfig
6614 +++ b/drivers/staging/media/davinci_vpfe/Kconfig
6615 @@ -1,7 +1,7 @@
6616 config VIDEO_DM365_VPFE
6617 tristate "DM365 VPFE Media Controller Capture Driver"
6618 depends on VIDEO_V4L2
6619 - depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || COMPILE_TEST
6620 + depends on (ARCH_DAVINCI_DM365 && !VIDEO_DM365_ISIF) || (COMPILE_TEST && !ARCH_OMAP1)
6621 depends on VIDEO_V4L2_SUBDEV_API
6622 depends on VIDEO_DAVINCI_VPBE_DISPLAY
6623 select VIDEOBUF2_DMA_CONTIG
6624 diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
6625 index e76720903064..c7c8ef67b67f 100644
6626 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
6627 +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_2835_arm.c
6628 @@ -208,6 +208,9 @@ vchiq_platform_init_state(VCHIQ_STATE_T *state)
6629 struct vchiq_2835_state *platform_state;
6630
6631 state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
6632 + if (!state->platform_state)
6633 + return VCHIQ_ERROR;
6634 +
6635 platform_state = (struct vchiq_2835_state *)state->platform_state;
6636
6637 platform_state->inited = 1;
6638 diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
6639 index 7642ced31436..63ce567eb6b7 100644
6640 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
6641 +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.c
6642 @@ -2537,6 +2537,8 @@ vchiq_init_state(VCHIQ_STATE_T *state, VCHIQ_SLOT_ZERO_T *slot_zero,
6643 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
6644
6645 status = vchiq_platform_init_state(state);
6646 + if (status != VCHIQ_SUCCESS)
6647 + return VCHIQ_ERROR;
6648
6649 /*
6650 bring up slot handler thread
6651 diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
6652 index 28fc4ce75edb..8490a1b6b615 100644
6653 --- a/drivers/thunderbolt/icm.c
6654 +++ b/drivers/thunderbolt/icm.c
6655 @@ -476,6 +476,11 @@ static void add_switch(struct tb_switch *parent_sw, u64 route,
6656 goto out;
6657
6658 sw->uuid = kmemdup(uuid, sizeof(*uuid), GFP_KERNEL);
6659 + if (!sw->uuid) {
6660 + tb_sw_warn(sw, "cannot allocate memory for switch\n");
6661 + tb_switch_put(sw);
6662 + goto out;
6663 + }
6664 sw->connection_id = connection_id;
6665 sw->connection_key = connection_key;
6666 sw->link = link;
6667 diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
6668 index 8fe913a95b4a..be3f8b592b05 100644
6669 --- a/drivers/thunderbolt/property.c
6670 +++ b/drivers/thunderbolt/property.c
6671 @@ -551,6 +551,11 @@ int tb_property_add_data(struct tb_property_dir *parent, const char *key,
6672
6673 property->length = size / 4;
6674 property->value.data = kzalloc(size, GFP_KERNEL);
6675 + if (!property->value.data) {
6676 + kfree(property);
6677 + return -ENOMEM;
6678 + }
6679 +
6680 memcpy(property->value.data, buf, buflen);
6681
6682 list_add_tail(&property->list, &parent->properties);
6683 @@ -581,7 +586,12 @@ int tb_property_add_text(struct tb_property_dir *parent, const char *key,
6684 return -ENOMEM;
6685
6686 property->length = size / 4;
6687 - property->value.data = kzalloc(size, GFP_KERNEL);
6688 + property->value.text = kzalloc(size, GFP_KERNEL);
6689 + if (!property->value.text) {
6690 + kfree(property);
6691 + return -ENOMEM;
6692 + }
6693 +
6694 strcpy(property->value.text, text);
6695
6696 list_add_tail(&property->list, &parent->properties);
6697 diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
6698 index dd9ae6f5d19c..bc7efa6e515d 100644
6699 --- a/drivers/thunderbolt/switch.c
6700 +++ b/drivers/thunderbolt/switch.c
6701 @@ -9,15 +9,13 @@
6702 #include <linux/idr.h>
6703 #include <linux/nvmem-provider.h>
6704 #include <linux/pm_runtime.h>
6705 +#include <linux/sched/signal.h>
6706 #include <linux/sizes.h>
6707 #include <linux/slab.h>
6708 #include <linux/vmalloc.h>
6709
6710 #include "tb.h"
6711
6712 -/* Switch authorization from userspace is serialized by this lock */
6713 -static DEFINE_MUTEX(switch_lock);
6714 -
6715 /* Switch NVM support */
6716
6717 #define NVM_DEVID 0x05
6718 @@ -253,8 +251,8 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
6719 struct tb_switch *sw = priv;
6720 int ret = 0;
6721
6722 - if (mutex_lock_interruptible(&switch_lock))
6723 - return -ERESTARTSYS;
6724 + if (!mutex_trylock(&sw->tb->lock))
6725 + return restart_syscall();
6726
6727 /*
6728 * Since writing the NVM image might require some special steps,
6729 @@ -274,7 +272,7 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
6730 memcpy(sw->nvm->buf + offset, val, bytes);
6731
6732 unlock:
6733 - mutex_unlock(&switch_lock);
6734 + mutex_unlock(&sw->tb->lock);
6735
6736 return ret;
6737 }
6738 @@ -363,10 +361,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw)
6739 }
6740 nvm->non_active = nvm_dev;
6741
6742 - mutex_lock(&switch_lock);
6743 sw->nvm = nvm;
6744 - mutex_unlock(&switch_lock);
6745 -
6746 return 0;
6747
6748 err_nvm_active:
6749 @@ -383,10 +378,8 @@ static void tb_switch_nvm_remove(struct tb_switch *sw)
6750 {
6751 struct tb_switch_nvm *nvm;
6752
6753 - mutex_lock(&switch_lock);
6754 nvm = sw->nvm;
6755 sw->nvm = NULL;
6756 - mutex_unlock(&switch_lock);
6757
6758 if (!nvm)
6759 return;
6760 @@ -717,8 +710,8 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
6761 {
6762 int ret = -EINVAL;
6763
6764 - if (mutex_lock_interruptible(&switch_lock))
6765 - return -ERESTARTSYS;
6766 + if (!mutex_trylock(&sw->tb->lock))
6767 + return restart_syscall();
6768
6769 if (sw->authorized)
6770 goto unlock;
6771 @@ -761,7 +754,7 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val)
6772 }
6773
6774 unlock:
6775 - mutex_unlock(&switch_lock);
6776 + mutex_unlock(&sw->tb->lock);
6777 return ret;
6778 }
6779
6780 @@ -818,15 +811,15 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
6781 struct tb_switch *sw = tb_to_switch(dev);
6782 ssize_t ret;
6783
6784 - if (mutex_lock_interruptible(&switch_lock))
6785 - return -ERESTARTSYS;
6786 + if (!mutex_trylock(&sw->tb->lock))
6787 + return restart_syscall();
6788
6789 if (sw->key)
6790 ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key);
6791 else
6792 ret = sprintf(buf, "\n");
6793
6794 - mutex_unlock(&switch_lock);
6795 + mutex_unlock(&sw->tb->lock);
6796 return ret;
6797 }
6798
6799 @@ -843,8 +836,8 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
6800 else if (hex2bin(key, buf, sizeof(key)))
6801 return -EINVAL;
6802
6803 - if (mutex_lock_interruptible(&switch_lock))
6804 - return -ERESTARTSYS;
6805 + if (!mutex_trylock(&sw->tb->lock))
6806 + return restart_syscall();
6807
6808 if (sw->authorized) {
6809 ret = -EBUSY;
6810 @@ -859,7 +852,7 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr,
6811 }
6812 }
6813
6814 - mutex_unlock(&switch_lock);
6815 + mutex_unlock(&sw->tb->lock);
6816 return ret;
6817 }
6818 static DEVICE_ATTR(key, 0600, key_show, key_store);
6819 @@ -905,8 +898,8 @@ static ssize_t nvm_authenticate_store(struct device *dev,
6820 bool val;
6821 int ret;
6822
6823 - if (mutex_lock_interruptible(&switch_lock))
6824 - return -ERESTARTSYS;
6825 + if (!mutex_trylock(&sw->tb->lock))
6826 + return restart_syscall();
6827
6828 /* If NVMem devices are not yet added */
6829 if (!sw->nvm) {
6830 @@ -954,7 +947,7 @@ static ssize_t nvm_authenticate_store(struct device *dev,
6831 }
6832
6833 exit_unlock:
6834 - mutex_unlock(&switch_lock);
6835 + mutex_unlock(&sw->tb->lock);
6836
6837 if (ret)
6838 return ret;
6839 @@ -968,8 +961,8 @@ static ssize_t nvm_version_show(struct device *dev,
6840 struct tb_switch *sw = tb_to_switch(dev);
6841 int ret;
6842
6843 - if (mutex_lock_interruptible(&switch_lock))
6844 - return -ERESTARTSYS;
6845 + if (!mutex_trylock(&sw->tb->lock))
6846 + return restart_syscall();
6847
6848 if (sw->safe_mode)
6849 ret = -ENODATA;
6850 @@ -978,7 +971,7 @@ static ssize_t nvm_version_show(struct device *dev,
6851 else
6852 ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor);
6853
6854 - mutex_unlock(&switch_lock);
6855 + mutex_unlock(&sw->tb->lock);
6856
6857 return ret;
6858 }
6859 @@ -1296,13 +1289,14 @@ int tb_switch_configure(struct tb_switch *sw)
6860 return tb_plug_events_active(sw, true);
6861 }
6862
6863 -static void tb_switch_set_uuid(struct tb_switch *sw)
6864 +static int tb_switch_set_uuid(struct tb_switch *sw)
6865 {
6866 u32 uuid[4];
6867 - int cap;
6868 + int cap, ret;
6869
6870 + ret = 0;
6871 if (sw->uuid)
6872 - return;
6873 + return ret;
6874
6875 /*
6876 * The newer controllers include fused UUID as part of link
6877 @@ -1310,7 +1304,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
6878 */
6879 cap = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER);
6880 if (cap > 0) {
6881 - tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
6882 + ret = tb_sw_read(sw, uuid, TB_CFG_SWITCH, cap + 3, 4);
6883 + if (ret)
6884 + return ret;
6885 } else {
6886 /*
6887 * ICM generates UUID based on UID and fills the upper
6888 @@ -1325,6 +1321,9 @@ static void tb_switch_set_uuid(struct tb_switch *sw)
6889 }
6890
6891 sw->uuid = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
6892 + if (!sw->uuid)
6893 + ret = -ENOMEM;
6894 + return ret;
6895 }
6896
6897 static int tb_switch_add_dma_port(struct tb_switch *sw)
6898 @@ -1374,7 +1373,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw)
6899
6900 if (status) {
6901 tb_sw_info(sw, "switch flash authentication failed\n");
6902 - tb_switch_set_uuid(sw);
6903 + ret = tb_switch_set_uuid(sw);
6904 + if (ret)
6905 + return ret;
6906 nvm_set_auth_status(sw, status);
6907 }
6908
6909 @@ -1424,7 +1425,9 @@ int tb_switch_add(struct tb_switch *sw)
6910 }
6911 tb_sw_info(sw, "uid: %#llx\n", sw->uid);
6912
6913 - tb_switch_set_uuid(sw);
6914 + ret = tb_switch_set_uuid(sw);
6915 + if (ret)
6916 + return ret;
6917
6918 for (i = 0; i <= sw->config.max_port_number; i++) {
6919 if (sw->ports[i].disabled) {
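
The switch.c changes retire the private switch_lock in favour of the per-domain tb->lock, and the sysfs and NVMem callbacks now use mutex_trylock() on it, returning restart_syscall() when it is contended instead of sleeping; restart_syscall() makes the kernel retry the read/write once the caller is rescheduled. A userspace approximation of the pattern, with pthread_mutex_trylock() and -EAGAIN standing in for the kernel primitives and made-up version numbers:

	#include <errno.h>
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t domain_lock = PTHREAD_MUTEX_INITIALIZER;

	static int nvm_version_show(char *buf, size_t len)
	{
		if (pthread_mutex_trylock(&domain_lock) != 0)
			return -EAGAIN;	/* caller retries the operation */

		snprintf(buf, len, "%x.%x\n", 0x21, 0x02);

		pthread_mutex_unlock(&domain_lock);
		return 0;
	}

	int main(void)
	{
		char buf[16];
		int ret;

		/* in the kernel, restart_syscall() performs this retry */
		while ((ret = nvm_version_show(buf, sizeof(buf))) == -EAGAIN)
			;

		if (ret == 0)
			fputs(buf, stdout);
		return ret ? 1 : 0;
	}
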
6920 diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
6921 index 5067d69d0501..7a0ee9836a8a 100644
6922 --- a/drivers/thunderbolt/tb.h
6923 +++ b/drivers/thunderbolt/tb.h
6924 @@ -79,8 +79,7 @@ struct tb_switch_nvm {
6925 * @depth: Depth in the chain this switch is connected (ICM only)
6926 *
6927 * When the switch is being added or removed to the domain (other
6928 - * switches) you need to have domain lock held. For switch authorization
6929 - * internal switch_lock is enough.
6930 + * switches) you need to have domain lock held.
6931 */
6932 struct tb_switch {
6933 struct device dev;
6934 diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
6935 index db8bece63327..befe75490697 100644
6936 --- a/drivers/thunderbolt/xdomain.c
6937 +++ b/drivers/thunderbolt/xdomain.c
6938 @@ -743,6 +743,7 @@ static void enumerate_services(struct tb_xdomain *xd)
6939 struct tb_service *svc;
6940 struct tb_property *p;
6941 struct device *dev;
6942 + int id;
6943
6944 /*
6945 * First remove all services that are not available anymore in
6946 @@ -771,7 +772,12 @@ static void enumerate_services(struct tb_xdomain *xd)
6947 break;
6948 }
6949
6950 - svc->id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
6951 + id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
6952 + if (id < 0) {
6953 + kfree(svc);
6954 + break;
6955 + }
6956 + svc->id = id;
6957 svc->dev.bus = &tb_bus_type;
6958 svc->dev.type = &tb_service_type;
6959 svc->dev.parent = &xd->dev;
6960 diff --git a/drivers/tty/ipwireless/main.c b/drivers/tty/ipwireless/main.c
6961 index 3475e841ef5c..4c18bbfe1a92 100644
6962 --- a/drivers/tty/ipwireless/main.c
6963 +++ b/drivers/tty/ipwireless/main.c
6964 @@ -114,6 +114,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
6965
6966 ipw->common_memory = ioremap(p_dev->resource[2]->start,
6967 resource_size(p_dev->resource[2]));
6968 + if (!ipw->common_memory) {
6969 + ret = -ENOMEM;
6970 + goto exit1;
6971 + }
6972 if (!request_mem_region(p_dev->resource[2]->start,
6973 resource_size(p_dev->resource[2]),
6974 IPWIRELESS_PCCARD_NAME)) {
6975 @@ -134,6 +138,10 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data)
6976
6977 ipw->attr_memory = ioremap(p_dev->resource[3]->start,
6978 resource_size(p_dev->resource[3]));
6979 + if (!ipw->attr_memory) {
6980 + ret = -ENOMEM;
6981 + goto exit3;
6982 + }
6983 if (!request_mem_region(p_dev->resource[3]->start,
6984 resource_size(p_dev->resource[3]),
6985 IPWIRELESS_PCCARD_NAME)) {
6986 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
6987 index 1c21955fe7c0..b82a7d787add 100644
6988 --- a/drivers/usb/core/hcd.c
6989 +++ b/drivers/usb/core/hcd.c
6990 @@ -3017,6 +3017,9 @@ usb_hcd_platform_shutdown(struct platform_device *dev)
6991 {
6992 struct usb_hcd *hcd = platform_get_drvdata(dev);
6993
6994 + /* No need for pm_runtime_put(), we're shutting down */
6995 + pm_runtime_get_sync(&dev->dev);
6996 +
6997 if (hcd->driver->shutdown)
6998 hcd->driver->shutdown(hcd);
6999 }
7000 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
7001 index bbcfa63d0233..eb24ec0e160d 100644
7002 --- a/drivers/usb/core/hub.c
7003 +++ b/drivers/usb/core/hub.c
7004 @@ -5823,7 +5823,10 @@ int usb_reset_device(struct usb_device *udev)
7005 cintf->needs_binding = 1;
7006 }
7007 }
7008 - usb_unbind_and_rebind_marked_interfaces(udev);
7009 +
7010 + /* If the reset failed, hub_wq will unbind drivers later */
7011 + if (ret == 0)
7012 + usb_unbind_and_rebind_marked_interfaces(udev);
7013 }
7014
7015 usb_autosuspend_device(udev);
7016 diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
7017 index 220c0f9b89b0..03614ef64ca4 100644
7018 --- a/drivers/usb/dwc2/gadget.c
7019 +++ b/drivers/usb/dwc2/gadget.c
7020 @@ -675,13 +675,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep)
7021 unsigned int maxsize;
7022
7023 if (is_isoc)
7024 - maxsize = hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
7025 - DEV_DMA_ISOC_RX_NBYTES_LIMIT;
7026 + maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT :
7027 + DEV_DMA_ISOC_RX_NBYTES_LIMIT) *
7028 + MAX_DMA_DESC_NUM_HS_ISOC;
7029 else
7030 - maxsize = DEV_DMA_NBYTES_LIMIT;
7031 -
7032 - /* Above size of one descriptor was chosen, multiple it */
7033 - maxsize *= MAX_DMA_DESC_NUM_GENERIC;
7034 + maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC;
7035
7036 return maxsize;
7037 }
7038 @@ -864,7 +862,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
7039
7040 /* Update index of last configured entry in the chain */
7041 hs_ep->next_desc++;
7042 - if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_GENERIC)
7043 + if (hs_ep->next_desc >= MAX_DMA_DESC_NUM_HS_ISOC)
7044 hs_ep->next_desc = 0;
7045
7046 return 0;
7047 @@ -896,7 +894,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
7048 }
7049
7050 /* Initialize descriptor chain by Host Busy status */
7051 - for (i = 0; i < MAX_DMA_DESC_NUM_GENERIC; i++) {
7052 + for (i = 0; i < MAX_DMA_DESC_NUM_HS_ISOC; i++) {
7053 desc = &hs_ep->desc_list[i];
7054 desc->status = 0;
7055 desc->status |= (DEV_DMA_BUFF_STS_HBUSY
7056 @@ -2083,7 +2081,7 @@ static void dwc2_gadget_complete_isoc_request_ddma(struct dwc2_hsotg_ep *hs_ep)
7057 dwc2_hsotg_complete_request(hsotg, hs_ep, hs_req, 0);
7058
7059 hs_ep->compl_desc++;
7060 - if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_GENERIC - 1))
7061 + if (hs_ep->compl_desc > (MAX_DMA_DESC_NUM_HS_ISOC - 1))
7062 hs_ep->compl_desc = 0;
7063 desc_sts = hs_ep->desc_list[hs_ep->compl_desc].status;
7064 }
7065 @@ -3779,6 +3777,7 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
7066 unsigned int i, val, size;
7067 int ret = 0;
7068 unsigned char ep_type;
7069 + int desc_num;
7070
7071 dev_dbg(hsotg->dev,
7072 "%s: ep %s: a 0x%02x, attr 0x%02x, mps 0x%04x, intr %d\n",
7073 @@ -3825,11 +3824,15 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
7074 dev_dbg(hsotg->dev, "%s: read DxEPCTL=0x%08x from 0x%08x\n",
7075 __func__, epctrl, epctrl_reg);
7076
7077 + if (using_desc_dma(hsotg) && ep_type == USB_ENDPOINT_XFER_ISOC)
7078 + desc_num = MAX_DMA_DESC_NUM_HS_ISOC;
7079 + else
7080 + desc_num = MAX_DMA_DESC_NUM_GENERIC;
7081 +
7082 /* Allocate DMA descriptor chain for non-ctrl endpoints */
7083 if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
7084 hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
7085 - MAX_DMA_DESC_NUM_GENERIC *
7086 - sizeof(struct dwc2_dma_desc),
7087 + desc_num * sizeof(struct dwc2_dma_desc),
7088 &hs_ep->desc_list_dma, GFP_ATOMIC);
7089 if (!hs_ep->desc_list) {
7090 ret = -ENOMEM;
7091 @@ -3971,7 +3974,7 @@ error1:
7092
7093 error2:
7094 if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
7095 - dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
7096 + dmam_free_coherent(hsotg->dev, desc_num *
7097 sizeof(struct dwc2_dma_desc),
7098 hs_ep->desc_list, hs_ep->desc_list_dma);
7099 hs_ep->desc_list = NULL;
7100 diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
7101 index 4d5c7dda8f54..05b9ccff7447 100644
7102 --- a/drivers/usb/dwc3/core.c
7103 +++ b/drivers/usb/dwc3/core.c
7104 @@ -1591,6 +1591,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
7105 spin_lock_irqsave(&dwc->lock, flags);
7106 dwc3_gadget_suspend(dwc);
7107 spin_unlock_irqrestore(&dwc->lock, flags);
7108 + synchronize_irq(dwc->irq_gadget);
7109 dwc3_core_exit(dwc);
7110 break;
7111 case DWC3_GCTL_PRTCAP_HOST:
7112 @@ -1623,6 +1624,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg)
7113 spin_lock_irqsave(&dwc->lock, flags);
7114 dwc3_gadget_suspend(dwc);
7115 spin_unlock_irqrestore(&dwc->lock, flags);
7116 + synchronize_irq(dwc->irq_gadget);
7117 }
7118
7119 dwc3_otg_exit(dwc);
7120 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
7121 index 524104eed8a7..65ba1038b111 100644
7122 --- a/drivers/usb/dwc3/gadget.c
7123 +++ b/drivers/usb/dwc3/gadget.c
7124 @@ -3277,8 +3277,6 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
7125 dwc3_disconnect_gadget(dwc);
7126 __dwc3_gadget_stop(dwc);
7127
7128 - synchronize_irq(dwc->irq_gadget);
7129 -
7130 return 0;
7131 }
7132
7133 diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
7134 index 68a113594808..2811c4afde01 100644
7135 --- a/drivers/video/fbdev/core/fbcmap.c
7136 +++ b/drivers/video/fbdev/core/fbcmap.c
7137 @@ -94,6 +94,8 @@ int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags)
7138 int size = len * sizeof(u16);
7139 int ret = -ENOMEM;
7140
7141 + flags |= __GFP_NOWARN;
7142 +
7143 if (cmap->len != len) {
7144 fb_dealloc_cmap(cmap);
7145 if (!len)
7146 diff --git a/drivers/video/fbdev/core/modedb.c b/drivers/video/fbdev/core/modedb.c
7147 index 283d9307df21..ac049871704d 100644
7148 --- a/drivers/video/fbdev/core/modedb.c
7149 +++ b/drivers/video/fbdev/core/modedb.c
7150 @@ -935,6 +935,9 @@ void fb_var_to_videomode(struct fb_videomode *mode,
7151 if (var->vmode & FB_VMODE_DOUBLE)
7152 vtotal *= 2;
7153
7154 + if (!htotal || !vtotal)
7155 + return;
7156 +
7157 hfreq = pixclock/htotal;
7158 mode->refresh = hfreq/vtotal;
7159 }
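
fb_var_to_videomode() derives the refresh rate from the pixel clock and the total horizontal/vertical timings, so a zeroed fb_var_screeninfo (htotal or vtotal of 0) previously divided by zero; the added check simply leaves the refresh rate unset in that case. A tiny illustration with made-up timings:

	#include <stdio.h>

	static unsigned int refresh_hz(unsigned int pixclock,
				       unsigned int htotal, unsigned int vtotal)
	{
		if (!htotal || !vtotal)
			return 0;		/* leave the refresh rate unset */
		return pixclock / htotal / vtotal;
	}

	int main(void)
	{
		/* zeroed var: no crash, refresh stays 0 */
		printf("%u\n", refresh_hz(0, 0, 0));
		/* 65 MHz pixel clock, 1344 x 806 totals -> 60 Hz */
		printf("%u\n", refresh_hz(65000000, 1344, 806));
		return 0;
	}
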
7160 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
7161 index fd02e8a4841d..9f39f0c360e0 100644
7162 --- a/drivers/video/fbdev/efifb.c
7163 +++ b/drivers/video/fbdev/efifb.c
7164 @@ -464,7 +464,8 @@ static int efifb_probe(struct platform_device *dev)
7165 info->apertures->ranges[0].base = efifb_fix.smem_start;
7166 info->apertures->ranges[0].size = size_remap;
7167
7168 - if (!efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
7169 + if (efi_enabled(EFI_BOOT) &&
7170 + !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
7171 if ((efifb_fix.smem_start + efifb_fix.smem_len) >
7172 (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
7173 pr_err("efifb: video memory @ 0x%lx spans multiple EFI memory regions\n",
7174 diff --git a/drivers/video/fbdev/udlfb.c b/drivers/video/fbdev/udlfb.c
7175 index 2001910fd241..5a0d6fb02bbc 100644
7176 --- a/drivers/video/fbdev/udlfb.c
7177 +++ b/drivers/video/fbdev/udlfb.c
7178 @@ -1659,7 +1659,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
7179 dlfb = kzalloc(sizeof(*dlfb), GFP_KERNEL);
7180 if (!dlfb) {
7181 dev_err(&intf->dev, "%s: failed to allocate dlfb\n", __func__);
7182 - goto error;
7183 + return -ENOMEM;
7184 }
7185
7186 INIT_LIST_HEAD(&dlfb->deferred_free);
7187 @@ -1769,7 +1769,7 @@ static int dlfb_usb_probe(struct usb_interface *intf,
7188 error:
7189 if (dlfb->info) {
7190 dlfb_ops_destroy(dlfb->info);
7191 - } else if (dlfb) {
7192 + } else {
7193 usb_put_dev(dlfb->udev);
7194 kfree(dlfb);
7195 }
7196 @@ -1796,12 +1796,10 @@ static void dlfb_usb_disconnect(struct usb_interface *intf)
7197 /* this function will wait for all in-flight urbs to complete */
7198 dlfb_free_urb_list(dlfb);
7199
7200 - if (info) {
7201 - /* remove udlfb's sysfs interfaces */
7202 - for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
7203 - device_remove_file(info->dev, &fb_device_attrs[i]);
7204 - device_remove_bin_file(info->dev, &edid_attr);
7205 - }
7206 + /* remove udlfb's sysfs interfaces */
7207 + for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
7208 + device_remove_file(info->dev, &fb_device_attrs[i]);
7209 + device_remove_bin_file(info->dev, &edid_attr);
7210
7211 unregister_framebuffer(info);
7212 }
7213 diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c
7214 index 0364d3329c52..3516ce6718d9 100644
7215 --- a/drivers/w1/w1_io.c
7216 +++ b/drivers/w1/w1_io.c
7217 @@ -432,8 +432,7 @@ int w1_reset_resume_command(struct w1_master *dev)
7218 if (w1_reset_bus(dev))
7219 return -1;
7220
7221 - /* This will make only the last matched slave perform a skip ROM. */
7222 - w1_write_8(dev, W1_RESUME_CMD);
7223 + w1_write_8(dev, dev->slave_count > 1 ? W1_RESUME_CMD : W1_SKIP_ROM);
7224 return 0;
7225 }
7226 EXPORT_SYMBOL_GPL(w1_reset_resume_command);
7227 diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
7228 index 2a4f52c7be22..ac6c383d6314 100644
7229 --- a/fs/btrfs/backref.c
7230 +++ b/fs/btrfs/backref.c
7231 @@ -710,7 +710,7 @@ out:
7232 * read tree blocks and add keys where required.
7233 */
7234 static int add_missing_keys(struct btrfs_fs_info *fs_info,
7235 - struct preftrees *preftrees)
7236 + struct preftrees *preftrees, bool lock)
7237 {
7238 struct prelim_ref *ref;
7239 struct extent_buffer *eb;
7240 @@ -735,12 +735,14 @@ static int add_missing_keys(struct btrfs_fs_info *fs_info,
7241 free_extent_buffer(eb);
7242 return -EIO;
7243 }
7244 - btrfs_tree_read_lock(eb);
7245 + if (lock)
7246 + btrfs_tree_read_lock(eb);
7247 if (btrfs_header_level(eb) == 0)
7248 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
7249 else
7250 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
7251 - btrfs_tree_read_unlock(eb);
7252 + if (lock)
7253 + btrfs_tree_read_unlock(eb);
7254 free_extent_buffer(eb);
7255 prelim_ref_insert(fs_info, &preftrees->indirect, ref, NULL);
7256 cond_resched();
7257 @@ -1225,7 +1227,7 @@ again:
7258
7259 btrfs_release_path(path);
7260
7261 - ret = add_missing_keys(fs_info, &preftrees);
7262 + ret = add_missing_keys(fs_info, &preftrees, path->skip_locking == 0);
7263 if (ret)
7264 goto out;
7265
7266 @@ -1286,11 +1288,14 @@ again:
7267 ret = -EIO;
7268 goto out;
7269 }
7270 - btrfs_tree_read_lock(eb);
7271 - btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7272 + if (!path->skip_locking) {
7273 + btrfs_tree_read_lock(eb);
7274 + btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
7275 + }
7276 ret = find_extent_in_eb(eb, bytenr,
7277 *extent_item_pos, &eie, ignore_offset);
7278 - btrfs_tree_read_unlock_blocking(eb);
7279 + if (!path->skip_locking)
7280 + btrfs_tree_read_unlock_blocking(eb);
7281 free_extent_buffer(eb);
7282 if (ret < 0)
7283 goto out;
7284 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
7285 index 809c2c307c64..0cc800d22a08 100644
7286 --- a/fs/btrfs/extent-tree.c
7287 +++ b/fs/btrfs/extent-tree.c
7288 @@ -3911,8 +3911,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
7289 info->space_info_kobj, "%s",
7290 alloc_name(space_info->flags));
7291 if (ret) {
7292 - percpu_counter_destroy(&space_info->total_bytes_pinned);
7293 - kfree(space_info);
7294 + kobject_put(&space_info->kobj);
7295 return ret;
7296 }
7297
7298 @@ -10789,9 +10788,9 @@ int btrfs_error_unpin_extent_range(struct btrfs_fs_info *fs_info,
7299 * held back allocations.
7300 */
7301 static int btrfs_trim_free_extents(struct btrfs_device *device,
7302 - struct fstrim_range *range, u64 *trimmed)
7303 + u64 minlen, u64 *trimmed)
7304 {
7305 - u64 start = range->start, len = 0;
7306 + u64 start = 0, len = 0;
7307 int ret;
7308
7309 *trimmed = 0;
7310 @@ -10834,8 +10833,8 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
7311 if (!trans)
7312 up_read(&fs_info->commit_root_sem);
7313
7314 - ret = find_free_dev_extent_start(trans, device, range->minlen,
7315 - start, &start, &len);
7316 + ret = find_free_dev_extent_start(trans, device, minlen, start,
7317 + &start, &len);
7318 if (trans) {
7319 up_read(&fs_info->commit_root_sem);
7320 btrfs_put_transaction(trans);
7321 @@ -10848,16 +10847,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
7322 break;
7323 }
7324
7325 - /* If we are out of the passed range break */
7326 - if (start > range->start + range->len - 1) {
7327 - mutex_unlock(&fs_info->chunk_mutex);
7328 - ret = 0;
7329 - break;
7330 - }
7331 -
7332 - start = max(range->start, start);
7333 - len = min(range->len, len);
7334 -
7335 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
7336 mutex_unlock(&fs_info->chunk_mutex);
7337
7338 @@ -10867,10 +10856,6 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
7339 start += len;
7340 *trimmed += bytes;
7341
7342 - /* We've trimmed enough */
7343 - if (*trimmed >= range->len)
7344 - break;
7345 -
7346 if (fatal_signal_pending(current)) {
7347 ret = -ERESTARTSYS;
7348 break;
7349 @@ -10954,7 +10939,8 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
7350 mutex_lock(&fs_info->fs_devices->device_list_mutex);
7351 devices = &fs_info->fs_devices->devices;
7352 list_for_each_entry(device, devices, dev_list) {
7353 - ret = btrfs_trim_free_extents(device, range, &group_trimmed);
7354 + ret = btrfs_trim_free_extents(device, range->minlen,
7355 + &group_trimmed);
7356 if (ret) {
7357 dev_failed++;
7358 dev_ret = ret;
7359 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
7360 index ca4902c66dc4..e24c0a69ff5d 100644
7361 --- a/fs/btrfs/file.c
7362 +++ b/fs/btrfs/file.c
7363 @@ -2058,6 +2058,18 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
7364 int ret = 0, err;
7365 u64 len;
7366
7367 + /*
7368 + * If the inode needs a full sync, make sure we use a full range to
7369 + * avoid log tree corruption, due to hole detection racing with ordered
7370 + * extent completion for adjacent ranges, and assertion failures during
7371 + * hole detection.
7372 + */
7373 + if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
7374 + &BTRFS_I(inode)->runtime_flags)) {
7375 + start = 0;
7376 + end = LLONG_MAX;
7377 + }
7378 +
7379 /*
7380 * The range length can be represented by u64, we have to do the typecasts
7381 * to avoid signed overflow if it's [0, LLONG_MAX] eg. from fsync()
7382 @@ -2565,10 +2577,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
7383
7384 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
7385 &cached_state);
7386 - if (ret) {
7387 - inode_unlock(inode);
7388 + if (ret)
7389 goto out_only_mutex;
7390 - }
7391
7392 path = btrfs_alloc_path();
7393 if (!path) {
7394 @@ -3151,6 +3161,7 @@ static long btrfs_fallocate(struct file *file, int mode,
7395 ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
7396 cur_offset, last_byte - cur_offset);
7397 if (ret < 0) {
7398 + cur_offset = last_byte;
7399 free_extent_map(em);
7400 break;
7401 }
7402 @@ -3200,7 +3211,7 @@ out:
7403 /* Let go of our reservation. */
7404 if (ret != 0 && !(mode & FALLOC_FL_ZERO_RANGE))
7405 btrfs_free_reserved_data_space(inode, data_reserved,
7406 - alloc_start, alloc_end - cur_offset);
7407 + cur_offset, alloc_end - cur_offset);
7408 extent_changeset_free(data_reserved);
7409 return ret;
7410 }
7411 diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
7412 index 0526b6c473c7..5d57ed629345 100644
7413 --- a/fs/btrfs/relocation.c
7414 +++ b/fs/btrfs/relocation.c
7415 @@ -4289,27 +4289,36 @@ int btrfs_relocate_block_group(struct btrfs_fs_info *fs_info, u64 group_start)
7416 mutex_lock(&fs_info->cleaner_mutex);
7417 ret = relocate_block_group(rc);
7418 mutex_unlock(&fs_info->cleaner_mutex);
7419 - if (ret < 0) {
7420 + if (ret < 0)
7421 err = ret;
7422 - goto out;
7423 - }
7424 -
7425 - if (rc->extents_found == 0)
7426 - break;
7427 -
7428 - btrfs_info(fs_info, "found %llu extents", rc->extents_found);
7429
7430 + /*
7431 + * We may have gotten ENOSPC after we already dirtied some
7432 + * extents. If writeout happens while we're relocating a
7433 + * different block group we could end up hitting the
7434 + * BUG_ON(rc->stage == UPDATE_DATA_PTRS) in
7435 + * btrfs_reloc_cow_block. Make sure we write everything out
7436 + * properly so we don't trip over this problem, and then break
7437 + * out of the loop if we hit an error.
7438 + */
7439 if (rc->stage == MOVE_DATA_EXTENTS && rc->found_file_extent) {
7440 ret = btrfs_wait_ordered_range(rc->data_inode, 0,
7441 (u64)-1);
7442 - if (ret) {
7443 + if (ret)
7444 err = ret;
7445 - goto out;
7446 - }
7447 invalidate_mapping_pages(rc->data_inode->i_mapping,
7448 0, -1);
7449 rc->stage = UPDATE_DATA_PTRS;
7450 }
7451 +
7452 + if (err < 0)
7453 + goto out;
7454 +
7455 + if (rc->extents_found == 0)
7456 + break;
7457 +
7458 + btrfs_info(fs_info, "found %llu extents", rc->extents_found);
7459 +
7460 }
7461
7462 WARN_ON(rc->block_group->pinned > 0);
7463 diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
7464 index 65bda0682928..3228d3b3084a 100644
7465 --- a/fs/btrfs/root-tree.c
7466 +++ b/fs/btrfs/root-tree.c
7467 @@ -132,16 +132,17 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root
7468 return -ENOMEM;
7469
7470 ret = btrfs_search_slot(trans, root, key, path, 0, 1);
7471 - if (ret < 0) {
7472 - btrfs_abort_transaction(trans, ret);
7473 + if (ret < 0)
7474 goto out;
7475 - }
7476
7477 - if (ret != 0) {
7478 - btrfs_print_leaf(path->nodes[0]);
7479 - btrfs_crit(fs_info, "unable to update root key %llu %u %llu",
7480 - key->objectid, key->type, key->offset);
7481 - BUG_ON(1);
7482 + if (ret > 0) {
7483 + btrfs_crit(fs_info,
7484 + "unable to find root key (%llu %u %llu) in tree %llu",
7485 + key->objectid, key->type, key->offset,
7486 + root->root_key.objectid);
7487 + ret = -EUCLEAN;
7488 + btrfs_abort_transaction(trans, ret);
7489 + goto out;
7490 }
7491
7492 l = path->nodes[0];
7493 diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
7494 index 3717c864ba23..aefb0169d46d 100644
7495 --- a/fs/btrfs/sysfs.c
7496 +++ b/fs/btrfs/sysfs.c
7497 @@ -811,7 +811,12 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs,
7498 fs_devs->fsid_kobj.kset = btrfs_kset;
7499 error = kobject_init_and_add(&fs_devs->fsid_kobj,
7500 &btrfs_ktype, parent, "%pU", fs_devs->fsid);
7501 - return error;
7502 + if (error) {
7503 + kobject_put(&fs_devs->fsid_kobj);
7504 + return error;
7505 + }
7506 +
7507 + return 0;
7508 }
7509
7510 int btrfs_sysfs_add_mounted(struct btrfs_fs_info *fs_info)
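
Both btrfs sysfs fixes in this patch (create_space_info() earlier and btrfs_sysfs_add_fsid() here) follow the kobject rule that once kobject_init_and_add() has run, the object must be released with kobject_put() even on failure, so the release callback frees it exactly once; freeing or destroying the containing structure directly would bypass that callback. A small refcount analogy, not the kobject API itself:

	#include <stdio.h>
	#include <stdlib.h>

	struct obj {
		int refs;
		void (*release)(struct obj *);
	};

	static void obj_put(struct obj *o)
	{
		if (--o->refs == 0)
			o->release(o);	/* the single place memory is freed */
	}

	static void obj_release(struct obj *o)
	{
		puts("release callback frees the object");
		free(o);
	}

	/* Mirrors kobject_init_and_add(): the refcount is live even on failure. */
	static int obj_init_and_add(struct obj *o, int simulate_failure)
	{
		o->refs = 1;
		o->release = obj_release;
		return simulate_failure ? -1 : 0;
	}

	int main(void)
	{
		struct obj *o = malloc(sizeof(*o));

		if (!o)
			return 1;

		if (obj_init_and_add(o, 1)) {
			obj_put(o);	/* drop the reference, never free(o) */
			return 1;
		}

		obj_put(o);
		return 0;
	}
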
7511 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
7512 index 2f4f0958e5f2..75051d36dc1a 100644
7513 --- a/fs/btrfs/tree-log.c
7514 +++ b/fs/btrfs/tree-log.c
7515 @@ -4121,6 +4121,7 @@ fill_holes:
7516 *last_extent, 0,
7517 0, len, 0, len,
7518 0, 0, 0);
7519 + *last_extent += len;
7520 }
7521 }
7522 }
7523 diff --git a/fs/char_dev.c b/fs/char_dev.c
7524 index a279c58fe360..8a63cfa29005 100644
7525 --- a/fs/char_dev.c
7526 +++ b/fs/char_dev.c
7527 @@ -159,6 +159,12 @@ __register_chrdev_region(unsigned int major, unsigned int baseminor,
7528 ret = -EBUSY;
7529 goto out;
7530 }
7531 +
7532 + if (new_min < old_min && new_max > old_max) {
7533 + ret = -EBUSY;
7534 + goto out;
7535 + }
7536 +
7537 }
7538
7539 cd->next = *cp;
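
__register_chrdev_region() already rejected a new minor range whose start or end falls inside an existing region, but not one that completely encloses it; the added test closes that hole. A worked illustration of the three overlap cases, with inclusive ranges and illustrative numbers:

	#include <stdbool.h>
	#include <stdio.h>

	struct region { unsigned int min, max; };	/* inclusive minor range */

	static bool conflicts(struct region old, struct region new)
	{
		if (new.min >= old.min && new.min <= old.max)
			return true;		/* starts inside old */
		if (new.max >= old.min && new.max <= old.max)
			return true;		/* ends inside old */
		if (new.min < old.min && new.max > old.max)
			return true;		/* encloses old: the added check */
		return false;
	}

	int main(void)
	{
		struct region old = { 10, 20 };
		struct region enclosing = { 5, 25 };

		/* Only the newly added test catches this case. */
		printf("enclosing range conflicts: %d\n",
		       conflicts(old, enclosing));
		return 0;
	}
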
7540 diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
7541 index 67e8aa35197e..05dc5a4ba481 100644
7542 --- a/fs/ext4/inode.c
7543 +++ b/fs/ext4/inode.c
7544 @@ -5596,25 +5596,22 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
7545 up_write(&EXT4_I(inode)->i_data_sem);
7546 ext4_journal_stop(handle);
7547 if (error) {
7548 - if (orphan)
7549 + if (orphan && inode->i_nlink)
7550 ext4_orphan_del(NULL, inode);
7551 goto err_out;
7552 }
7553 }
7554 - if (!shrink)
7555 + if (!shrink) {
7556 pagecache_isize_extended(inode, oldsize, inode->i_size);
7557 -
7558 - /*
7559 - * Blocks are going to be removed from the inode. Wait
7560 - * for dio in flight. Temporarily disable
7561 - * dioread_nolock to prevent livelock.
7562 - */
7563 - if (orphan) {
7564 - if (!ext4_should_journal_data(inode)) {
7565 - inode_dio_wait(inode);
7566 - } else
7567 - ext4_wait_for_tail_page_commit(inode);
7568 + } else {
7569 + /*
7570 + * Blocks are going to be removed from the inode. Wait
7571 + * for dio in flight.
7572 + */
7573 + inode_dio_wait(inode);
7574 }
7575 + if (orphan && ext4_should_journal_data(inode))
7576 + ext4_wait_for_tail_page_commit(inode);
7577 down_write(&EXT4_I(inode)->i_mmap_sem);
7578
7579 rc = ext4_break_layouts(inode);
7580 diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
7581 index 08314fb42652..4d02e76b648a 100644
7582 --- a/fs/f2fs/data.c
7583 +++ b/fs/f2fs/data.c
7584 @@ -197,12 +197,14 @@ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
7585 struct block_device *bdev = sbi->sb->s_bdev;
7586 int i;
7587
7588 - for (i = 0; i < sbi->s_ndevs; i++) {
7589 - if (FDEV(i).start_blk <= blk_addr &&
7590 - FDEV(i).end_blk >= blk_addr) {
7591 - blk_addr -= FDEV(i).start_blk;
7592 - bdev = FDEV(i).bdev;
7593 - break;
7594 + if (f2fs_is_multi_device(sbi)) {
7595 + for (i = 0; i < sbi->s_ndevs; i++) {
7596 + if (FDEV(i).start_blk <= blk_addr &&
7597 + FDEV(i).end_blk >= blk_addr) {
7598 + blk_addr -= FDEV(i).start_blk;
7599 + bdev = FDEV(i).bdev;
7600 + break;
7601 + }
7602 }
7603 }
7604 if (bio) {
7605 @@ -216,6 +218,9 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
7606 {
7607 int i;
7608
7609 + if (!f2fs_is_multi_device(sbi))
7610 + return 0;
7611 +
7612 for (i = 0; i < sbi->s_ndevs; i++)
7613 if (FDEV(i).start_blk <= blkaddr && FDEV(i).end_blk >= blkaddr)
7614 return i;
7615 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
7616 index 1f5d5f62bb77..a4b6eacf22ea 100644
7617 --- a/fs/f2fs/f2fs.h
7618 +++ b/fs/f2fs/f2fs.h
7619 @@ -1336,6 +1336,17 @@ static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
7620 }
7621 #endif
7622
7623 +/*
7624 + * Test if the mounted volume is a multi-device volume.
7625 + * - For a single regular disk volume, sbi->s_ndevs is 0.
7626 + * - For a single zoned disk volume, sbi->s_ndevs is 1.
7627 + * - For a multi-device volume, sbi->s_ndevs is always 2 or more.
7628 + */
7629 +static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
7630 +{
7631 + return sbi->s_ndevs > 1;
7632 +}
7633 +
7634 /* For write statistics. Suppose sector size is 512 bytes,
7635 * and the return value is in kbytes. s is of struct f2fs_sb_info.
7636 */
7637 @@ -3455,7 +3466,7 @@ static inline bool f2fs_force_buffered_io(struct inode *inode, int rw)
7638 {
7639 return (f2fs_post_read_required(inode) ||
7640 (rw == WRITE && test_opt(F2FS_I_SB(inode), LFS)) ||
7641 - F2FS_I_SB(inode)->s_ndevs);
7642 + f2fs_is_multi_device(F2FS_I_SB(inode)));
7643 }
7644
7645 #ifdef CONFIG_F2FS_FAULT_INJECTION
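
The f2fs changes in this series all funnel through the f2fs_is_multi_device() helper added above: per its comment, s_ndevs is 0 for a single regular disk, 1 for a single zoned disk and 2 or more only for genuine multi-device volumes, so the old "sbi->s_ndevs" truthiness tests misclassified single zoned disks as multi-device. A one-function sketch of the predicate, using an illustrative struct rather than the real f2fs_sb_info:

	#include <stdbool.h>
	#include <stdio.h>

	struct sb_info { int s_ndevs; };

	static bool is_multi_device(const struct sb_info *sbi)
	{
		return sbi->s_ndevs > 1;
	}

	int main(void)
	{
		struct sb_info regular = { 0 }, zoned = { 1 }, multi = { 3 };

		printf("regular: %d, zoned: %d, multi: %d\n",
		       is_multi_device(&regular),	/* 0 */
		       is_multi_device(&zoned),		/* 0: was misdetected */
		       is_multi_device(&multi));	/* 1 */
		return 0;
	}
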
7646 diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
7647 index b3f46e3bec17..8d1eb8dec605 100644
7648 --- a/fs/f2fs/file.c
7649 +++ b/fs/f2fs/file.c
7650 @@ -2539,7 +2539,7 @@ static int f2fs_ioc_flush_device(struct file *filp, unsigned long arg)
7651 sizeof(range)))
7652 return -EFAULT;
7653
7654 - if (sbi->s_ndevs <= 1 || sbi->s_ndevs - 1 <= range.dev_num ||
7655 + if (!f2fs_is_multi_device(sbi) || sbi->s_ndevs - 1 <= range.dev_num ||
7656 sbi->segs_per_sec != 1) {
7657 f2fs_msg(sbi->sb, KERN_WARNING,
7658 "Can't flush %u in %d for segs_per_sec %u != 1\n",
7659 diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
7660 index 5c8d00422237..d44b57a363ff 100644
7661 --- a/fs/f2fs/gc.c
7662 +++ b/fs/f2fs/gc.c
7663 @@ -1256,7 +1256,7 @@ void f2fs_build_gc_manager(struct f2fs_sb_info *sbi)
7664 sbi->gc_pin_file_threshold = DEF_GC_FAILED_PINNED_FILES;
7665
7666 /* give warm/cold data area from slower device */
7667 - if (sbi->s_ndevs && sbi->segs_per_sec == 1)
7668 + if (f2fs_is_multi_device(sbi) && sbi->segs_per_sec == 1)
7669 SIT_I(sbi)->last_victim[ALLOC_NEXT] =
7670 GET_SEGNO(sbi, FDEV(0).end_blk) + 1;
7671 }
7672 diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
7673 index ac038563273d..03fa2c4d3d79 100644
7674 --- a/fs/f2fs/segment.c
7675 +++ b/fs/f2fs/segment.c
7676 @@ -574,7 +574,7 @@ static int submit_flush_wait(struct f2fs_sb_info *sbi, nid_t ino)
7677 int ret = 0;
7678 int i;
7679
7680 - if (!sbi->s_ndevs)
7681 + if (!f2fs_is_multi_device(sbi))
7682 return __submit_flush_wait(sbi, sbi->sb->s_bdev);
7683
7684 for (i = 0; i < sbi->s_ndevs; i++) {
7685 @@ -640,7 +640,8 @@ int f2fs_issue_flush(struct f2fs_sb_info *sbi, nid_t ino)
7686 return ret;
7687 }
7688
7689 - if (atomic_inc_return(&fcc->issing_flush) == 1 || sbi->s_ndevs > 1) {
7690 + if (atomic_inc_return(&fcc->issing_flush) == 1 ||
7691 + f2fs_is_multi_device(sbi)) {
7692 ret = submit_flush_wait(sbi, ino);
7693 atomic_dec(&fcc->issing_flush);
7694
7695 @@ -746,7 +747,7 @@ int f2fs_flush_device_cache(struct f2fs_sb_info *sbi)
7696 {
7697 int ret = 0, i;
7698
7699 - if (!sbi->s_ndevs)
7700 + if (!f2fs_is_multi_device(sbi))
7701 return 0;
7702
7703 for (i = 1; i < sbi->s_ndevs; i++) {
7704 @@ -1289,7 +1290,7 @@ static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
7705
7706 trace_f2fs_queue_discard(bdev, blkstart, blklen);
7707
7708 - if (sbi->s_ndevs) {
7709 + if (f2fs_is_multi_device(sbi)) {
7710 int devi = f2fs_target_device_index(sbi, blkstart);
7711
7712 blkstart -= FDEV(devi).start_blk;
7713 @@ -1638,7 +1639,7 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
7714 block_t lblkstart = blkstart;
7715 int devi = 0;
7716
7717 - if (sbi->s_ndevs) {
7718 + if (f2fs_is_multi_device(sbi)) {
7719 devi = f2fs_target_device_index(sbi, blkstart);
7720 blkstart -= FDEV(devi).start_blk;
7721 }
7722 @@ -2971,7 +2972,7 @@ static void update_device_state(struct f2fs_io_info *fio)
7723 struct f2fs_sb_info *sbi = fio->sbi;
7724 unsigned int devidx;
7725
7726 - if (!sbi->s_ndevs)
7727 + if (!f2fs_is_multi_device(sbi))
7728 return;
7729
7730 devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
7731 diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
7732 index 9d566e62684c..ccdd8c821abd 100644
7733 --- a/fs/gfs2/glock.c
7734 +++ b/fs/gfs2/glock.c
7735 @@ -140,6 +140,7 @@ void gfs2_glock_free(struct gfs2_glock *gl)
7736 {
7737 struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
7738
7739 + BUG_ON(atomic_read(&gl->gl_revokes));
7740 rhashtable_remove_fast(&gl_hash_table, &gl->gl_node, ht_parms);
7741 smp_mb();
7742 wake_up_glock(gl);
7743 @@ -183,15 +184,19 @@ static int demote_ok(const struct gfs2_glock *gl)
7744
7745 void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
7746 {
7747 + if (!(gl->gl_ops->go_flags & GLOF_LRU))
7748 + return;
7749 +
7750 spin_lock(&lru_lock);
7751
7752 - if (!list_empty(&gl->gl_lru))
7753 - list_del_init(&gl->gl_lru);
7754 - else
7755 + list_del(&gl->gl_lru);
7756 + list_add_tail(&gl->gl_lru, &lru_list);
7757 +
7758 + if (!test_bit(GLF_LRU, &gl->gl_flags)) {
7759 + set_bit(GLF_LRU, &gl->gl_flags);
7760 atomic_inc(&lru_count);
7761 + }
7762
7763 - list_add_tail(&gl->gl_lru, &lru_list);
7764 - set_bit(GLF_LRU, &gl->gl_flags);
7765 spin_unlock(&lru_lock);
7766 }
7767
7768 @@ -201,7 +206,7 @@ static void gfs2_glock_remove_from_lru(struct gfs2_glock *gl)
7769 return;
7770
7771 spin_lock(&lru_lock);
7772 - if (!list_empty(&gl->gl_lru)) {
7773 + if (test_bit(GLF_LRU, &gl->gl_flags)) {
7774 list_del_init(&gl->gl_lru);
7775 atomic_dec(&lru_count);
7776 clear_bit(GLF_LRU, &gl->gl_flags);
7777 @@ -1158,8 +1163,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
7778 !test_bit(GLF_DEMOTE, &gl->gl_flags))
7779 fast_path = 1;
7780 }
7781 - if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl) &&
7782 - (glops->go_flags & GLOF_LRU))
7783 + if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
7784 gfs2_glock_add_to_lru(gl);
7785
7786 trace_gfs2_glock_queue(gh, 0);
7787 @@ -1455,6 +1459,7 @@ __acquires(&lru_lock)
7788 if (!spin_trylock(&gl->gl_lockref.lock)) {
7789 add_back_to_lru:
7790 list_add(&gl->gl_lru, &lru_list);
7791 + set_bit(GLF_LRU, &gl->gl_flags);
7792 atomic_inc(&lru_count);
7793 continue;
7794 }
7795 @@ -1462,7 +1467,6 @@ add_back_to_lru:
7796 spin_unlock(&gl->gl_lockref.lock);
7797 goto add_back_to_lru;
7798 }
7799 - clear_bit(GLF_LRU, &gl->gl_flags);
7800 gl->gl_lockref.count++;
7801 if (demote_ok(gl))
7802 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
7803 @@ -1497,6 +1501,7 @@ static long gfs2_scan_glock_lru(int nr)
7804 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
7805 list_move(&gl->gl_lru, &dispose);
7806 atomic_dec(&lru_count);
7807 + clear_bit(GLF_LRU, &gl->gl_flags);
7808 freed++;
7809 continue;
7810 }
7811 diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
7812 index ac7caa267ed6..62edf8f5615f 100644
7813 --- a/fs/gfs2/lock_dlm.c
7814 +++ b/fs/gfs2/lock_dlm.c
7815 @@ -31,9 +31,10 @@
7816 * @delta is the difference between the current rtt sample and the
7817 * running average srtt. We add 1/8 of that to the srtt in order to
7818 * update the current srtt estimate. The variance estimate is a bit
7819 - * more complicated. We subtract the abs value of the @delta from
7820 - * the current variance estimate and add 1/4 of that to the running
7821 - * total.
7822 + * more complicated. We subtract the current variance estimate from
7823 + * the abs value of the @delta and add 1/4 of that to the running
7824 + * total. That's equivalent to 3/4 of the current variance
7825 + * estimate plus 1/4 of the abs of @delta.
7826 *
7827 * Note that the index points at the array entry containing the smoothed
7828 * mean value, and the variance is always in the following entry
7829 @@ -49,7 +50,7 @@ static inline void gfs2_update_stats(struct gfs2_lkstats *s, unsigned index,
7830 s64 delta = sample - s->stats[index];
7831 s->stats[index] += (delta >> 3);
7832 index++;
7833 - s->stats[index] += ((abs(delta) - s->stats[index]) >> 2);
7834 + s->stats[index] += (s64)(abs(delta) - s->stats[index]) >> 2;
7835 }
7836
7837 /**
7838 diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
7839 index ee20ea42e7b5..cd85092723de 100644
7840 --- a/fs/gfs2/log.c
7841 +++ b/fs/gfs2/log.c
7842 @@ -604,7 +604,8 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
7843 bd->bd_bh = NULL;
7844 bd->bd_ops = &gfs2_revoke_lops;
7845 sdp->sd_log_num_revoke++;
7846 - atomic_inc(&gl->gl_revokes);
7847 + if (atomic_inc_return(&gl->gl_revokes) == 1)
7848 + gfs2_glock_hold(gl);
7849 set_bit(GLF_LFLUSH, &gl->gl_flags);
7850 list_add(&bd->bd_list, &sdp->sd_log_le_revoke);
7851 }
7852 diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
7853 index f2567f958d00..8f99b395d7bf 100644
7854 --- a/fs/gfs2/lops.c
7855 +++ b/fs/gfs2/lops.c
7856 @@ -662,8 +662,10 @@ static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
7857 bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
7858 list_del_init(&bd->bd_list);
7859 gl = bd->bd_gl;
7860 - atomic_dec(&gl->gl_revokes);
7861 - clear_bit(GLF_LFLUSH, &gl->gl_flags);
7862 + if (atomic_dec_return(&gl->gl_revokes) == 0) {
7863 + clear_bit(GLF_LFLUSH, &gl->gl_flags);
7864 + gfs2_glock_queue_put(gl);
7865 + }
7866 kmem_cache_free(gfs2_bufdata_cachep, bd);
7867 }
7868 }
7869 diff --git a/fs/internal.h b/fs/internal.h
7870 index d410186bc369..d109665b9e50 100644
7871 --- a/fs/internal.h
7872 +++ b/fs/internal.h
7873 @@ -80,9 +80,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
7874
7875 extern void __init mnt_init(void);
7876
7877 -extern int __mnt_want_write(struct vfsmount *);
7878 extern int __mnt_want_write_file(struct file *);
7879 -extern void __mnt_drop_write(struct vfsmount *);
7880 extern void __mnt_drop_write_file(struct file *);
7881
7882 /*
7883 diff --git a/fs/nfs/client.c b/fs/nfs/client.c
7884 index 751ca65da8a3..c092661147b3 100644
7885 --- a/fs/nfs/client.c
7886 +++ b/fs/nfs/client.c
7887 @@ -290,6 +290,7 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
7888 struct nfs_client *clp;
7889 const struct sockaddr *sap = data->addr;
7890 struct nfs_net *nn = net_generic(data->net, nfs_net_id);
7891 + int error;
7892
7893 again:
7894 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
7895 @@ -302,9 +303,11 @@ again:
7896 if (clp->cl_cons_state > NFS_CS_READY) {
7897 refcount_inc(&clp->cl_count);
7898 spin_unlock(&nn->nfs_client_lock);
7899 - nfs_wait_client_init_complete(clp);
7900 + error = nfs_wait_client_init_complete(clp);
7901 nfs_put_client(clp);
7902 spin_lock(&nn->nfs_client_lock);
7903 + if (error < 0)
7904 + return ERR_PTR(error);
7905 goto again;
7906 }
7907
7908 @@ -413,6 +416,8 @@ struct nfs_client *nfs_get_client(const struct nfs_client_initdata *cl_init)
7909 clp = nfs_match_client(cl_init);
7910 if (clp) {
7911 spin_unlock(&nn->nfs_client_lock);
7912 + if (IS_ERR(clp))
7913 + return clp;
7914 if (new)
7915 new->rpc_ops->free_client(new);
7916 return nfs_found_client(cl_init, clp);
7917 diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
7918 index fed06fd9998d..94f98e190e63 100644
7919 --- a/fs/nfs/nfs42proc.c
7920 +++ b/fs/nfs/nfs42proc.c
7921 @@ -329,9 +329,6 @@ ssize_t nfs42_proc_copy(struct file *src, loff_t pos_src,
7922 };
7923 ssize_t err, err2;
7924
7925 - if (!nfs_server_capable(file_inode(dst), NFS_CAP_COPY))
7926 - return -EOPNOTSUPP;
7927 -
7928 src_lock = nfs_get_lock_context(nfs_file_open_context(src));
7929 if (IS_ERR(src_lock))
7930 return PTR_ERR(src_lock);
7931 diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
7932 index 4288a6ecaf75..134858507268 100644
7933 --- a/fs/nfs/nfs4file.c
7934 +++ b/fs/nfs/nfs4file.c
7935 @@ -133,15 +133,11 @@ static ssize_t nfs4_copy_file_range(struct file *file_in, loff_t pos_in,
7936 struct file *file_out, loff_t pos_out,
7937 size_t count, unsigned int flags)
7938 {
7939 - ssize_t ret;
7940 -
7941 + if (!nfs_server_capable(file_inode(file_out), NFS_CAP_COPY))
7942 + return -EOPNOTSUPP;
7943 if (file_inode(file_in) == file_inode(file_out))
7944 - return -EINVAL;
7945 -retry:
7946 - ret = nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
7947 - if (ret == -EAGAIN)
7948 - goto retry;
7949 - return ret;
7950 + return -EOPNOTSUPP;
7951 + return nfs42_proc_copy(file_in, pos_in, file_out, pos_out, count);
7952 }
7953
7954 static loff_t nfs4_file_llseek(struct file *filep, loff_t offset, int whence)
7955 diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
7956 index b2aadd3e1fec..336f04da80ed 100644
7957 --- a/fs/overlayfs/dir.c
7958 +++ b/fs/overlayfs/dir.c
7959 @@ -260,7 +260,7 @@ static int ovl_instantiate(struct dentry *dentry, struct inode *inode,
7960 * hashed directory inode aliases.
7961 */
7962 inode = ovl_get_inode(dentry->d_sb, &oip);
7963 - if (WARN_ON(IS_ERR(inode)))
7964 + if (IS_ERR(inode))
7965 return PTR_ERR(inode);
7966 } else {
7967 WARN_ON(ovl_inode_real(inode) != d_inode(newdentry));
7968 diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
7969 index 3b7ed5d2279c..b48273e846ad 100644
7970 --- a/fs/overlayfs/inode.c
7971 +++ b/fs/overlayfs/inode.c
7972 @@ -832,7 +832,7 @@ struct inode *ovl_get_inode(struct super_block *sb,
7973 int fsid = bylower ? oip->lowerpath->layer->fsid : 0;
7974 bool is_dir, metacopy = false;
7975 unsigned long ino = 0;
7976 - int err = -ENOMEM;
7977 + int err = oip->newinode ? -EEXIST : -ENOMEM;
7978
7979 if (!realinode)
7980 realinode = d_inode(lowerdentry);
7981 @@ -917,6 +917,7 @@ out:
7982 return inode;
7983
7984 out_err:
7985 + pr_warn_ratelimited("overlayfs: failed to get inode (%i)\n", err);
7986 inode = ERR_PTR(err);
7987 goto out;
7988 }
7989 diff --git a/include/drm/tinydrm/mipi-dbi.h b/include/drm/tinydrm/mipi-dbi.h
7990 index b8ba58861986..bcc98bd447f7 100644
7991 --- a/include/drm/tinydrm/mipi-dbi.h
7992 +++ b/include/drm/tinydrm/mipi-dbi.h
7993 @@ -42,7 +42,7 @@ struct mipi_dbi {
7994 struct spi_device *spi;
7995 bool enabled;
7996 struct mutex cmdlock;
7997 - int (*command)(struct mipi_dbi *mipi, u8 cmd, u8 *param, size_t num);
7998 + int (*command)(struct mipi_dbi *mipi, u8 *cmd, u8 *param, size_t num);
7999 const u8 *read_commands;
8000 struct gpio_desc *dc;
8001 u16 *tx_buf;
8002 @@ -79,6 +79,7 @@ u32 mipi_dbi_spi_cmd_max_speed(struct spi_device *spi, size_t len);
8003
8004 int mipi_dbi_command_read(struct mipi_dbi *mipi, u8 cmd, u8 *val);
8005 int mipi_dbi_command_buf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
8006 +int mipi_dbi_command_stackbuf(struct mipi_dbi *mipi, u8 cmd, u8 *data, size_t len);
8007 int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
8008 struct drm_clip_rect *clip, bool swap);
8009 /**
8010 @@ -96,7 +97,7 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
8011 #define mipi_dbi_command(mipi, cmd, seq...) \
8012 ({ \
8013 u8 d[] = { seq }; \
8014 - mipi_dbi_command_buf(mipi, cmd, d, ARRAY_SIZE(d)); \
8015 + mipi_dbi_command_stackbuf(mipi, cmd, d, ARRAY_SIZE(d)); \
8016 })
8017
8018 #ifdef CONFIG_DEBUG_FS
8019 diff --git a/include/linux/bio.h b/include/linux/bio.h
8020 index 51371740d2a8..c7433a201171 100644
8021 --- a/include/linux/bio.h
8022 +++ b/include/linux/bio.h
8023 @@ -257,7 +257,7 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
8024 {
8025 if (count != 1) {
8026 bio->bi_flags |= (1 << BIO_REFFED);
8027 - smp_mb__before_atomic();
8028 + smp_mb();
8029 }
8030 atomic_set(&bio->__bi_cnt, count);
8031 }
8032 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
8033 index 6002275937f5..a6090154b2ab 100644
8034 --- a/include/linux/cgroup-defs.h
8035 +++ b/include/linux/cgroup-defs.h
8036 @@ -346,6 +346,11 @@ struct cgroup {
8037 * Dying cgroups are cgroups which were deleted by a user,
8038 * but are still existing because someone else is holding a reference.
8039 * max_descendants is a maximum allowed number of descent cgroups.
8040 + *
8041 + * nr_descendants and nr_dying_descendants are protected
8042 + * by cgroup_mutex and css_set_lock. It's fine to read them holding
8043 + * any of cgroup_mutex and css_set_lock; for writing both locks
8044 + * should be held.
8045 */
8046 int nr_descendants;
8047 int nr_dying_descendants;
8048 diff --git a/include/linux/filter.h b/include/linux/filter.h
8049 index 037610845892..d52a7484aeb2 100644
8050 --- a/include/linux/filter.h
8051 +++ b/include/linux/filter.h
8052 @@ -684,6 +684,7 @@ static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
8053 static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
8054 {
8055 set_memory_ro((unsigned long)hdr, hdr->pages);
8056 + set_memory_x((unsigned long)hdr, hdr->pages);
8057 }
8058
8059 static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
8060 @@ -836,6 +837,7 @@ bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
8061 extern int bpf_jit_enable;
8062 extern int bpf_jit_harden;
8063 extern int bpf_jit_kallsyms;
8064 +extern int bpf_jit_limit;
8065
8066 typedef void (*bpf_jit_fill_hole_t)(void *area, unsigned int size);
8067
8068 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
8069 index f767293b00e6..f13272d84332 100644
8070 --- a/include/linux/genhd.h
8071 +++ b/include/linux/genhd.h
8072 @@ -596,6 +596,7 @@ struct unixware_disklabel {
8073
8074 extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
8075 extern void blk_free_devt(dev_t devt);
8076 +extern void blk_invalidate_devt(dev_t devt);
8077 extern dev_t blk_lookup_devt(const char *name, int partno);
8078 extern char *disk_name (struct gendisk *hd, int partno, char *buf);
8079
8080 diff --git a/include/linux/hid.h b/include/linux/hid.h
8081 index d44a78362942..8b3e5e8a72fb 100644
8082 --- a/include/linux/hid.h
8083 +++ b/include/linux/hid.h
8084 @@ -414,6 +414,7 @@ struct hid_global {
8085
8086 struct hid_local {
8087 unsigned usage[HID_MAX_USAGES]; /* usage array */
8088 + u8 usage_size[HID_MAX_USAGES]; /* usage size array */
8089 unsigned collection_index[HID_MAX_USAGES]; /* collection index array */
8090 unsigned usage_index;
8091 unsigned usage_minimum;
8092 diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
8093 index 730ead1a46df..57c122ae5452 100644
8094 --- a/include/linux/iio/adc/ad_sigma_delta.h
8095 +++ b/include/linux/iio/adc/ad_sigma_delta.h
8096 @@ -66,6 +66,7 @@ struct ad_sigma_delta {
8097 bool irq_dis;
8098
8099 bool bus_locked;
8100 + bool keep_cs_asserted;
8101
8102 uint8_t comm;
8103
8104 diff --git a/include/linux/mount.h b/include/linux/mount.h
8105 index 45b1f56c6c2f..4b0db4418954 100644
8106 --- a/include/linux/mount.h
8107 +++ b/include/linux/mount.h
8108 @@ -86,6 +86,8 @@ extern bool mnt_may_suid(struct vfsmount *mnt);
8109
8110 struct path;
8111 extern struct vfsmount *clone_private_mount(const struct path *path);
8112 +extern int __mnt_want_write(struct vfsmount *);
8113 +extern void __mnt_drop_write(struct vfsmount *);
8114
8115 struct file_system_type;
8116 extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
8117 diff --git a/include/linux/overflow.h b/include/linux/overflow.h
8118 index 40b48e2133cb..15eb85de9226 100644
8119 --- a/include/linux/overflow.h
8120 +++ b/include/linux/overflow.h
8121 @@ -36,6 +36,12 @@
8122 #define type_max(T) ((T)((__type_half_max(T) - 1) + __type_half_max(T)))
8123 #define type_min(T) ((T)((T)-type_max(T)-(T)1))
8124
8125 +/*
8126 + * Avoids triggering -Wtype-limits compilation warning,
8127 + * while using unsigned data types to check a < 0.
8128 + */
8129 +#define is_non_negative(a) ((a) > 0 || (a) == 0)
8130 +#define is_negative(a) (!(is_non_negative(a)))
8131
8132 #ifdef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
8133 /*
8134 @@ -227,10 +233,10 @@
8135 typeof(d) _d = d; \
8136 u64 _a_full = _a; \
8137 unsigned int _to_shift = \
8138 - _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
8139 + is_non_negative(_s) && _s < 8 * sizeof(*d) ? _s : 0; \
8140 *_d = (_a_full << _to_shift); \
8141 - (_to_shift != _s || *_d < 0 || _a < 0 || \
8142 - (*_d >> _to_shift) != _a); \
8143 + (_to_shift != _s || is_negative(*_d) || is_negative(_a) || \
8144 + (*_d >> _to_shift) != _a); \
8145 })
8146
8147 /**
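
Note: the two helpers added to overflow.h above exist only to express "a < 0" in a form that does not trip -Wtype-limits when the macro argument happens to be an unsigned type. A minimal userspace sketch of the same trick, outside the kernel's check_shl_overflow() machinery (the macros are copied from the hunk; the surrounding program is illustrative only):

#include <stdio.h>

/* "(a) < 0" on an unsigned type triggers -Wtype-limits; this form does not. */
#define is_non_negative(a) ((a) > 0 || (a) == 0)
#define is_negative(a)     (!(is_non_negative(a)))

int main(void)
{
        unsigned int u = 7;
        int s = -7;

        /* Both expand cleanly, whatever the signedness of the argument. */
        printf("u negative? %d\n", is_negative(u));   /* prints 0 */
        printf("s negative? %d\n", is_negative(s));   /* prints 1 */
        return 0;
}
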
8148 diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
8149 index d0884b525001..9d1bc65d226c 100644
8150 --- a/include/linux/smpboot.h
8151 +++ b/include/linux/smpboot.h
8152 @@ -29,7 +29,7 @@ struct smpboot_thread_data;
8153 * @thread_comm: The base name of the thread
8154 */
8155 struct smp_hotplug_thread {
8156 - struct task_struct __percpu **store;
8157 + struct task_struct * __percpu *store;
8158 struct list_head list;
8159 int (*thread_should_run)(unsigned int cpu);
8160 void (*thread_fn)(unsigned int cpu);
8161 diff --git a/include/linux/time64.h b/include/linux/time64.h
8162 index 05634afba0db..4a45aea0f96e 100644
8163 --- a/include/linux/time64.h
8164 +++ b/include/linux/time64.h
8165 @@ -41,6 +41,17 @@ struct itimerspec64 {
8166 #define KTIME_MAX ((s64)~((u64)1 << 63))
8167 #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
8168
8169 +/*
8170 + * Limits for settimeofday():
8171 + *
8172 + * To prevent setting the time close to the wraparound point time setting
8173 + * is limited so a reasonable uptime can be accommodated. Uptime of 30 years
8174 + * should be really sufficient, which means the cutoff is 2232. At that
8175 + * point the cutoff is just a small part of the larger problem.
8176 + */
8177 +#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 *3600)
8178 +#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)
8179 +
8180 static inline int timespec64_equal(const struct timespec64 *a,
8181 const struct timespec64 *b)
8182 {
8183 @@ -108,6 +119,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts)
8184 return true;
8185 }
8186
8187 +static inline bool timespec64_valid_settod(const struct timespec64 *ts)
8188 +{
8189 + if (!timespec64_valid(ts))
8190 + return false;
8191 + /* Disallow values which cause overflow issues vs. CLOCK_REALTIME */
8192 + if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX)
8193 + return false;
8194 + return true;
8195 +}
8196 +
8197 /**
8198 * timespec64_to_ns - Convert timespec64 to nanoseconds
8199 * @ts: pointer to the timespec64 variable to be converted
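
Note: the new timespec64_valid_settod() above rejects wall-clock values that would leave less than roughly 30 years of headroom before KTIME_MAX overflows. A hedged userspace illustration of the same cutoff arithmetic follows; the constants are copied from the hunk, while the struct and function names are simplified stand-ins, not the kernel API:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define NSEC_PER_SEC        1000000000LL
#define KTIME_MAX           ((int64_t)~((uint64_t)1 << 63))
#define KTIME_SEC_MAX       (KTIME_MAX / NSEC_PER_SEC)
#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 * 3600)
#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX)

/* Simplified stand-in for the kernel's struct timespec64. */
struct ts64 { int64_t tv_sec; long tv_nsec; };

static bool valid_settod(const struct ts64 *ts)
{
        /* plain validity check first, then the settimeofday() headroom cutoff */
        if (ts->tv_sec < 0 || ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)
                return false;
        return (uint64_t)ts->tv_sec < (uint64_t)TIME_SETTOD_SEC_MAX;
}

int main(void)
{
        struct ts64 ok  = { .tv_sec = 2000000000,          .tv_nsec = 0 };
        struct ts64 bad = { .tv_sec = TIME_SETTOD_SEC_MAX, .tv_nsec = 0 };

        printf("year-2033 value accepted: %d\n", valid_settod(&ok));   /* 1 */
        printf("near-KTIME_MAX rejected:  %d\n", valid_settod(&bad));  /* 0 */
        return 0;
}
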
8200 diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
8201 index f6818f732f34..bddd86c11f5f 100644
8202 --- a/include/media/videobuf2-core.h
8203 +++ b/include/media/videobuf2-core.h
8204 @@ -551,6 +551,7 @@ struct vb2_queue {
8205 unsigned int start_streaming_called:1;
8206 unsigned int error:1;
8207 unsigned int waiting_for_buffers:1;
8208 + unsigned int waiting_in_dqbuf:1;
8209 unsigned int is_multiplanar:1;
8210 unsigned int is_output:1;
8211 unsigned int copy_timestamp:1;
8212 diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
8213 index cdd9f1fe7cfa..845d947dbae8 100644
8214 --- a/include/net/bluetooth/hci.h
8215 +++ b/include/net/bluetooth/hci.h
8216 @@ -270,6 +270,7 @@ enum {
8217 HCI_FORCE_BREDR_SMP,
8218 HCI_FORCE_STATIC_ADDR,
8219 HCI_LL_RPA_RESOLUTION,
8220 + HCI_CMD_PENDING,
8221
8222 __HCI_NUM_FLAGS,
8223 };
8224 diff --git a/kernel/acct.c b/kernel/acct.c
8225 index addf7732fb56..81f9831a7859 100644
8226 --- a/kernel/acct.c
8227 +++ b/kernel/acct.c
8228 @@ -227,7 +227,7 @@ static int acct_on(struct filename *pathname)
8229 filp_close(file, NULL);
8230 return PTR_ERR(internal);
8231 }
8232 - err = mnt_want_write(internal);
8233 + err = __mnt_want_write(internal);
8234 if (err) {
8235 mntput(internal);
8236 kfree(acct);
8237 @@ -252,7 +252,7 @@ static int acct_on(struct filename *pathname)
8238 old = xchg(&ns->bacct, &acct->pin);
8239 mutex_unlock(&acct->lock);
8240 pin_kill(old);
8241 - mnt_drop_write(mnt);
8242 + __mnt_drop_write(mnt);
8243 mntput(mnt);
8244 return 0;
8245 }
8246 diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
8247 index bf309f2592c4..425c67e4f568 100644
8248 --- a/kernel/auditfilter.c
8249 +++ b/kernel/auditfilter.c
8250 @@ -1114,22 +1114,24 @@ int audit_rule_change(int type, int seq, void *data, size_t datasz)
8251 int err = 0;
8252 struct audit_entry *entry;
8253
8254 - entry = audit_data_to_entry(data, datasz);
8255 - if (IS_ERR(entry))
8256 - return PTR_ERR(entry);
8257 -
8258 switch (type) {
8259 case AUDIT_ADD_RULE:
8260 + entry = audit_data_to_entry(data, datasz);
8261 + if (IS_ERR(entry))
8262 + return PTR_ERR(entry);
8263 err = audit_add_rule(entry);
8264 audit_log_rule_change("add_rule", &entry->rule, !err);
8265 break;
8266 case AUDIT_DEL_RULE:
8267 + entry = audit_data_to_entry(data, datasz);
8268 + if (IS_ERR(entry))
8269 + return PTR_ERR(entry);
8270 err = audit_del_rule(entry);
8271 audit_log_rule_change("remove_rule", &entry->rule, !err);
8272 break;
8273 default:
8274 - err = -EINVAL;
8275 WARN_ON(1);
8276 + return -EINVAL;
8277 }
8278
8279 if (err || type == AUDIT_DEL_RULE) {
8280 diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
8281 index 474525e3a9db..bad9985b8a08 100644
8282 --- a/kernel/bpf/core.c
8283 +++ b/kernel/bpf/core.c
8284 @@ -366,10 +366,13 @@ void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
8285 }
8286
8287 #ifdef CONFIG_BPF_JIT
8288 +# define BPF_JIT_LIMIT_DEFAULT (PAGE_SIZE * 40000)
8289 +
8290 /* All BPF JIT sysctl knobs here. */
8291 int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
8292 int bpf_jit_harden __read_mostly;
8293 int bpf_jit_kallsyms __read_mostly;
8294 +int bpf_jit_limit __read_mostly = BPF_JIT_LIMIT_DEFAULT;
8295
8296 static __always_inline void
8297 bpf_get_prog_addr_region(const struct bpf_prog *prog,
8298 @@ -578,27 +581,64 @@ int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
8299 return ret;
8300 }
8301
8302 +static atomic_long_t bpf_jit_current;
8303 +
8304 +#if defined(MODULES_VADDR)
8305 +static int __init bpf_jit_charge_init(void)
8306 +{
8307 + /* Only used as heuristic here to derive limit. */
8308 + bpf_jit_limit = min_t(u64, round_up((MODULES_END - MODULES_VADDR) >> 2,
8309 + PAGE_SIZE), INT_MAX);
8310 + return 0;
8311 +}
8312 +pure_initcall(bpf_jit_charge_init);
8313 +#endif
8314 +
8315 +static int bpf_jit_charge_modmem(u32 pages)
8316 +{
8317 + if (atomic_long_add_return(pages, &bpf_jit_current) >
8318 + (bpf_jit_limit >> PAGE_SHIFT)) {
8319 + if (!capable(CAP_SYS_ADMIN)) {
8320 + atomic_long_sub(pages, &bpf_jit_current);
8321 + return -EPERM;
8322 + }
8323 + }
8324 +
8325 + return 0;
8326 +}
8327 +
8328 +static void bpf_jit_uncharge_modmem(u32 pages)
8329 +{
8330 + atomic_long_sub(pages, &bpf_jit_current);
8331 +}
8332 +
8333 struct bpf_binary_header *
8334 bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
8335 unsigned int alignment,
8336 bpf_jit_fill_hole_t bpf_fill_ill_insns)
8337 {
8338 struct bpf_binary_header *hdr;
8339 - unsigned int size, hole, start;
8340 + u32 size, hole, start, pages;
8341
8342 /* Most of BPF filters are really small, but if some of them
8343 * fill a page, allow at least 128 extra bytes to insert a
8344 * random section of illegal instructions.
8345 */
8346 size = round_up(proglen + sizeof(*hdr) + 128, PAGE_SIZE);
8347 + pages = size / PAGE_SIZE;
8348 +
8349 + if (bpf_jit_charge_modmem(pages))
8350 + return NULL;
8351 hdr = module_alloc(size);
8352 - if (hdr == NULL)
8353 + if (!hdr) {
8354 + bpf_jit_uncharge_modmem(pages);
8355 return NULL;
8356 + }
8357
8358 /* Fill space with illegal/arch-dep instructions. */
8359 bpf_fill_ill_insns(hdr, size);
8360
8361 - hdr->pages = size / PAGE_SIZE;
8362 + hdr->pages = pages;
8363 hole = min_t(unsigned int, size - (proglen + sizeof(*hdr)),
8364 PAGE_SIZE - sizeof(*hdr));
8365 start = (get_random_int() % hole) & ~(alignment - 1);
8366 @@ -611,7 +651,10 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
8367
8368 void bpf_jit_binary_free(struct bpf_binary_header *hdr)
8369 {
8370 + u32 pages = hdr->pages;
8371 +
8372 module_memfree(hdr);
8373 + bpf_jit_uncharge_modmem(pages);
8374 }
8375
8376 /* This symbol is only overridden by archs that have different
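
Note: bpf_jit_charge_modmem()/bpf_jit_uncharge_modmem() above implement a "charge first, roll back on failure" accounting scheme on an atomic counter, with CAP_SYS_ADMIN exempt from the limit. The sketch below reproduces that pattern in userspace under stated assumptions: C11 stdatomic replaces atomic_long_t, a boolean stands in for the capability check, and the limit value is arbitrary.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_long jit_current = 0;      /* pages currently charged            */
static long jit_limit_pages = 1024;      /* stand-in for bpf_jit_limit >> PAGE_SHIFT */

static int charge(long pages, bool privileged)
{
        /* Charge optimistically, then undo if an unprivileged caller went over. */
        if (atomic_fetch_add(&jit_current, pages) + pages > jit_limit_pages) {
                if (!privileged) {
                        atomic_fetch_sub(&jit_current, pages);
                        return -1;       /* -EPERM in the kernel */
                }
        }
        return 0;
}

static void uncharge(long pages)
{
        atomic_fetch_sub(&jit_current, pages);
}

int main(void)
{
        printf("unprivileged, small: %d\n", charge(100, false));    /* 0  */
        printf("unprivileged, large: %d\n", charge(2000, false));   /* -1 */
        printf("privileged, large:   %d\n", charge(2000, true));    /* 0  */
        uncharge(2000);
        uncharge(100);
        return 0;
}
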
8377 diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
8378 index 141710b82a6c..2faad033715f 100644
8379 --- a/kernel/bpf/devmap.c
8380 +++ b/kernel/bpf/devmap.c
8381 @@ -164,6 +164,9 @@ static void dev_map_free(struct bpf_map *map)
8382 bpf_clear_redirect_map(map);
8383 synchronize_rcu();
8384
8385 + /* Make sure prior __dev_map_entry_free() have completed. */
8386 + rcu_barrier();
8387 +
8388 /* To ensure all pending flush operations have completed wait for flush
8389 * bitmap to indicate all flush_needed bits to be zero on _all_ cpus.
8390 * Because the above synchronize_rcu() ensures the map is disconnected
8391 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
8392 index 63dae7e0ccae..81441117f611 100644
8393 --- a/kernel/cgroup/cgroup.c
8394 +++ b/kernel/cgroup/cgroup.c
8395 @@ -4659,9 +4659,11 @@ static void css_release_work_fn(struct work_struct *work)
8396 if (cgroup_on_dfl(cgrp))
8397 cgroup_rstat_flush(cgrp);
8398
8399 + spin_lock_irq(&css_set_lock);
8400 for (tcgrp = cgroup_parent(cgrp); tcgrp;
8401 tcgrp = cgroup_parent(tcgrp))
8402 tcgrp->nr_dying_descendants--;
8403 + spin_unlock_irq(&css_set_lock);
8404
8405 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
8406 cgrp->id = -1;
8407 @@ -4874,12 +4876,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
8408 if (ret)
8409 goto out_idr_free;
8410
8411 + spin_lock_irq(&css_set_lock);
8412 for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
8413 cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;
8414
8415 if (tcgrp != cgrp)
8416 tcgrp->nr_descendants++;
8417 }
8418 + spin_unlock_irq(&css_set_lock);
8419
8420 if (notify_on_release(parent))
8421 set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
8422 @@ -5162,10 +5166,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
8423 if (parent && cgroup_is_threaded(cgrp))
8424 parent->nr_threaded_children--;
8425
8426 + spin_lock_irq(&css_set_lock);
8427 for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
8428 tcgrp->nr_descendants--;
8429 tcgrp->nr_dying_descendants++;
8430 }
8431 + spin_unlock_irq(&css_set_lock);
8432
8433 cgroup1_check_for_release(parent);
8434
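
Note: the cgroup-defs.h comment earlier in this patch and the spin_lock_irq(&css_set_lock) additions above follow a common locking convention: writers of nr_descendants/nr_dying_descendants must hold both cgroup_mutex and css_set_lock, while readers may hold either one. A small pthread sketch of that convention (lock and counter names are illustrative, not the kernel's):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* think: cgroup_mutex */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* think: css_set_lock */
static int nr_descendants;                                 /* protected as described */

/* Writers take both locks, always in the same order to avoid deadlock. */
static void writer_inc(void)
{
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        nr_descendants++;
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
}

/* A reader needs only one of the two locks: any concurrent writer necessarily
 * holds that lock too, so the value cannot change while we look at it. */
static int reader_get(void)
{
        pthread_mutex_lock(&lock_b);
        int val = nr_descendants;
        pthread_mutex_unlock(&lock_b);
        return val;
}

int main(void)            /* build with: cc -pthread */
{
        writer_inc();
        printf("nr_descendants = %d\n", reader_get());     /* 1 */
        return 0;
}
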
8435 diff --git a/kernel/irq_work.c b/kernel/irq_work.c
8436 index 6b7cdf17ccf8..73288914ed5e 100644
8437 --- a/kernel/irq_work.c
8438 +++ b/kernel/irq_work.c
8439 @@ -56,61 +56,70 @@ void __weak arch_irq_work_raise(void)
8440 */
8441 }
8442
8443 -/*
8444 - * Enqueue the irq_work @work on @cpu unless it's already pending
8445 - * somewhere.
8446 - *
8447 - * Can be re-enqueued while the callback is still in progress.
8448 - */
8449 -bool irq_work_queue_on(struct irq_work *work, int cpu)
8450 +/* Enqueue on current CPU, work must already be claimed and preempt disabled */
8451 +static void __irq_work_queue_local(struct irq_work *work)
8452 {
8453 - /* All work should have been flushed before going offline */
8454 - WARN_ON_ONCE(cpu_is_offline(cpu));
8455 -
8456 -#ifdef CONFIG_SMP
8457 -
8458 - /* Arch remote IPI send/receive backend aren't NMI safe */
8459 - WARN_ON_ONCE(in_nmi());
8460 + /* If the work is "lazy", handle it from next tick if any */
8461 + if (work->flags & IRQ_WORK_LAZY) {
8462 + if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
8463 + tick_nohz_tick_stopped())
8464 + arch_irq_work_raise();
8465 + } else {
8466 + if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
8467 + arch_irq_work_raise();
8468 + }
8469 +}
8470
8471 +/* Enqueue the irq work @work on the current CPU */
8472 +bool irq_work_queue(struct irq_work *work)
8473 +{
8474 /* Only queue if not already pending */
8475 if (!irq_work_claim(work))
8476 return false;
8477
8478 - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
8479 - arch_send_call_function_single_ipi(cpu);
8480 -
8481 -#else /* #ifdef CONFIG_SMP */
8482 - irq_work_queue(work);
8483 -#endif /* #else #ifdef CONFIG_SMP */
8484 + /* Queue the entry and raise the IPI if needed. */
8485 + preempt_disable();
8486 + __irq_work_queue_local(work);
8487 + preempt_enable();
8488
8489 return true;
8490 }
8491 +EXPORT_SYMBOL_GPL(irq_work_queue);
8492
8493 -/* Enqueue the irq work @work on the current CPU */
8494 -bool irq_work_queue(struct irq_work *work)
8495 +/*
8496 + * Enqueue the irq_work @work on @cpu unless it's already pending
8497 + * somewhere.
8498 + *
8499 + * Can be re-enqueued while the callback is still in progress.
8500 + */
8501 +bool irq_work_queue_on(struct irq_work *work, int cpu)
8502 {
8503 +#ifndef CONFIG_SMP
8504 + return irq_work_queue(work);
8505 +
8506 +#else /* CONFIG_SMP: */
8507 + /* All work should have been flushed before going offline */
8508 + WARN_ON_ONCE(cpu_is_offline(cpu));
8509 +
8510 /* Only queue if not already pending */
8511 if (!irq_work_claim(work))
8512 return false;
8513
8514 - /* Queue the entry and raise the IPI if needed. */
8515 preempt_disable();
8516 -
8517 - /* If the work is "lazy", handle it from next tick if any */
8518 - if (work->flags & IRQ_WORK_LAZY) {
8519 - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
8520 - tick_nohz_tick_stopped())
8521 - arch_irq_work_raise();
8522 + if (cpu != smp_processor_id()) {
8523 + /* Arch remote IPI send/receive backend aren't NMI safe */
8524 + WARN_ON_ONCE(in_nmi());
8525 + if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
8526 + arch_send_call_function_single_ipi(cpu);
8527 } else {
8528 - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
8529 - arch_irq_work_raise();
8530 + __irq_work_queue_local(work);
8531 }
8532 -
8533 preempt_enable();
8534
8535 return true;
8536 +#endif /* CONFIG_SMP */
8537 }
8538 -EXPORT_SYMBOL_GPL(irq_work_queue);
8539 +
8540
8541 bool irq_work_needs_cpu(void)
8542 {
8543 diff --git a/kernel/module.c b/kernel/module.c
8544 index 38bf28b5cc20..f797c6ace712 100644
8545 --- a/kernel/module.c
8546 +++ b/kernel/module.c
8547 @@ -1949,8 +1949,13 @@ void module_enable_ro(const struct module *mod, bool after_init)
8548 return;
8549
8550 frob_text(&mod->core_layout, set_memory_ro);
8551 + frob_text(&mod->core_layout, set_memory_x);
8552 +
8553 frob_rodata(&mod->core_layout, set_memory_ro);
8554 +
8555 frob_text(&mod->init_layout, set_memory_ro);
8556 + frob_text(&mod->init_layout, set_memory_x);
8557 +
8558 frob_rodata(&mod->init_layout, set_memory_ro);
8559
8560 if (after_init)
8561 diff --git a/kernel/rcu/rcuperf.c b/kernel/rcu/rcuperf.c
8562 index 34244523550e..19249b86fb33 100644
8563 --- a/kernel/rcu/rcuperf.c
8564 +++ b/kernel/rcu/rcuperf.c
8565 @@ -561,6 +561,10 @@ rcu_perf_cleanup(void)
8566
8567 if (torture_cleanup_begin())
8568 return;
8569 + if (!cur_ops) {
8570 + torture_cleanup_end();
8571 + return;
8572 + }
8573
8574 if (reader_tasks) {
8575 for (i = 0; i < nrealreaders; i++)
8576 @@ -681,6 +685,7 @@ rcu_perf_init(void)
8577 pr_cont(" %s", perf_ops[i]->name);
8578 pr_cont("\n");
8579 firsterr = -EINVAL;
8580 + cur_ops = NULL;
8581 goto unwind;
8582 }
8583 if (cur_ops->init)
8584 diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
8585 index c596c6f1e457..0b7af7e2bcbb 100644
8586 --- a/kernel/rcu/rcutorture.c
8587 +++ b/kernel/rcu/rcutorture.c
8588 @@ -1826,6 +1826,10 @@ rcu_torture_cleanup(void)
8589 cur_ops->cb_barrier();
8590 return;
8591 }
8592 + if (!cur_ops) {
8593 + torture_cleanup_end();
8594 + return;
8595 + }
8596
8597 rcu_torture_barrier_cleanup();
8598 torture_stop_kthread(rcu_torture_stall, stall_task);
8599 @@ -1964,6 +1968,7 @@ rcu_torture_init(void)
8600 pr_cont(" %s", torture_ops[i]->name);
8601 pr_cont("\n");
8602 firsterr = -EINVAL;
8603 + cur_ops = NULL;
8604 goto unwind;
8605 }
8606 if (cur_ops->fqs == NULL && fqs_duration != 0) {
8607 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
8608 index d7f409866cdf..6138754e5030 100644
8609 --- a/kernel/sched/core.c
8610 +++ b/kernel/sched/core.c
8611 @@ -6491,6 +6491,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
8612 static int cpu_shares_write_u64(struct cgroup_subsys_state *css,
8613 struct cftype *cftype, u64 shareval)
8614 {
8615 + if (shareval > scale_load_down(ULONG_MAX))
8616 + shareval = MAX_SHARES;
8617 return sched_group_set_shares(css_tg(css), scale_load(shareval));
8618 }
8619
8620 @@ -6593,8 +6595,10 @@ int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us)
8621 period = ktime_to_ns(tg->cfs_bandwidth.period);
8622 if (cfs_quota_us < 0)
8623 quota = RUNTIME_INF;
8624 - else
8625 + else if ((u64)cfs_quota_us <= U64_MAX / NSEC_PER_USEC)
8626 quota = (u64)cfs_quota_us * NSEC_PER_USEC;
8627 + else
8628 + return -EINVAL;
8629
8630 return tg_set_cfs_bandwidth(tg, period, quota);
8631 }
8632 @@ -6616,6 +6620,9 @@ int tg_set_cfs_period(struct task_group *tg, long cfs_period_us)
8633 {
8634 u64 quota, period;
8635
8636 + if ((u64)cfs_period_us > U64_MAX / NSEC_PER_USEC)
8637 + return -EINVAL;
8638 +
8639 period = (u64)cfs_period_us * NSEC_PER_USEC;
8640 quota = tg->cfs_bandwidth.quota;
8641
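
Note: tg_set_cfs_quota() and tg_set_cfs_period() above now reject microsecond inputs that would overflow once multiplied by NSEC_PER_USEC, using the standard pre-multiplication guard "x > MAX / factor". A userspace sketch of that guard (function name and limit handling are illustrative):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL

/* Returns 0 and stores the product, or -1 if us * NSEC_PER_USEC would overflow. */
static int usecs_to_nsecs(uint64_t us, uint64_t *ns)
{
        if (us > UINT64_MAX / NSEC_PER_USEC)
                return -1;              /* -EINVAL in the kernel */
        *ns = us * NSEC_PER_USEC;
        return 0;
}

int main(void)
{
        uint64_t ns;

        printf("100000 us: %d\n", usecs_to_nsecs(100000, &ns));          /* 0, fits      */
        printf("huge us:   %d\n", usecs_to_nsecs(UINT64_MAX / 10, &ns)); /* -1, rejected */
        return 0;
}
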
8642 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
8643 index d31916366d39..7a1e9db617f7 100644
8644 --- a/kernel/sched/fair.c
8645 +++ b/kernel/sched/fair.c
8646 @@ -9083,22 +9083,26 @@ static inline int on_null_domain(struct rq *rq)
8647 * - When one of the busy CPUs notice that there may be an idle rebalancing
8648 * needed, they will kick the idle load balancer, which then does idle
8649 * load balancing for all the idle CPUs.
8650 + * - HK_FLAG_MISC CPUs are used for this task, because HK_FLAG_SCHED is not set
8651 + * anywhere yet.
8652 */
8653
8654 static inline int find_new_ilb(void)
8655 {
8656 - int ilb = cpumask_first(nohz.idle_cpus_mask);
8657 + int ilb;
8658
8659 - if (ilb < nr_cpu_ids && idle_cpu(ilb))
8660 - return ilb;
8661 + for_each_cpu_and(ilb, nohz.idle_cpus_mask,
8662 + housekeeping_cpumask(HK_FLAG_MISC)) {
8663 + if (idle_cpu(ilb))
8664 + return ilb;
8665 + }
8666
8667 return nr_cpu_ids;
8668 }
8669
8670 /*
8671 - * Kick a CPU to do the nohz balancing, if it is time for it. We pick the
8672 - * nohz_load_balancer CPU (if there is one) otherwise fallback to any idle
8673 - * CPU (if there is one).
8674 + * Kick a CPU to do the nohz balancing, if it is time for it. We pick any
8675 + * idle CPU in the HK_FLAG_MISC housekeeping set (if there is one).
8676 */
8677 static void kick_ilb(unsigned int flags)
8678 {
8679 diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
8680 index 2e2955a8cf8f..b980cc96604f 100644
8681 --- a/kernel/sched/rt.c
8682 +++ b/kernel/sched/rt.c
8683 @@ -2559,6 +2559,8 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us)
8684 rt_runtime = (u64)rt_runtime_us * NSEC_PER_USEC;
8685 if (rt_runtime_us < 0)
8686 rt_runtime = RUNTIME_INF;
8687 + else if ((u64)rt_runtime_us > U64_MAX / NSEC_PER_USEC)
8688 + return -EINVAL;
8689
8690 return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
8691 }
8692 @@ -2579,6 +2581,9 @@ int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
8693 {
8694 u64 rt_runtime, rt_period;
8695
8696 + if (rt_period_us > U64_MAX / NSEC_PER_USEC)
8697 + return -EINVAL;
8698 +
8699 rt_period = rt_period_us * NSEC_PER_USEC;
8700 rt_runtime = tg->rt_bandwidth.rt_runtime;
8701
8702 diff --git a/kernel/time/time.c b/kernel/time/time.c
8703 index ccdb351277ee..be057d6579f1 100644
8704 --- a/kernel/time/time.c
8705 +++ b/kernel/time/time.c
8706 @@ -172,7 +172,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz
8707 static int firsttime = 1;
8708 int error = 0;
8709
8710 - if (tv && !timespec64_valid(tv))
8711 + if (tv && !timespec64_valid_settod(tv))
8712 return -EINVAL;
8713
8714 error = security_settime64(tv, tz);
8715 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
8716 index 7846ce24ecc0..9a6bfcd22dc6 100644
8717 --- a/kernel/time/timekeeping.c
8718 +++ b/kernel/time/timekeeping.c
8719 @@ -1242,7 +1242,7 @@ int do_settimeofday64(const struct timespec64 *ts)
8720 unsigned long flags;
8721 int ret = 0;
8722
8723 - if (!timespec64_valid_strict(ts))
8724 + if (!timespec64_valid_settod(ts))
8725 return -EINVAL;
8726
8727 raw_spin_lock_irqsave(&timekeeper_lock, flags);
8728 @@ -1299,7 +1299,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts)
8729 /* Make sure the proposed value is valid */
8730 tmp = timespec64_add(tk_xtime(tk), *ts);
8731 if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
8732 - !timespec64_valid_strict(&tmp)) {
8733 + !timespec64_valid_settod(&tmp)) {
8734 ret = -EINVAL;
8735 goto error;
8736 }
8737 @@ -1556,7 +1556,7 @@ void __init timekeeping_init(void)
8738 unsigned long flags;
8739
8740 read_persistent_wall_and_boot_offset(&wall_time, &boot_offset);
8741 - if (timespec64_valid_strict(&wall_time) &&
8742 + if (timespec64_valid_settod(&wall_time) &&
8743 timespec64_to_ns(&wall_time) > 0) {
8744 persistent_clock_exists = true;
8745 } else if (timespec64_to_ns(&wall_time) != 0) {
8746 diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c
8747 index 4ad967453b6f..3ea65cdff30d 100644
8748 --- a/kernel/trace/trace_branch.c
8749 +++ b/kernel/trace/trace_branch.c
8750 @@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect)
8751 void ftrace_likely_update(struct ftrace_likely_data *f, int val,
8752 int expect, int is_constant)
8753 {
8754 + unsigned long flags = user_access_save();
8755 +
8756 /* A constant is always correct */
8757 if (is_constant) {
8758 f->constant++;
8759 @@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
8760 f->data.correct++;
8761 else
8762 f->data.incorrect++;
8763 +
8764 + user_access_restore(flags);
8765 }
8766 EXPORT_SYMBOL(ftrace_likely_update);
8767
8768 diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
8769 index 63d0816ab23b..7761f3294339 100644
8770 --- a/lib/kobject_uevent.c
8771 +++ b/lib/kobject_uevent.c
8772 @@ -464,6 +464,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
8773 int i = 0;
8774 int retval = 0;
8775
8776 + /*
8777 + * Mark "remove" event done regardless of result, for some subsystems
8778 + * do not want to re-trigger "remove" event via automatic cleanup.
8779 + */
8780 + if (action == KOBJ_REMOVE)
8781 + kobj->state_remove_uevent_sent = 1;
8782 +
8783 pr_debug("kobject: '%s' (%p): %s\n",
8784 kobject_name(kobj), kobj, __func__);
8785
8786 @@ -565,10 +572,6 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
8787 kobj->state_add_uevent_sent = 1;
8788 break;
8789
8790 - case KOBJ_REMOVE:
8791 - kobj->state_remove_uevent_sent = 1;
8792 - break;
8793 -
8794 case KOBJ_UNBIND:
8795 zap_modalias_env(env);
8796 break;
8797 diff --git a/lib/sbitmap.c b/lib/sbitmap.c
8798 index fdd1b8aa8ac6..0572ac340325 100644
8799 --- a/lib/sbitmap.c
8800 +++ b/lib/sbitmap.c
8801 @@ -356,7 +356,7 @@ static void sbitmap_queue_update_wake_batch(struct sbitmap_queue *sbq,
8802 * to ensure that the batch size is updated before the wait
8803 * counts.
8804 */
8805 - smp_mb__before_atomic();
8806 + smp_mb();
8807 for (i = 0; i < SBQ_WAIT_QUEUES; i++)
8808 atomic_set(&sbq->ws[i].wait_cnt, 1);
8809 }
8810 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
8811 index b53e1b5d80f4..e304b54c9c7d 100644
8812 --- a/lib/strncpy_from_user.c
8813 +++ b/lib/strncpy_from_user.c
8814 @@ -23,10 +23,11 @@
8815 * hit it), 'max' is the address space maximum (and we return
8816 * -EFAULT if we hit it).
8817 */
8818 -static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
8819 +static inline long do_strncpy_from_user(char *dst, const char __user *src,
8820 + unsigned long count, unsigned long max)
8821 {
8822 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
8823 - long res = 0;
8824 + unsigned long res = 0;
8825
8826 /*
8827 * Truncate 'max' to the user-specified limit, so that
8828 diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
8829 index 60d0bbda8f5e..184f80f7bacf 100644
8830 --- a/lib/strnlen_user.c
8831 +++ b/lib/strnlen_user.c
8832 @@ -28,7 +28,7 @@
8833 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
8834 {
8835 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
8836 - long align, res = 0;
8837 + unsigned long align, res = 0;
8838 unsigned long c;
8839
8840 /*
8841 @@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
8842 * Do everything aligned. But that means that we
8843 * need to also expand the maximum..
8844 */
8845 - align = (sizeof(long) - 1) & (unsigned long)src;
8846 + align = (sizeof(unsigned long) - 1) & (unsigned long)src;
8847 src -= align;
8848 max += align;
8849
8850 diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
8851 index a60bacf7120b..2895e3b26e93 100644
8852 --- a/net/batman-adv/distributed-arp-table.c
8853 +++ b/net/batman-adv/distributed-arp-table.c
8854 @@ -1394,7 +1394,6 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
8855 hw_src, &ip_src, hw_dst, &ip_dst,
8856 dat_entry->mac_addr, &dat_entry->ip);
8857 dropped = true;
8858 - goto out;
8859 }
8860
8861 /* Update our internal cache with both the IP addresses the node got
8862 @@ -1403,6 +1402,9 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
8863 batadv_dat_entry_add(bat_priv, ip_src, hw_src, vid);
8864 batadv_dat_entry_add(bat_priv, ip_dst, hw_dst, vid);
8865
8866 + if (dropped)
8867 + goto out;
8868 +
8869 /* If BLA is enabled, only forward ARP replies if we have claimed the
8870 * source of the ARP reply or if no one else of the same backbone has
8871 * already claimed that client. This prevents that different gateways
8872 diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
8873 index 69c0d85bceb3..79b8a2d8793e 100644
8874 --- a/net/batman-adv/main.c
8875 +++ b/net/batman-adv/main.c
8876 @@ -160,6 +160,7 @@ int batadv_mesh_init(struct net_device *soft_iface)
8877 spin_lock_init(&bat_priv->tt.commit_lock);
8878 spin_lock_init(&bat_priv->gw.list_lock);
8879 #ifdef CONFIG_BATMAN_ADV_MCAST
8880 + spin_lock_init(&bat_priv->mcast.mla_lock);
8881 spin_lock_init(&bat_priv->mcast.want_lists_lock);
8882 #endif
8883 spin_lock_init(&bat_priv->tvlv.container_list_lock);
8884 diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
8885 index 86725d792e15..b90fe25d6b0b 100644
8886 --- a/net/batman-adv/multicast.c
8887 +++ b/net/batman-adv/multicast.c
8888 @@ -325,8 +325,6 @@ static void batadv_mcast_mla_list_free(struct hlist_head *mcast_list)
8889 * translation table except the ones listed in the given mcast_list.
8890 *
8891 * If mcast_list is NULL then all are retracted.
8892 - *
8893 - * Do not call outside of the mcast worker! (or cancel mcast worker first)
8894 */
8895 static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
8896 struct hlist_head *mcast_list)
8897 @@ -334,8 +332,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
8898 struct batadv_hw_addr *mcast_entry;
8899 struct hlist_node *tmp;
8900
8901 - WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
8902 -
8903 hlist_for_each_entry_safe(mcast_entry, tmp, &bat_priv->mcast.mla_list,
8904 list) {
8905 if (mcast_list &&
8906 @@ -359,8 +355,6 @@ static void batadv_mcast_mla_tt_retract(struct batadv_priv *bat_priv,
8907 *
8908 * Adds multicast listener announcements from the given mcast_list to the
8909 * translation table if they have not been added yet.
8910 - *
8911 - * Do not call outside of the mcast worker! (or cancel mcast worker first)
8912 */
8913 static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
8914 struct hlist_head *mcast_list)
8915 @@ -368,8 +362,6 @@ static void batadv_mcast_mla_tt_add(struct batadv_priv *bat_priv,
8916 struct batadv_hw_addr *mcast_entry;
8917 struct hlist_node *tmp;
8918
8919 - WARN_ON(delayed_work_pending(&bat_priv->mcast.work));
8920 -
8921 if (!mcast_list)
8922 return;
8923
8924 @@ -658,7 +650,10 @@ static void batadv_mcast_mla_update(struct work_struct *work)
8925 priv_mcast = container_of(delayed_work, struct batadv_priv_mcast, work);
8926 bat_priv = container_of(priv_mcast, struct batadv_priv, mcast);
8927
8928 + spin_lock(&bat_priv->mcast.mla_lock);
8929 __batadv_mcast_mla_update(bat_priv);
8930 + spin_unlock(&bat_priv->mcast.mla_lock);
8931 +
8932 batadv_mcast_start_timer(bat_priv);
8933 }
8934
8935 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
8936 index 343d304851a5..eeee3e61c625 100644
8937 --- a/net/batman-adv/types.h
8938 +++ b/net/batman-adv/types.h
8939 @@ -1215,6 +1215,11 @@ struct batadv_priv_mcast {
8940 /** @bridged: whether the soft interface has a bridge on top */
8941 unsigned char bridged:1;
8942
8943 + /**
8944 + * @mla_lock: a lock protecting mla_list and mla_flags
8945 + */
8946 + spinlock_t mla_lock;
8947 +
8948 /**
8949 * @num_want_all_unsnoopables: number of nodes wanting unsnoopable IP
8950 * traffic
8951 diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
8952 index a06f03047717..5afd67ef797a 100644
8953 --- a/net/bluetooth/hci_core.c
8954 +++ b/net/bluetooth/hci_core.c
8955 @@ -4274,6 +4274,9 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
8956 return;
8957 }
8958
8959 + /* If we reach this point this event matches the last command sent */
8960 + hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
8961 +
8962 /* If the command succeeded and there's still more commands in
8963 * this request the request is not yet complete.
8964 */
8965 @@ -4384,6 +4387,8 @@ static void hci_cmd_work(struct work_struct *work)
8966
8967 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
8968 if (hdev->sent_cmd) {
8969 + if (hci_req_status_pend(hdev))
8970 + hci_dev_set_flag(hdev, HCI_CMD_PENDING);
8971 atomic_dec(&hdev->cmd_cnt);
8972 hci_send_frame(hdev, skb);
8973 if (test_bit(HCI_RESET, &hdev->flags))
8974 diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
8975 index 7f800c3480f7..3e7badb3ac2d 100644
8976 --- a/net/bluetooth/hci_event.c
8977 +++ b/net/bluetooth/hci_event.c
8978 @@ -3357,6 +3357,12 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
8979 hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
8980 req_complete_skb);
8981
8982 + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
8983 + bt_dev_err(hdev,
8984 + "unexpected event for opcode 0x%4.4x", *opcode);
8985 + return;
8986 + }
8987 +
8988 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
8989 queue_work(hdev->workqueue, &hdev->cmd_work);
8990 }
8991 @@ -3464,6 +3470,12 @@ static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
8992 hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
8993 req_complete_skb);
8994
8995 + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) {
8996 + bt_dev_err(hdev,
8997 + "unexpected event for opcode 0x%4.4x", *opcode);
8998 + return;
8999 + }
9000 +
9001 if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
9002 queue_work(hdev->workqueue, &hdev->cmd_work);
9003 }
9004 diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
9005 index e8c9ef1e1922..9448ebd3780a 100644
9006 --- a/net/bluetooth/hci_request.c
9007 +++ b/net/bluetooth/hci_request.c
9008 @@ -46,6 +46,11 @@ void hci_req_purge(struct hci_request *req)
9009 skb_queue_purge(&req->cmd_q);
9010 }
9011
9012 +bool hci_req_status_pend(struct hci_dev *hdev)
9013 +{
9014 + return hdev->req_status == HCI_REQ_PEND;
9015 +}
9016 +
9017 static int req_run(struct hci_request *req, hci_req_complete_t complete,
9018 hci_req_complete_skb_t complete_skb)
9019 {
9020 diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
9021 index 692cc8b13368..55b2050cc9ff 100644
9022 --- a/net/bluetooth/hci_request.h
9023 +++ b/net/bluetooth/hci_request.h
9024 @@ -37,6 +37,7 @@ struct hci_request {
9025
9026 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
9027 void hci_req_purge(struct hci_request *req);
9028 +bool hci_req_status_pend(struct hci_dev *hdev);
9029 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
9030 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
9031 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
9032 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
9033 index b1a2c5e38530..37b4667128a3 100644
9034 --- a/net/core/sysctl_net_core.c
9035 +++ b/net/core/sysctl_net_core.c
9036 @@ -279,7 +279,6 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
9037 return ret;
9038 }
9039
9040 -# ifdef CONFIG_HAVE_EBPF_JIT
9041 static int
9042 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
9043 void __user *buffer, size_t *lenp,
9044 @@ -290,7 +289,6 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
9045
9046 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
9047 }
9048 -# endif
9049 #endif
9050
9051 static struct ctl_table net_core_table[] = {
9052 @@ -397,6 +395,14 @@ static struct ctl_table net_core_table[] = {
9053 .extra2 = &one,
9054 },
9055 # endif
9056 + {
9057 + .procname = "bpf_jit_limit",
9058 + .data = &bpf_jit_limit,
9059 + .maxlen = sizeof(int),
9060 + .mode = 0600,
9061 + .proc_handler = proc_dointvec_minmax_bpf_restricted,
9062 + .extra1 = &one,
9063 + },
9064 #endif
9065 {
9066 .procname = "netdev_tstamp_prequeue",
9067 diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
9068 index 3dbecae4be73..2ac749c4a6b2 100644
9069 --- a/net/mac80211/mlme.c
9070 +++ b/net/mac80211/mlme.c
9071 @@ -1156,9 +1156,6 @@ static void ieee80211_chswitch_work(struct work_struct *work)
9072 goto out;
9073 }
9074
9075 - /* XXX: shouldn't really modify cfg80211-owned data! */
9076 - ifmgd->associated->channel = sdata->csa_chandef.chan;
9077 -
9078 ifmgd->csa_waiting_bcn = true;
9079
9080 ieee80211_sta_reset_beacon_monitor(sdata);
9081 diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
9082 index 048e004ed0ee..c6711ead5e59 100644
9083 --- a/net/wireless/nl80211.c
9084 +++ b/net/wireless/nl80211.c
9085 @@ -15441,6 +15441,11 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
9086
9087 wdev->chandef = *chandef;
9088 wdev->preset_chandef = *chandef;
9089 +
9090 + if (wdev->iftype == NL80211_IFTYPE_STATION &&
9091 + !WARN_ON(!wdev->current_bss))
9092 + wdev->current_bss->pub.channel = chandef->chan;
9093 +
9094 nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL,
9095 NL80211_CMD_CH_SWITCH_NOTIFY, 0);
9096 }
9097 diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
9098 index 186e727b737b..6fd9954e1c08 100644
9099 --- a/security/selinux/netlabel.c
9100 +++ b/security/selinux/netlabel.c
9101 @@ -288,11 +288,8 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
9102 int rc;
9103 struct netlbl_lsm_secattr secattr;
9104 struct sk_security_struct *sksec = ep->base.sk->sk_security;
9105 - struct sockaddr *addr;
9106 struct sockaddr_in addr4;
9107 -#if IS_ENABLED(CONFIG_IPV6)
9108 struct sockaddr_in6 addr6;
9109 -#endif
9110
9111 if (ep->base.sk->sk_family != PF_INET &&
9112 ep->base.sk->sk_family != PF_INET6)
9113 @@ -310,16 +307,15 @@ int selinux_netlbl_sctp_assoc_request(struct sctp_endpoint *ep,
9114 if (ip_hdr(skb)->version == 4) {
9115 addr4.sin_family = AF_INET;
9116 addr4.sin_addr.s_addr = ip_hdr(skb)->saddr;
9117 - addr = (struct sockaddr *)&addr4;
9118 -#if IS_ENABLED(CONFIG_IPV6)
9119 - } else {
9120 + rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr4, &secattr);
9121 + } else if (IS_ENABLED(CONFIG_IPV6) && ip_hdr(skb)->version == 6) {
9122 addr6.sin6_family = AF_INET6;
9123 addr6.sin6_addr = ipv6_hdr(skb)->saddr;
9124 - addr = (struct sockaddr *)&addr6;
9125 -#endif
9126 + rc = netlbl_conn_setattr(ep->base.sk, (void *)&addr6, &secattr);
9127 + } else {
9128 + rc = -EAFNOSUPPORT;
9129 }
9130
9131 - rc = netlbl_conn_setattr(ep->base.sk, addr, &secattr);
9132 if (rc == 0)
9133 sksec->nlbl_state = NLBL_LABELED;
9134
9135 diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
9136 index d5f73c837281..7994e8ddc7d2 100644
9137 --- a/sound/soc/codecs/hdmi-codec.c
9138 +++ b/sound/soc/codecs/hdmi-codec.c
9139 @@ -439,8 +439,12 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream,
9140 if (!ret) {
9141 ret = snd_pcm_hw_constraint_eld(substream->runtime,
9142 hcp->eld);
9143 - if (ret)
9144 + if (ret) {
9145 + mutex_lock(&hcp->current_stream_lock);
9146 + hcp->current_stream = NULL;
9147 + mutex_unlock(&hcp->current_stream_lock);
9148 return ret;
9149 + }
9150 }
9151 /* Select chmap supported */
9152 hdmi_codec_eld_chmap(hcp);
9153 diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
9154 index f70db8412c7c..160b2764b2ad 100644
9155 --- a/sound/soc/davinci/davinci-mcasp.c
9156 +++ b/sound/soc/davinci/davinci-mcasp.c
9157 @@ -43,6 +43,7 @@
9158
9159 #define MCASP_MAX_AFIFO_DEPTH 64
9160
9161 +#ifdef CONFIG_PM
9162 static u32 context_regs[] = {
9163 DAVINCI_MCASP_TXFMCTL_REG,
9164 DAVINCI_MCASP_RXFMCTL_REG,
9165 @@ -65,6 +66,7 @@ struct davinci_mcasp_context {
9166 u32 *xrsr_regs; /* for serializer configuration */
9167 bool pm_state;
9168 };
9169 +#endif
9170
9171 struct davinci_mcasp_ruledata {
9172 struct davinci_mcasp *mcasp;
9173 diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig
9174 index 2e75b5bc5f1d..f721cd4e3f97 100644
9175 --- a/sound/soc/fsl/Kconfig
9176 +++ b/sound/soc/fsl/Kconfig
9177 @@ -173,16 +173,17 @@ config SND_MPC52xx_SOC_EFIKA
9178
9179 endif # SND_POWERPC_SOC
9180
9181 +config SND_SOC_IMX_PCM_FIQ
9182 + tristate
9183 + default y if SND_SOC_IMX_SSI=y && (SND_SOC_FSL_SSI=m || SND_SOC_FSL_SPDIF=m) && (MXC_TZIC || MXC_AVIC)
9184 + select FIQ
9185 +
9186 if SND_IMX_SOC
9187
9188 config SND_SOC_IMX_SSI
9189 tristate
9190 select SND_SOC_FSL_UTILS
9191
9192 -config SND_SOC_IMX_PCM_FIQ
9193 - tristate
9194 - select FIQ
9195 -
9196 comment "SoC Audio support for Freescale i.MX boards:"
9197
9198 config SND_MXC_SOC_WM1133_EV1
9199 diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
9200 index 191426a6d9ad..30a3d68b5c03 100644
9201 --- a/sound/soc/fsl/eukrea-tlv320.c
9202 +++ b/sound/soc/fsl/eukrea-tlv320.c
9203 @@ -118,13 +118,13 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
9204 if (ret) {
9205 dev_err(&pdev->dev,
9206 "fsl,mux-int-port node missing or invalid.\n");
9207 - return ret;
9208 + goto err;
9209 }
9210 ret = of_property_read_u32(np, "fsl,mux-ext-port", &ext_port);
9211 if (ret) {
9212 dev_err(&pdev->dev,
9213 "fsl,mux-ext-port node missing or invalid.\n");
9214 - return ret;
9215 + goto err;
9216 }
9217
9218 /*
9219 diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
9220 index 4163f2cfc06f..bfc5b21d0c3f 100644
9221 --- a/sound/soc/fsl/fsl_sai.c
9222 +++ b/sound/soc/fsl/fsl_sai.c
9223 @@ -268,12 +268,14 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
9224 case SND_SOC_DAIFMT_CBS_CFS:
9225 val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
9226 val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
9227 + sai->is_slave_mode = false;
9228 break;
9229 case SND_SOC_DAIFMT_CBM_CFM:
9230 sai->is_slave_mode = true;
9231 break;
9232 case SND_SOC_DAIFMT_CBS_CFM:
9233 val_cr2 |= FSL_SAI_CR2_BCD_MSTR;
9234 + sai->is_slave_mode = false;
9235 break;
9236 case SND_SOC_DAIFMT_CBM_CFS:
9237 val_cr4 |= FSL_SAI_CR4_FSD_MSTR;
9238 diff --git a/sound/soc/fsl/fsl_utils.c b/sound/soc/fsl/fsl_utils.c
9239 index 7f0fa4b52223..cca33ab7020a 100644
9240 --- a/sound/soc/fsl/fsl_utils.c
9241 +++ b/sound/soc/fsl/fsl_utils.c
9242 @@ -71,6 +71,7 @@ int fsl_asoc_get_dma_channel(struct device_node *ssi_np,
9243 iprop = of_get_property(dma_np, "cell-index", NULL);
9244 if (!iprop) {
9245 of_node_put(dma_np);
9246 + of_node_put(dma_channel_np);
9247 return -EINVAL;
9248 }
9249 *dma_id = be32_to_cpup(iprop);
9250 diff --git a/sound/soc/intel/boards/kbl_da7219_max98357a.c b/sound/soc/intel/boards/kbl_da7219_max98357a.c
9251 index 38f6ab74709d..07491a0f8fb8 100644
9252 --- a/sound/soc/intel/boards/kbl_da7219_max98357a.c
9253 +++ b/sound/soc/intel/boards/kbl_da7219_max98357a.c
9254 @@ -188,7 +188,7 @@ static int kabylake_da7219_codec_init(struct snd_soc_pcm_runtime *rtd)
9255
9256 jack = &ctx->kabylake_headset;
9257
9258 - snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_MEDIA);
9259 + snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE);
9260 snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOLUMEUP);
9261 snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEDOWN);
9262 snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOICECOMMAND);
9263 diff --git a/tools/bpf/bpftool/.gitignore b/tools/bpf/bpftool/.gitignore
9264 index 67167e44b726..8248b8dd89d4 100644
9265 --- a/tools/bpf/bpftool/.gitignore
9266 +++ b/tools/bpf/bpftool/.gitignore
9267 @@ -1,5 +1,5 @@
9268 *.d
9269 -bpftool
9270 +/bpftool
9271 bpftool*.8
9272 bpf-helpers.*
9273 FEATURE-DUMP.bpftool
9274 diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
9275 index 7a0014794bff..dd0b68d1f4be 100644
9276 --- a/tools/lib/bpf/bpf.c
9277 +++ b/tools/lib/bpf/bpf.c
9278 @@ -53,6 +53,8 @@
9279 # define __NR_bpf 349
9280 # elif defined(__s390__)
9281 # define __NR_bpf 351
9282 +# elif defined(__arc__)
9283 +# define __NR_bpf 280
9284 # else
9285 # error __NR_bpf not defined. libbpf does not support your arch.
9286 # endif
9287 diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
9288 index 6f38164b2618..c3145ab3bdca 100644
9289 --- a/tools/lib/bpf/bpf.h
9290 +++ b/tools/lib/bpf/bpf.h
9291 @@ -26,6 +26,7 @@
9292 #include <linux/bpf.h>
9293 #include <stdbool.h>
9294 #include <stddef.h>
9295 +#include <stdint.h>
9296
9297 struct bpf_create_map_attr {
9298 const char *name;
9299 diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
9300 index 8fcd1c076add..cbd55f5f8d59 100644
9301 --- a/tools/testing/selftests/bpf/test_libbpf_open.c
9302 +++ b/tools/testing/selftests/bpf/test_libbpf_open.c
9303 @@ -11,6 +11,8 @@ static const char *__doc__ =
9304 #include <bpf/libbpf.h>
9305 #include <getopt.h>
9306
9307 +#include "bpf_rlimit.h"
9308 +
9309 static const struct option long_options[] = {
9310 {"help", no_argument, NULL, 'h' },
9311 {"debug", no_argument, NULL, 'D' },
9312 diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
9313 index cabe2a3a3b30..cf156b353679 100644
9314 --- a/tools/testing/selftests/bpf/trace_helpers.c
9315 +++ b/tools/testing/selftests/bpf/trace_helpers.c
9316 @@ -51,6 +51,10 @@ struct ksym *ksym_search(long key)
9317 int start = 0, end = sym_cnt;
9318 int result;
9319
9320 + /* kallsyms not loaded. return NULL */
9321 + if (sym_cnt <= 0)
9322 + return NULL;
9323 +
9324 while (start < end) {
9325 size_t mid = start + (end - start) / 2;
9326
9327 diff --git a/tools/testing/selftests/cgroup/test_memcontrol.c b/tools/testing/selftests/cgroup/test_memcontrol.c
9328 index 28d321ba311b..6f339882a6ca 100644
9329 --- a/tools/testing/selftests/cgroup/test_memcontrol.c
9330 +++ b/tools/testing/selftests/cgroup/test_memcontrol.c
9331 @@ -26,7 +26,7 @@
9332 */
9333 static int test_memcg_subtree_control(const char *root)
9334 {
9335 - char *parent, *child, *parent2, *child2;
9336 + char *parent, *child, *parent2 = NULL, *child2 = NULL;
9337 int ret = KSFT_FAIL;
9338 char buf[PAGE_SIZE];
9339
9340 @@ -34,50 +34,54 @@ static int test_memcg_subtree_control(const char *root)
9341 parent = cg_name(root, "memcg_test_0");
9342 child = cg_name(root, "memcg_test_0/memcg_test_1");
9343 if (!parent || !child)
9344 - goto cleanup;
9345 + goto cleanup_free;
9346
9347 if (cg_create(parent))
9348 - goto cleanup;
9349 + goto cleanup_free;
9350
9351 if (cg_write(parent, "cgroup.subtree_control", "+memory"))
9352 - goto cleanup;
9353 + goto cleanup_parent;
9354
9355 if (cg_create(child))
9356 - goto cleanup;
9357 + goto cleanup_parent;
9358
9359 if (cg_read_strstr(child, "cgroup.controllers", "memory"))
9360 - goto cleanup;
9361 + goto cleanup_child;
9362
9363 /* Create two nested cgroups without enabling memory controller */
9364 parent2 = cg_name(root, "memcg_test_1");
9365 child2 = cg_name(root, "memcg_test_1/memcg_test_1");
9366 if (!parent2 || !child2)
9367 - goto cleanup;
9368 + goto cleanup_free2;
9369
9370 if (cg_create(parent2))
9371 - goto cleanup;
9372 + goto cleanup_free2;
9373
9374 if (cg_create(child2))
9375 - goto cleanup;
9376 + goto cleanup_parent2;
9377
9378 if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf)))
9379 - goto cleanup;
9380 + goto cleanup_all;
9381
9382 if (!cg_read_strstr(child2, "cgroup.controllers", "memory"))
9383 - goto cleanup;
9384 + goto cleanup_all;
9385
9386 ret = KSFT_PASS;
9387
9388 -cleanup:
9389 - cg_destroy(child);
9390 - cg_destroy(parent);
9391 - free(parent);
9392 - free(child);
9393 -
9394 +cleanup_all:
9395 cg_destroy(child2);
9396 +cleanup_parent2:
9397 cg_destroy(parent2);
9398 +cleanup_free2:
9399 free(parent2);
9400 free(child2);
9401 +cleanup_child:
9402 + cg_destroy(child);
9403 +cleanup_parent:
9404 + cg_destroy(parent);
9405 +cleanup_free:
9406 + free(parent);
9407 + free(child);
9408
9409 return ret;
9410 }