Magellan Linux

Contents of /trunk/kernel-alx/patches-4.19/0133-4.19.34-all-fixes.patch



Revision 3412
Fri Aug 2 11:47:43 2019 UTC by niro
File size: 211095 byte(s)
-linux-4.19.34
1 diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
2 index 525452726d31..b9e060c5b61e 100644
3 --- a/Documentation/arm/kernel_mode_neon.txt
4 +++ b/Documentation/arm/kernel_mode_neon.txt
5 @@ -6,7 +6,7 @@ TL;DR summary
6 * Use only NEON instructions, or VFP instructions that don't rely on support
7 code
8 * Isolate your NEON code in a separate compilation unit, and compile it with
9 - '-mfpu=neon -mfloat-abi=softfp'
10 + '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
11 * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
12 NEON code
13 * Don't sleep in your NEON code, and be aware that it will be executed with
14 @@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
15 Therefore, the recommended and only supported way of using NEON/VFP in the
16 kernel is by adhering to the following rules:
17 * isolate the NEON code in a separate compilation unit and compile it with
18 - '-mfpu=neon -mfloat-abi=softfp';
19 + '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
20 * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
21 into the unit containing the NEON code from a compilation unit which is *not*
22 built with the GCC flag '-mfpu=neon' set.
23 diff --git a/Makefile b/Makefile
24 index 8de5fab711d8..8fdfe0af5862 100644
25 --- a/Makefile
26 +++ b/Makefile
27 @@ -1,7 +1,7 @@
28 # SPDX-License-Identifier: GPL-2.0
29 VERSION = 4
30 PATCHLEVEL = 19
31 -SUBLEVEL = 33
32 +SUBLEVEL = 34
33 EXTRAVERSION =
34 NAME = "People's Front"
35
36 @@ -626,12 +626,15 @@ ifeq ($(may-sync-config),1)
37 -include include/config/auto.conf.cmd
38
39 # To avoid any implicit rule to kick in, define an empty command
40 -$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
41 +$(KCONFIG_CONFIG): ;
42
43 # The actual configuration files used during the build are stored in
44 # include/generated/ and include/config/. Update them if .config is newer than
45 # include/config/auto.conf (which mirrors .config).
46 -include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
47 +#
48 +# This exploits the 'multi-target pattern rule' trick.
49 +# The syncconfig should be executed only once to make all the targets.
50 +%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG)
51 $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
52 else
53 # External modules and some install targets need include/generated/autoconf.h
54 diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
55 index abff7ef7c9cd..4981741377f3 100644
56 --- a/arch/arm/boot/dts/lpc32xx.dtsi
57 +++ b/arch/arm/boot/dts/lpc32xx.dtsi
58 @@ -230,7 +230,7 @@
59 status = "disabled";
60 };
61
62 - i2s1: i2s@2009C000 {
63 + i2s1: i2s@2009c000 {
64 compatible = "nxp,lpc3220-i2s";
65 reg = <0x2009C000 0x1000>;
66 };
67 @@ -273,7 +273,7 @@
68 status = "disabled";
69 };
70
71 - i2c1: i2c@400A0000 {
72 + i2c1: i2c@400a0000 {
73 compatible = "nxp,pnx-i2c";
74 reg = <0x400A0000 0x100>;
75 interrupt-parent = <&sic1>;
76 @@ -284,7 +284,7 @@
77 clocks = <&clk LPC32XX_CLK_I2C1>;
78 };
79
80 - i2c2: i2c@400A8000 {
81 + i2c2: i2c@400a8000 {
82 compatible = "nxp,pnx-i2c";
83 reg = <0x400A8000 0x100>;
84 interrupt-parent = <&sic1>;
85 @@ -295,7 +295,7 @@
86 clocks = <&clk LPC32XX_CLK_I2C2>;
87 };
88
89 - mpwm: mpwm@400E8000 {
90 + mpwm: mpwm@400e8000 {
91 compatible = "nxp,lpc3220-motor-pwm";
92 reg = <0x400E8000 0x78>;
93 status = "disabled";
94 @@ -394,7 +394,7 @@
95 #gpio-cells = <3>; /* bank, pin, flags */
96 };
97
98 - timer4: timer@4002C000 {
99 + timer4: timer@4002c000 {
100 compatible = "nxp,lpc3220-timer";
101 reg = <0x4002C000 0x1000>;
102 interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
103 @@ -412,7 +412,7 @@
104 status = "disabled";
105 };
106
107 - watchdog: watchdog@4003C000 {
108 + watchdog: watchdog@4003c000 {
109 compatible = "nxp,pnx4008-wdt";
110 reg = <0x4003C000 0x1000>;
111 clocks = <&clk LPC32XX_CLK_WDOG>;
112 @@ -451,7 +451,7 @@
113 status = "disabled";
114 };
115
116 - timer1: timer@4004C000 {
117 + timer1: timer@4004c000 {
118 compatible = "nxp,lpc3220-timer";
119 reg = <0x4004C000 0x1000>;
120 interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
121 @@ -475,7 +475,7 @@
122 status = "disabled";
123 };
124
125 - pwm1: pwm@4005C000 {
126 + pwm1: pwm@4005c000 {
127 compatible = "nxp,lpc3220-pwm";
128 reg = <0x4005C000 0x4>;
129 clocks = <&clk LPC32XX_CLK_PWM1>;
130 @@ -484,7 +484,7 @@
131 status = "disabled";
132 };
133
134 - pwm2: pwm@4005C004 {
135 + pwm2: pwm@4005c004 {
136 compatible = "nxp,lpc3220-pwm";
137 reg = <0x4005C004 0x4>;
138 clocks = <&clk LPC32XX_CLK_PWM2>;
139 diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
140 index 08f7f6be7254..5b3e5c50c72f 100644
141 --- a/arch/arm/boot/dts/meson8b.dtsi
142 +++ b/arch/arm/boot/dts/meson8b.dtsi
143 @@ -207,9 +207,7 @@
144 groups = "eth_tx_clk",
145 "eth_tx_en",
146 "eth_txd1_0",
147 - "eth_txd1_1",
148 "eth_txd0_0",
149 - "eth_txd0_1",
150 "eth_rx_clk",
151 "eth_rx_dv",
152 "eth_rxd1",
153 @@ -218,7 +216,9 @@
154 "eth_mdc",
155 "eth_ref_clk",
156 "eth_txd2",
157 - "eth_txd3";
158 + "eth_txd3",
159 + "eth_rxd3",
160 + "eth_rxd2";
161 function = "ethernet";
162 };
163 };
164 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
165 index 69772e742a0a..83ae97c049d9 100644
166 --- a/arch/arm/include/asm/barrier.h
167 +++ b/arch/arm/include/asm/barrier.h
168 @@ -11,6 +11,8 @@
169 #define sev() __asm__ __volatile__ ("sev" : : : "memory")
170 #define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
171 #define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
172 +#else
173 +#define wfe() do { } while (0)
174 #endif
175
176 #if __LINUX_ARM_ARCH__ >= 7
177 diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
178 index 1bf65b47808a..cb2a3423b714 100644
179 --- a/arch/arm/include/asm/processor.h
180 +++ b/arch/arm/include/asm/processor.h
181 @@ -95,7 +95,11 @@ extern void release_thread(struct task_struct *);
182 unsigned long get_wchan(struct task_struct *p);
183
184 #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
185 -#define cpu_relax() smp_mb()
186 +#define cpu_relax() \
187 + do { \
188 + smp_mb(); \
189 + __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
190 + } while (0)
191 #else
192 #define cpu_relax() barrier()
193 #endif
194 diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
195 index 187ccf6496ad..2cb00d15831b 100644
196 --- a/arch/arm/include/asm/v7m.h
197 +++ b/arch/arm/include/asm/v7m.h
198 @@ -49,7 +49,7 @@
199 * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
200 */
201 #define EXC_RET_STACK_MASK 0x00000004
202 -#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
203 +#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
204
205 /* Cache related definitions */
206
207 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
208 index 773424843d6e..62db1c9746cb 100644
209 --- a/arch/arm/kernel/entry-header.S
210 +++ b/arch/arm/kernel/entry-header.S
211 @@ -127,7 +127,8 @@
212 */
213 .macro v7m_exception_slow_exit ret_r0
214 cpsid i
215 - ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
216 + ldr lr, =exc_ret
217 + ldr lr, [lr]
218
219 @ read original r12, sp, lr, pc and xPSR
220 add r12, sp, #S_IP
221 diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
222 index abcf47848525..19d2dcd6530d 100644
223 --- a/arch/arm/kernel/entry-v7m.S
224 +++ b/arch/arm/kernel/entry-v7m.S
225 @@ -146,3 +146,7 @@ ENTRY(vector_table)
226 .rept CONFIG_CPU_V7M_NUM_IRQ
227 .long __irq_entry @ External Interrupts
228 .endr
229 + .align 2
230 + .globl exc_ret
231 +exc_ret:
232 + .space 4
233 diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
234 index dd2eb5f76b9f..76300f3813e8 100644
235 --- a/arch/arm/kernel/machine_kexec.c
236 +++ b/arch/arm/kernel/machine_kexec.c
237 @@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
238
239 set_cpu_online(smp_processor_id(), false);
240 atomic_dec(&waiting_for_crash_ipi);
241 - while (1)
242 +
243 + while (1) {
244 cpu_relax();
245 + wfe();
246 + }
247 }
248
249 void crash_smp_send_stop(void)
250 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
251 index 1d6f5ea522f4..a3ce7c5365fa 100644
252 --- a/arch/arm/kernel/smp.c
253 +++ b/arch/arm/kernel/smp.c
254 @@ -604,8 +604,10 @@ static void ipi_cpu_stop(unsigned int cpu)
255 local_fiq_disable();
256 local_irq_disable();
257
258 - while (1)
259 + while (1) {
260 cpu_relax();
261 + wfe();
262 + }
263 }
264
265 static DEFINE_PER_CPU(struct completion *, cpu_completion);
266 diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
267 index 0bee233fef9a..314cfb232a63 100644
268 --- a/arch/arm/kernel/unwind.c
269 +++ b/arch/arm/kernel/unwind.c
270 @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
271 static const struct unwind_idx *__origin_unwind_idx;
272 extern const struct unwind_idx __stop_unwind_idx[];
273
274 -static DEFINE_SPINLOCK(unwind_lock);
275 +static DEFINE_RAW_SPINLOCK(unwind_lock);
276 static LIST_HEAD(unwind_tables);
277
278 /* Convert a prel31 symbol to an absolute address */
279 @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
280 /* module unwind tables */
281 struct unwind_table *table;
282
283 - spin_lock_irqsave(&unwind_lock, flags);
284 + raw_spin_lock_irqsave(&unwind_lock, flags);
285 list_for_each_entry(table, &unwind_tables, list) {
286 if (addr >= table->begin_addr &&
287 addr < table->end_addr) {
288 @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
289 break;
290 }
291 }
292 - spin_unlock_irqrestore(&unwind_lock, flags);
293 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
294 }
295
296 pr_debug("%s: idx = %p\n", __func__, idx);
297 @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
298 tab->begin_addr = text_addr;
299 tab->end_addr = text_addr + text_size;
300
301 - spin_lock_irqsave(&unwind_lock, flags);
302 + raw_spin_lock_irqsave(&unwind_lock, flags);
303 list_add_tail(&tab->list, &unwind_tables);
304 - spin_unlock_irqrestore(&unwind_lock, flags);
305 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
306
307 return tab;
308 }
309 @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
310 if (!tab)
311 return;
312
313 - spin_lock_irqsave(&unwind_lock, flags);
314 + raw_spin_lock_irqsave(&unwind_lock, flags);
315 list_del(&tab->list);
316 - spin_unlock_irqrestore(&unwind_lock, flags);
317 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
318
319 kfree(tab);
320 }
321 diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
322 index ad25fd1872c7..0bff0176db2c 100644
323 --- a/arch/arm/lib/Makefile
324 +++ b/arch/arm/lib/Makefile
325 @@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
326 $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
327
328 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
329 - NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
330 + NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
331 CFLAGS_xor-neon.o += $(NEON_FLAGS)
332 obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
333 endif
334 diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
335 index 2c40aeab3eaa..c691b901092f 100644
336 --- a/arch/arm/lib/xor-neon.c
337 +++ b/arch/arm/lib/xor-neon.c
338 @@ -14,7 +14,7 @@
339 MODULE_LICENSE("GPL");
340
341 #ifndef __ARM_NEON__
342 -#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
343 +#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
344 #endif
345
346 /*
347 diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
348 index 058a37e6d11c..fd6e0671f957 100644
349 --- a/arch/arm/mach-omap2/prm_common.c
350 +++ b/arch/arm/mach-omap2/prm_common.c
351 @@ -523,8 +523,10 @@ void omap_prm_reset_system(void)
352
353 prm_ll_data->reset_system();
354
355 - while (1)
356 + while (1) {
357 cpu_relax();
358 + wfe();
359 + }
360 }
361
362 /**
363 diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
364 index 47a5acc64433..92e84181933a 100644
365 --- a/arch/arm/mm/proc-v7m.S
366 +++ b/arch/arm/mm/proc-v7m.S
367 @@ -139,6 +139,9 @@ __v7m_setup_cont:
368 cpsie i
369 svc #0
370 1: cpsid i
371 + ldr r0, =exc_ret
372 + orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
373 + str lr, [r0]
374 ldmia sp, {r0-r3, r12}
375 str r5, [r12, #11 * 4] @ restore the original SVC vector entry
376 mov lr, r6 @ restore LR
377 diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
378 index 50b30ff30de4..a4c134677285 100644
379 --- a/arch/arm64/mm/fault.c
380 +++ b/arch/arm64/mm/fault.c
381 @@ -827,11 +827,12 @@ void __init hook_debug_fault_code(int nr,
382 debug_fault_info[nr].name = name;
383 }
384
385 -asmlinkage int __exception do_debug_exception(unsigned long addr,
386 +asmlinkage int __exception do_debug_exception(unsigned long addr_if_watchpoint,
387 unsigned int esr,
388 struct pt_regs *regs)
389 {
390 const struct fault_info *inf = debug_fault_info + DBG_ESR_EVT(esr);
391 + unsigned long pc = instruction_pointer(regs);
392 int rv;
393
394 /*
395 @@ -841,10 +842,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
396 if (interrupts_enabled(regs))
397 trace_hardirqs_off();
398
399 - if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
400 + if (user_mode(regs) && pc > TASK_SIZE)
401 arm64_apply_bp_hardening();
402
403 - if (!inf->fn(addr, esr, regs)) {
404 + if (!inf->fn(addr_if_watchpoint, esr, regs)) {
405 rv = 1;
406 } else {
407 struct siginfo info;
408 @@ -853,7 +854,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
409 info.si_signo = inf->sig;
410 info.si_errno = 0;
411 info.si_code = inf->code;
412 - info.si_addr = (void __user *)addr;
413 + info.si_addr = (void __user *)pc;
414 arm64_notify_die(inf->name, regs, &info, esr);
415 rv = 0;
416 }
417 diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
418 index 58634e6bae92..55f251810129 100644
419 --- a/arch/h8300/Makefile
420 +++ b/arch/h8300/Makefile
421 @@ -27,7 +27,7 @@ KBUILD_LDFLAGS += $(ldflags-y)
422 CHECKFLAGS += -msize-long
423
424 ifeq ($(CROSS_COMPILE),)
425 -CROSS_COMPILE := h8300-unknown-linux-
426 +CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
427 endif
428
429 core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
430 diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
431 index a4a718dbfec6..f85e2b01c3df 100644
432 --- a/arch/powerpc/include/asm/topology.h
433 +++ b/arch/powerpc/include/asm/topology.h
434 @@ -132,6 +132,8 @@ static inline void shared_proc_topology_init(void) {}
435 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
436 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
437 #define topology_core_id(cpu) (cpu_to_core_id(cpu))
438 +
439 +int dlpar_cpu_readd(int cpu);
440 #endif
441 #endif
442
443 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
444 index c806a3c12592..7a46e0e57a36 100644
445 --- a/arch/powerpc/kernel/entry_64.S
446 +++ b/arch/powerpc/kernel/entry_64.S
447 @@ -994,6 +994,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
448 ld r2,_NIP(r1)
449 mtspr SPRN_SRR0,r2
450
451 + /*
452 + * Leaving a stale exception_marker on the stack can confuse
453 + * the reliable stack unwinder later on. Clear it.
454 + */
455 + li r2,0
456 + std r2,STACK_FRAME_OVERHEAD-16(r1)
457 +
458 ld r0,GPR0(r1)
459 ld r2,GPR2(r1)
460 ld r3,GPR3(r1)
461 diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
462 index 2486bee0f93e..97c7a39ebc00 100644
463 --- a/arch/powerpc/mm/hugetlbpage-radix.c
464 +++ b/arch/powerpc/mm/hugetlbpage-radix.c
465 @@ -1,6 +1,7 @@
466 // SPDX-License-Identifier: GPL-2.0
467 #include <linux/mm.h>
468 #include <linux/hugetlb.h>
469 +#include <linux/security.h>
470 #include <asm/pgtable.h>
471 #include <asm/pgalloc.h>
472 #include <asm/cacheflush.h>
473 @@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
474 if (addr) {
475 addr = ALIGN(addr, huge_page_size(h));
476 vma = find_vma(mm, addr);
477 - if (high_limit - len >= addr &&
478 + if (high_limit - len >= addr && addr >= mmap_min_addr &&
479 (!vma || addr + len <= vm_start_gap(vma)))
480 return addr;
481 }
482 @@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
483 */
484 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
485 info.length = len;
486 - info.low_limit = PAGE_SIZE;
487 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
488 info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
489 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
490 info.align_offset = 0;
491 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
492 index 5500e4edabc6..10fb43efef50 100644
493 --- a/arch/powerpc/mm/numa.c
494 +++ b/arch/powerpc/mm/numa.c
495 @@ -1461,13 +1461,6 @@ static void reset_topology_timer(void)
496
497 #ifdef CONFIG_SMP
498
499 -static void stage_topology_update(int core_id)
500 -{
501 - cpumask_or(&cpu_associativity_changes_mask,
502 - &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
503 - reset_topology_timer();
504 -}
505 -
506 static int dt_update_callback(struct notifier_block *nb,
507 unsigned long action, void *data)
508 {
509 @@ -1480,7 +1473,7 @@ static int dt_update_callback(struct notifier_block *nb,
510 !of_prop_cmp(update->prop->name, "ibm,associativity")) {
511 u32 core_id;
512 of_property_read_u32(update->dn, "reg", &core_id);
513 - stage_topology_update(core_id);
514 + rc = dlpar_cpu_readd(core_id);
515 rc = NOTIFY_OK;
516 }
517 break;
518 diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
519 index 7639b2168755..f5adb6b756f7 100644
520 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
521 +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
522 @@ -313,7 +313,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
523 page_shift);
524 tbl->it_level_size = 1ULL << (level_shift - 3);
525 tbl->it_indirect_levels = levels - 1;
526 - tbl->it_allocated_size = total_allocated;
527 tbl->it_userspace = uas;
528 tbl->it_nid = nid;
529
530 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
531 index cde710297a4e..326ca6288bb1 100644
532 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
533 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
534 @@ -2603,8 +2603,13 @@ static long pnv_pci_ioda2_create_table_userspace(
535 int num, __u32 page_shift, __u64 window_size, __u32 levels,
536 struct iommu_table **ptbl)
537 {
538 - return pnv_pci_ioda2_create_table(table_group,
539 + long ret = pnv_pci_ioda2_create_table(table_group,
540 num, page_shift, window_size, levels, true, ptbl);
541 +
542 + if (!ret)
543 + (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
544 + page_shift, window_size, levels);
545 + return ret;
546 }
547
548 static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
549 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
550 index 6ef77caf7bcf..1d3f9313c02f 100644
551 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
552 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
553 @@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
554 return rc;
555 }
556
557 +int dlpar_cpu_readd(int cpu)
558 +{
559 + struct device_node *dn;
560 + struct device *dev;
561 + u32 drc_index;
562 + int rc;
563 +
564 + dev = get_cpu_device(cpu);
565 + dn = dev->of_node;
566 +
567 + rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
568 +
569 + rc = dlpar_cpu_remove_by_index(drc_index);
570 + if (!rc)
571 + rc = dlpar_cpu_add(drc_index);
572 +
573 + return rc;
574 +}
575 +
576 int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
577 {
578 u32 count, drc_index;
579 diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
580 index 9deea5ee13f6..27f1e6415036 100644
581 --- a/arch/powerpc/xmon/ppc-dis.c
582 +++ b/arch/powerpc/xmon/ppc-dis.c
583 @@ -158,7 +158,7 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
584 dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
585 | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
586 | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
587 - | PPC_OPCODE_VSX | PPC_OPCODE_VSX3),
588 + | PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
589
590 /* Get the major opcode of the insn. */
591 opcode = NULL;
592 diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
593 index 5c53e977be62..44404836e9d1 100644
594 --- a/arch/s390/kernel/perf_cpum_sf.c
595 +++ b/arch/s390/kernel/perf_cpum_sf.c
596 @@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb)
597
598 /*
599 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
600 - * @cpu: On which to allocate, -1 means current
601 + * @event: Event the buffer is setup for, event->cpu == -1 means current
602 * @pages: Array of pointers to buffer pages passed from perf core
603 * @nr_pages: Total pages
604 * @snapshot: Flag for snapshot mode
605 @@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb)
606 *
607 * Return the private AUX buffer structure if success or NULL if fails.
608 */
609 -static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
610 - bool snapshot)
611 +static void *aux_buffer_setup(struct perf_event *event, void **pages,
612 + int nr_pages, bool snapshot)
613 {
614 struct sf_buffer *sfb;
615 struct aux_buffer *aux;
616 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
617 index 9b5adae9cc40..e2839b5c246c 100644
618 --- a/arch/x86/boot/Makefile
619 +++ b/arch/x86/boot/Makefile
620 @@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
621 AFLAGS_header.o += -I$(objtree)/$(obj)
622 $(obj)/header.o: $(obj)/zoffset.h
623
624 -LDFLAGS_setup.elf := -T
625 +LDFLAGS_setup.elf := -m elf_i386 -T
626 $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
627 $(call if_changed,ld)
628
629 diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
630 index f95dcb209fdf..617df50a11d9 100644
631 --- a/arch/x86/entry/entry_64.S
632 +++ b/arch/x86/entry/entry_64.S
633 @@ -627,6 +627,7 @@ ENTRY(interrupt_entry)
634
635 ret
636 END(interrupt_entry)
637 +_ASM_NOKPROBE(interrupt_entry)
638
639
640 /* Interrupt entry/exit. */
641 @@ -826,6 +827,7 @@ native_irq_return_ldt:
642 jmp native_irq_return_iret
643 #endif
644 END(common_interrupt)
645 +_ASM_NOKPROBE(common_interrupt)
646
647 /*
648 * APIC interrupts.
649 @@ -840,6 +842,7 @@ ENTRY(\sym)
650 call \do_sym /* rdi points to pt_regs */
651 jmp ret_from_intr
652 END(\sym)
653 +_ASM_NOKPROBE(\sym)
654 .endm
655
656 /* Make sure APIC interrupt handlers end up in the irqentry section: */
657 @@ -984,6 +987,7 @@ ENTRY(\sym)
658
659 jmp error_exit
660 .endif
661 +_ASM_NOKPROBE(\sym)
662 END(\sym)
663 .endm
664
665 diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
666 index 24ffa1e88cf9..7139f6bf27ad 100644
667 --- a/arch/x86/events/intel/bts.c
668 +++ b/arch/x86/events/intel/bts.c
669 @@ -77,10 +77,12 @@ static size_t buf_size(struct page *page)
670 }
671
672 static void *
673 -bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
674 +bts_buffer_setup_aux(struct perf_event *event, void **pages,
675 + int nr_pages, bool overwrite)
676 {
677 struct bts_buffer *buf;
678 struct page *page;
679 + int cpu = event->cpu;
680 int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
681 unsigned long offset;
682 size_t size = nr_pages << PAGE_SHIFT;
683 diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
684 index 8d016ce5b80d..8f4c98fdd03c 100644
685 --- a/arch/x86/events/intel/pt.c
686 +++ b/arch/x86/events/intel/pt.c
687 @@ -1104,10 +1104,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
688 * Return: Our private PT buffer structure.
689 */
690 static void *
691 -pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
692 +pt_buffer_setup_aux(struct perf_event *event, void **pages,
693 + int nr_pages, bool snapshot)
694 {
695 struct pt_buffer *buf;
696 - int node, ret;
697 + int node, ret, cpu = event->cpu;
698
699 if (!nr_pages)
700 return NULL;
701 diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
702 index 20c876c7c5bf..87abd5145cc9 100644
703 --- a/arch/x86/hyperv/hv_init.c
704 +++ b/arch/x86/hyperv/hv_init.c
705 @@ -387,6 +387,13 @@ void hyperv_cleanup(void)
706 /* Reset our OS id */
707 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
708
709 + /*
710 + * Reset hypercall page reference before reset the page,
711 + * let hypercall operations fail safely rather than
712 + * panic the kernel for using invalid hypercall page
713 + */
714 + hv_hypercall_pg = NULL;
715 +
716 /* Reset the hypercall page */
717 hypercall_msr.as_uint64 = 0;
718 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
719 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
720 index 5dd3317d761f..c63bab98780c 100644
721 --- a/arch/x86/kernel/vmlinux.lds.S
722 +++ b/arch/x86/kernel/vmlinux.lds.S
723 @@ -411,7 +411,7 @@ SECTIONS
724 * Per-cpu symbols which need to be offset from __per_cpu_load
725 * for the boot processor.
726 */
727 -#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
728 +#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
729 INIT_PER_CPU(gdt_page);
730 INIT_PER_CPU(irq_stack_union);
731
732 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
733 index 4463fa72db94..96cb20de08af 100644
734 --- a/arch/x86/realmode/rm/Makefile
735 +++ b/arch/x86/realmode/rm/Makefile
736 @@ -47,7 +47,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
737 targets += realmode.lds
738 $(obj)/realmode.lds: $(obj)/pasyms.h
739
740 -LDFLAGS_realmode.elf := --emit-relocs -T
741 +LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
742 CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
743
744 targets += realmode.elf
745 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
746 index 653100fb719e..c5e2c5a01182 100644
747 --- a/block/bfq-iosched.c
748 +++ b/block/bfq-iosched.c
749 @@ -2215,7 +2215,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
750
751 if (in_service_bfqq && in_service_bfqq != bfqq &&
752 likely(in_service_bfqq != &bfqd->oom_bfqq) &&
753 - bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
754 + bfq_rq_close_to_sector(io_struct, request,
755 + bfqd->in_serv_last_pos) &&
756 bfqq->entity.parent == in_service_bfqq->entity.parent &&
757 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
758 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
759 @@ -2755,6 +2756,8 @@ update_rate_and_reset:
760 bfq_update_rate_reset(bfqd, rq);
761 update_last_values:
762 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
763 + if (RQ_BFQQ(rq) == bfqd->in_service_queue)
764 + bfqd->in_serv_last_pos = bfqd->last_position;
765 bfqd->last_dispatch = now_ns;
766 }
767
768 diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
769 index a8a2e5aca4d4..d5e9e60cb1a5 100644
770 --- a/block/bfq-iosched.h
771 +++ b/block/bfq-iosched.h
772 @@ -469,6 +469,9 @@ struct bfq_data {
773 /* on-disk position of the last served request */
774 sector_t last_position;
775
776 + /* position of the last served request for the in-service queue */
777 + sector_t in_serv_last_pos;
778 +
779 /* time of last request completion (ns) */
780 u64 last_completion;
781
782 diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
783 index f0b52266b3ac..d73afb562ad9 100644
784 --- a/drivers/acpi/acpi_video.c
785 +++ b/drivers/acpi/acpi_video.c
786 @@ -2124,21 +2124,29 @@ static int __init intel_opregion_present(void)
787 return opregion;
788 }
789
790 +/* Check if the chassis-type indicates there is no builtin LCD panel */
791 static bool dmi_is_desktop(void)
792 {
793 const char *chassis_type;
794 + unsigned long type;
795
796 chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
797 if (!chassis_type)
798 return false;
799
800 - if (!strcmp(chassis_type, "3") || /* 3: Desktop */
801 - !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
802 - !strcmp(chassis_type, "5") || /* 5: Pizza Box */
803 - !strcmp(chassis_type, "6") || /* 6: Mini Tower */
804 - !strcmp(chassis_type, "7") || /* 7: Tower */
805 - !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
806 + if (kstrtoul(chassis_type, 10, &type) != 0)
807 + return false;
808 +
809 + switch (type) {
810 + case 0x03: /* Desktop */
811 + case 0x04: /* Low Profile Desktop */
812 + case 0x05: /* Pizza Box */
813 + case 0x06: /* Mini Tower */
814 + case 0x07: /* Tower */
815 + case 0x10: /* Lunch Box */
816 + case 0x11: /* Main Server Chassis */
817 return true;
818 + }
819
820 return false;
821 }
822 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
823 index 0c5aeab4d23a..a63da9e07341 100644
824 --- a/drivers/block/loop.c
825 +++ b/drivers/block/loop.c
826 @@ -1090,16 +1090,12 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
827 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
828 }
829 mapping_set_gfp_mask(filp->f_mapping, gfp);
830 - lo->lo_state = Lo_unbound;
831 /* This is safe: open() is still holding a reference. */
832 module_put(THIS_MODULE);
833 blk_mq_unfreeze_queue(lo->lo_queue);
834
835 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
836 lo_number = lo->lo_number;
837 - lo->lo_flags = 0;
838 - if (!part_shift)
839 - lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
840 loop_unprepare_queue(lo);
841 out_unlock:
842 mutex_unlock(&loop_ctl_mutex);
843 @@ -1121,6 +1117,23 @@ out_unlock:
844 /* Device is gone, no point in returning error */
845 err = 0;
846 }
847 +
848 + /*
849 + * lo->lo_state is set to Lo_unbound here after above partscan has
850 + * finished.
851 + *
852 + * There cannot be anybody else entering __loop_clr_fd() as
853 + * lo->lo_backing_file is already cleared and Lo_rundown state
854 + * protects us from all the other places trying to change the 'lo'
855 + * device.
856 + */
857 + mutex_lock(&loop_ctl_mutex);
858 + lo->lo_flags = 0;
859 + if (!part_shift)
860 + lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
861 + lo->lo_state = Lo_unbound;
862 + mutex_unlock(&loop_ctl_mutex);
863 +
864 /*
865 * Need not hold loop_ctl_mutex to fput backing file.
866 * Calling fput holding loop_ctl_mutex triggers a circular
867 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
868 index 10802d1fc554..27a82a559ab9 100644
869 --- a/drivers/cdrom/cdrom.c
870 +++ b/drivers/cdrom/cdrom.c
871 @@ -265,6 +265,7 @@
872 /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
873 /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
874
875 +#include <linux/atomic.h>
876 #include <linux/module.h>
877 #include <linux/fs.h>
878 #include <linux/major.h>
879 @@ -3693,9 +3694,9 @@ static struct ctl_table_header *cdrom_sysctl_header;
880
881 static void cdrom_sysctl_register(void)
882 {
883 - static int initialized;
884 + static atomic_t initialized = ATOMIC_INIT(0);
885
886 - if (initialized == 1)
887 + if (!atomic_add_unless(&initialized, 1, 1))
888 return;
889
890 cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
891 @@ -3706,8 +3707,6 @@ static void cdrom_sysctl_register(void)
892 cdrom_sysctl_settings.debug = debug;
893 cdrom_sysctl_settings.lock = lockdoor;
894 cdrom_sysctl_settings.check = check_media_type;
895 -
896 - initialized = 1;
897 }
898
899 static void cdrom_sysctl_unregister(void)
900 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
901 index 4a22b4b41aef..9bffcd37cc7b 100644
902 --- a/drivers/char/hpet.c
903 +++ b/drivers/char/hpet.c
904 @@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
905 pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
906 return 1;
907 }
908 -__setup("hpet_mmap", hpet_mmap_enable);
909 +__setup("hpet_mmap=", hpet_mmap_enable);
910
911 static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
912 {
913 diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
914 index b89df66ea1ae..7abd604e938c 100644
915 --- a/drivers/char/hw_random/virtio-rng.c
916 +++ b/drivers/char/hw_random/virtio-rng.c
917 @@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
918
919 if (!vi->busy) {
920 vi->busy = true;
921 - init_completion(&vi->have_data);
922 + reinit_completion(&vi->have_data);
923 register_buffer(vi, buf, size);
924 }
925
926 diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
927 index fdf625fb10fa..083daa293280 100644
928 --- a/drivers/clk/clk-fractional-divider.c
929 +++ b/drivers/clk/clk-fractional-divider.c
930 @@ -77,7 +77,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
931 unsigned long m, n;
932 u64 ret;
933
934 - if (!rate || rate >= *parent_rate)
935 + if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
936 return *parent_rate;
937
938 if (fd->approximation)
939 diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
940 index f965845917e3..258c8d259ea1 100644
941 --- a/drivers/clk/meson/meson-aoclk.c
942 +++ b/drivers/clk/meson/meson-aoclk.c
943 @@ -65,15 +65,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
944 return ret;
945 }
946
947 - /*
948 - * Populate regmap and register all clks
949 - */
950 - for (clkid = 0; clkid < data->num_clks; clkid++) {
951 + /* Populate regmap */
952 + for (clkid = 0; clkid < data->num_clks; clkid++)
953 data->clks[clkid]->map = regmap;
954
955 + /* Register all clks */
956 + for (clkid = 0; clkid < data->hw_data->num; clkid++) {
957 + if (!data->hw_data->hws[clkid])
958 + continue;
959 +
960 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
961 - if (ret)
962 + if (ret) {
963 + dev_err(dev, "Clock registration failed\n");
964 return ret;
965 + }
966 }
967
968 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
969 diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
970 index 2c5426607790..e431661fe874 100644
971 --- a/drivers/clk/rockchip/clk-rk3328.c
972 +++ b/drivers/clk/rockchip/clk-rk3328.c
973 @@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = {
974
975 static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = {
976 /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
977 - RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217),
978 + RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218),
979 /* vco = 1016064000 */
980 - RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088),
981 + RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089),
982 /* vco = 983040000 */
983 - RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088),
984 + RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089),
985 /* vco = 983040000 */
986 - RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088),
987 + RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089),
988 /* vco = 860156000 */
989 - RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894),
990 + RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895),
991 /* vco = 903168000 */
992 - RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329),
993 + RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330),
994 /* vco = 819200000 */
995 { /* sentinel */ },
996 };
997 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
998 index b61f4ec43e06..aca30f45172e 100644
999 --- a/drivers/cpufreq/acpi-cpufreq.c
1000 +++ b/drivers/cpufreq/acpi-cpufreq.c
1001 @@ -911,8 +911,10 @@ static void __init acpi_cpufreq_boost_init(void)
1002 {
1003 int ret;
1004
1005 - if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
1006 + if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
1007 + pr_debug("Boost capabilities not present in the processor\n");
1008 return;
1009 + }
1010
1011 acpi_cpufreq_driver.set_boost = set_boost;
1012 acpi_cpufreq_driver.boost_enabled = boost_state(0);
1013 diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
1014 index 5e63742b0d22..53ab1f140a26 100644
1015 --- a/drivers/crypto/amcc/crypto4xx_trng.c
1016 +++ b/drivers/crypto/amcc/crypto4xx_trng.c
1017 @@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
1018
1019 /* Find the TRNG device node and map it */
1020 trng = of_find_matching_node(NULL, ppc4xx_trng_match);
1021 - if (!trng || !of_device_is_available(trng))
1022 + if (!trng || !of_device_is_available(trng)) {
1023 + of_node_put(trng);
1024 return;
1025 + }
1026
1027 dev->trng_base = of_iomap(trng, 0);
1028 of_node_put(trng);
1029 diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
1030 index be055b9547f6..6183f9128a8a 100644
1031 --- a/drivers/crypto/cavium/zip/zip_main.c
1032 +++ b/drivers/crypto/cavium/zip/zip_main.c
1033 @@ -351,6 +351,7 @@ static struct pci_driver zip_driver = {
1034
1035 static struct crypto_alg zip_comp_deflate = {
1036 .cra_name = "deflate",
1037 + .cra_driver_name = "deflate-cavium",
1038 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
1039 .cra_ctxsize = sizeof(struct zip_kernel_ctx),
1040 .cra_priority = 300,
1041 @@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = {
1042
1043 static struct crypto_alg zip_comp_lzs = {
1044 .cra_name = "lzs",
1045 + .cra_driver_name = "lzs-cavium",
1046 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
1047 .cra_ctxsize = sizeof(struct zip_kernel_ctx),
1048 .cra_priority = 300,
1049 @@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = {
1050 .decompress = zip_scomp_decompress,
1051 .base = {
1052 .cra_name = "deflate",
1053 - .cra_driver_name = "deflate-scomp",
1054 + .cra_driver_name = "deflate-scomp-cavium",
1055 .cra_module = THIS_MODULE,
1056 .cra_priority = 300,
1057 }
1058 @@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = {
1059 .decompress = zip_scomp_decompress,
1060 .base = {
1061 .cra_name = "lzs",
1062 - .cra_driver_name = "lzs-scomp",
1063 + .cra_driver_name = "lzs-scomp-cavium",
1064 .cra_module = THIS_MODULE,
1065 .cra_priority = 300,
1066 }
1067 diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
1068 index 118d371a2a4a..dfee0d895ce3 100644
1069 --- a/drivers/dma/imx-dma.c
1070 +++ b/drivers/dma/imx-dma.c
1071 @@ -284,7 +284,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
1072 struct scatterlist *sg = d->sg;
1073 unsigned long now;
1074
1075 - now = min(d->len, sg_dma_len(sg));
1076 + now = min_t(size_t, d->len, sg_dma_len(sg));
1077 if (d->len != IMX_DMA_LENGTH_LOOP)
1078 d->len -= now;
1079
1080 diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
1081 index 43d4b00b8138..411f91fde734 100644
1082 --- a/drivers/dma/qcom/hidma.c
1083 +++ b/drivers/dma/qcom/hidma.c
1084 @@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
1085 desc = &mdesc->desc;
1086 last_cookie = desc->cookie;
1087
1088 + llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
1089 +
1090 spin_lock_irqsave(&mchan->lock, irqflags);
1091 + if (llstat == DMA_COMPLETE) {
1092 + mchan->last_success = last_cookie;
1093 + result.result = DMA_TRANS_NOERROR;
1094 + } else {
1095 + result.result = DMA_TRANS_ABORTED;
1096 + }
1097 +
1098 dma_cookie_complete(desc);
1099 spin_unlock_irqrestore(&mchan->lock, irqflags);
1100
1101 - llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
1102 dmaengine_desc_get_callback(desc, &cb);
1103
1104 dma_run_dependencies(desc);
1105
1106 spin_lock_irqsave(&mchan->lock, irqflags);
1107 list_move(&mdesc->node, &mchan->free);
1108 -
1109 - if (llstat == DMA_COMPLETE) {
1110 - mchan->last_success = last_cookie;
1111 - result.result = DMA_TRANS_NOERROR;
1112 - } else
1113 - result.result = DMA_TRANS_ABORTED;
1114 -
1115 spin_unlock_irqrestore(&mchan->lock, irqflags);
1116
1117 dmaengine_desc_callback_invoke(&cb, &result);
1118 @@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
1119 if (!mdesc)
1120 return NULL;
1121
1122 + mdesc->desc.flags = flags;
1123 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
1124 src, dest, len, flags,
1125 HIDMA_TRE_MEMCPY);
1126 @@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
1127 if (!mdesc)
1128 return NULL;
1129
1130 + mdesc->desc.flags = flags;
1131 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
1132 value, dest, len, flags,
1133 HIDMA_TRE_MEMSET);
1134 diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
1135 index 9a558e30c461..8219ab88a507 100644
1136 --- a/drivers/dma/tegra20-apb-dma.c
1137 +++ b/drivers/dma/tegra20-apb-dma.c
1138 @@ -636,7 +636,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
1139
1140 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
1141 dma_desc = sgreq->dma_desc;
1142 - dma_desc->bytes_transferred += sgreq->req_len;
1143 + /* if we dma for long enough the transfer count will wrap */
1144 + dma_desc->bytes_transferred =
1145 + (dma_desc->bytes_transferred + sgreq->req_len) %
1146 + dma_desc->bytes_requested;
1147
1148 /* Callback need to be call */
1149 if (!dma_desc->cb_count)
1150 diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
1151 index a7902fccdcfa..6090d25dce85 100644
1152 --- a/drivers/firmware/efi/cper.c
1153 +++ b/drivers/firmware/efi/cper.c
1154 @@ -546,19 +546,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header);
1155 int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
1156 {
1157 struct acpi_hest_generic_data *gdata;
1158 - unsigned int data_len, gedata_len;
1159 + unsigned int data_len, record_size;
1160 int rc;
1161
1162 rc = cper_estatus_check_header(estatus);
1163 if (rc)
1164 return rc;
1165 +
1166 data_len = estatus->data_length;
1167
1168 apei_estatus_for_each_section(estatus, gdata) {
1169 - gedata_len = acpi_hest_get_error_length(gdata);
1170 - if (gedata_len > data_len - acpi_hest_get_size(gdata))
1171 + if (sizeof(struct acpi_hest_generic_data) > data_len)
1172 + return -EINVAL;
1173 +
1174 + record_size = acpi_hest_get_record_size(gdata);
1175 + if (record_size > data_len)
1176 return -EINVAL;
1177 - data_len -= acpi_hest_get_record_size(gdata);
1178 +
1179 + data_len -= record_size;
1180 }
1181 if (data_len)
1182 return -EINVAL;
1183 diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
1184 index 6920033de6d4..6c09644d620e 100644
1185 --- a/drivers/firmware/efi/libstub/arm-stub.c
1186 +++ b/drivers/firmware/efi/libstub/arm-stub.c
1187 @@ -340,6 +340,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
1188 paddr = in->phys_addr;
1189 size = in->num_pages * EFI_PAGE_SIZE;
1190
1191 + if (novamap()) {
1192 + in->virt_addr = in->phys_addr;
1193 + continue;
1194 + }
1195 +
1196 /*
1197 * Make the mapping compatible with 64k pages: this allows
1198 * a 4k page size kernel to kexec a 64k page size kernel and
1199 diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
1200 index e94975f4655b..442f51c2a53d 100644
1201 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c
1202 +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
1203 @@ -34,6 +34,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
1204
1205 static int __section(.data) __nokaslr;
1206 static int __section(.data) __quiet;
1207 +static int __section(.data) __novamap;
1208
1209 int __pure nokaslr(void)
1210 {
1211 @@ -43,6 +44,10 @@ int __pure is_quiet(void)
1212 {
1213 return __quiet;
1214 }
1215 +int __pure novamap(void)
1216 +{
1217 + return __novamap;
1218 +}
1219
1220 #define EFI_MMAP_NR_SLACK_SLOTS 8
1221
1222 @@ -482,6 +487,11 @@ efi_status_t efi_parse_options(char const *cmdline)
1223 __chunk_size = -1UL;
1224 }
1225
1226 + if (!strncmp(str, "novamap", 7)) {
1227 + str += strlen("novamap");
1228 + __novamap = 1;
1229 + }
1230 +
1231 /* Group words together, delimited by "," */
1232 while (*str && *str != ' ' && *str != ',')
1233 str++;
1234 diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
1235 index 32799cf039ef..337b52c4702c 100644
1236 --- a/drivers/firmware/efi/libstub/efistub.h
1237 +++ b/drivers/firmware/efi/libstub/efistub.h
1238 @@ -27,6 +27,7 @@
1239
1240 extern int __pure nokaslr(void);
1241 extern int __pure is_quiet(void);
1242 +extern int __pure novamap(void);
1243
1244 #define pr_efi(sys_table, msg) do { \
1245 if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
1246 diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
1247 index 0c0d2312f4a8..dba296a44f4e 100644
1248 --- a/drivers/firmware/efi/libstub/fdt.c
1249 +++ b/drivers/firmware/efi/libstub/fdt.c
1250 @@ -327,6 +327,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1251 if (status == EFI_SUCCESS) {
1252 efi_set_virtual_address_map_t *svam;
1253
1254 + if (novamap())
1255 + return EFI_SUCCESS;
1256 +
1257 /* Install the new virtual address map */
1258 svam = sys_table->runtime->set_virtual_address_map;
1259 status = svam(runtime_entry_count * desc_size, desc_size,
1260 diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
1261 index 8986757eafaf..aac972b056d9 100644
1262 --- a/drivers/firmware/efi/memattr.c
1263 +++ b/drivers/firmware/efi/memattr.c
1264 @@ -94,7 +94,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
1265
1266 if (!(md->attribute & EFI_MEMORY_RUNTIME))
1267 continue;
1268 - if (md->virt_addr == 0) {
1269 + if (md->virt_addr == 0 && md->phys_addr != 0) {
1270 /* no virtual mapping has been installed by the stub */
1271 break;
1272 }
1273 diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
1274 index e81008678a38..6c1acf642c8e 100644
1275 --- a/drivers/gpio/gpio-omap.c
1276 +++ b/drivers/gpio/gpio-omap.c
1277 @@ -888,14 +888,16 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
1278 if (trigger)
1279 omap_set_gpio_triggering(bank, offset, trigger);
1280
1281 - /* For level-triggered GPIOs, the clearing must be done after
1282 - * the HW source is cleared, thus after the handler has run */
1283 - if (bank->level_mask & BIT(offset)) {
1284 - omap_set_gpio_irqenable(bank, offset, 0);
1285 + omap_set_gpio_irqenable(bank, offset, 1);
1286 +
1287 + /*
1288 + * For level-triggered GPIOs, clearing must be done after the source
1289 + * is cleared, thus after the handler has run. OMAP4 needs this done
1290 + * after enabing the interrupt to clear the wakeup status.
1291 + */
1292 + if (bank->level_mask & BIT(offset))
1293 omap_clear_gpio_irqstatus(bank, offset);
1294 - }
1295
1296 - omap_set_gpio_irqenable(bank, offset, 1);
1297 raw_spin_unlock_irqrestore(&bank->lock, flags);
1298 }
1299
1300 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1301 index c5ba9128b736..2b8b892eb846 100644
1302 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1303 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1304 @@ -4368,7 +4368,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
1305 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
1306 struct dc_stream_state *stream_state)
1307 {
1308 - stream_state->mode_changed = crtc_state->mode_changed;
1309 + stream_state->mode_changed =
1310 + crtc_state->mode_changed || crtc_state->active_changed;
1311 }
1312
1313 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
1314 @@ -4389,10 +4390,22 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
1315 */
1316 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1317 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
1318 + struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
1319 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
1320
1321 - if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
1322 + if (drm_atomic_crtc_needs_modeset(new_crtc_state)
1323 + && dm_old_crtc_state->stream) {
1324 + /*
1325 + * CRC capture was enabled but not disabled.
1326 + * Release the vblank reference.
1327 + */
1328 + if (dm_new_crtc_state->crc_enabled) {
1329 + drm_crtc_vblank_put(crtc);
1330 + dm_new_crtc_state->crc_enabled = false;
1331 + }
1332 +
1333 manage_dm_interrupts(adev, acrtc, false);
1334 + }
1335 }
1336 /* Add check here for SoC's that support hardware cursor plane, to
1337 * unset legacy_cursor_update */
1338 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1339 index 6a6d977ddd7a..36a0bed9af07 100644
1340 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1341 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
1342 @@ -51,6 +51,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
1343 {
1344 struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
1345 struct dc_stream_state *stream_state = crtc_state->stream;
1346 + bool enable;
1347
1348 enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
1349
1350 @@ -65,28 +66,27 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
1351 return -EINVAL;
1352 }
1353
1354 + enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
1355 +
1356 + if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
1357 + enable, enable))
1358 + return -EINVAL;
1359 +
1360 /* When enabling CRC, we should also disable dithering. */
1361 - if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
1362 - if (dc_stream_configure_crc(stream_state->ctx->dc,
1363 - stream_state,
1364 - true, true)) {
1365 - crtc_state->crc_enabled = true;
1366 - dc_stream_set_dither_option(stream_state,
1367 - DITHER_OPTION_TRUN8);
1368 - }
1369 - else
1370 - return -EINVAL;
1371 - } else {
1372 - if (dc_stream_configure_crc(stream_state->ctx->dc,
1373 - stream_state,
1374 - false, false)) {
1375 - crtc_state->crc_enabled = false;
1376 - dc_stream_set_dither_option(stream_state,
1377 - DITHER_OPTION_DEFAULT);
1378 - }
1379 - else
1380 - return -EINVAL;
1381 - }
1382 + dc_stream_set_dither_option(stream_state,
1383 + enable ? DITHER_OPTION_TRUN8
1384 + : DITHER_OPTION_DEFAULT);
1385 +
1386 + /*
1387 + * Reading the CRC requires the vblank interrupt handler to be
1388 + * enabled. Keep a reference until CRC capture stops.
1389 + */
1390 + if (!crtc_state->crc_enabled && enable)
1391 + drm_crtc_vblank_get(crtc);
1392 + else if (crtc_state->crc_enabled && !enable)
1393 + drm_crtc_vblank_put(crtc);
1394 +
1395 + crtc_state->crc_enabled = enable;
1396
1397 *values_cnt = 3;
1398 /* Reset crc_skipped on dm state */
1399 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
1400 index 9045e6fa0780..bb0cda727605 100644
1401 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
1402 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
1403 @@ -958,6 +958,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
1404 /* pplib is notified if disp_num changed */
1405 dc->hwss.set_bandwidth(dc, context, true);
1406
1407 + for (i = 0; i < context->stream_count; i++)
1408 + context->streams[i]->mode_changed = false;
1409 +
1410 dc_release_state(dc->current_state);
1411
1412 dc->current_state = context;
1413 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1414 index 4058b59d9bea..a0355709abd1 100644
1415 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1416 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
1417 @@ -2336,9 +2336,10 @@ static void dcn10_apply_ctx_for_surface(
1418 }
1419 }
1420
1421 - if (!pipe_ctx->plane_state &&
1422 - old_pipe_ctx->plane_state &&
1423 - old_pipe_ctx->stream_res.tg == tg) {
1424 + if ((!pipe_ctx->plane_state ||
1425 + pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
1426 + old_pipe_ctx->plane_state &&
1427 + old_pipe_ctx->stream_res.tg == tg) {
1428
1429 dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
1430 removed_pipe[i] = true;
1431 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
1432 index d708472d93c4..65f58e23e03d 100644
1433 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
1434 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
1435 @@ -3278,6 +3278,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
1436 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
1437 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
1438 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
1439 + msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
1440 }
1441 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
1442 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
1443 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
1444 index e65596617239..a0663f44e218 100644
1445 --- a/drivers/gpu/drm/drm_fb_helper.c
1446 +++ b/drivers/gpu/drm/drm_fb_helper.c
1447 @@ -2877,7 +2877,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
1448 return 0;
1449
1450 err_drm_fb_helper_fini:
1451 - drm_fb_helper_fini(fb_helper);
1452 + drm_fb_helper_fbdev_teardown(dev);
1453
1454 return ret;
1455 }
1456 diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
1457 index 6153cbda239f..d36b1be632d9 100644
1458 --- a/drivers/gpu/drm/drm_plane.c
1459 +++ b/drivers/gpu/drm/drm_plane.c
1460 @@ -211,6 +211,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
1461 format_modifier_count++;
1462 }
1463
1464 + if (format_modifier_count)
1465 + config->allow_fb_modifiers = true;
1466 +
1467 plane->modifier_count = format_modifier_count;
1468 plane->modifiers = kmalloc_array(format_modifier_count,
1469 sizeof(format_modifiers[0]),
1470 diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
1471 index 6a4ca139cf5d..8fd8124d72ba 100644
1472 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
1473 +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
1474 @@ -750,7 +750,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
1475 /* Disable the crtc to ensure a full modeset is
1476 * performed whenever it's turned on again. */
1477 if (crtc)
1478 - drm_crtc_force_disable(crtc);
1479 + drm_crtc_helper_set_mode(crtc, &crtc->mode,
1480 + crtc->x, crtc->y,
1481 + crtc->primary->fb);
1482 }
1483
1484 return 0;
1485 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
1486 index fb46df56f0c4..0386b454e221 100644
1487 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
1488 +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
1489 @@ -300,6 +300,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
1490 dev_dbg(rcdu->dev,
1491 "connected entity %pOF is disabled, skipping\n",
1492 entity);
1493 + of_node_put(entity);
1494 return -ENODEV;
1495 }
1496
1497 @@ -335,6 +336,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
1498 dev_warn(rcdu->dev,
1499 "no encoder found for endpoint %pOF, skipping\n",
1500 ep->local_node);
1501 + of_node_put(entity);
1502 return -ENODEV;
1503 }
1504
1505 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
1506 index 1ea2dd35bca9..0a271f762a0a 100644
1507 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
1508 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
1509 @@ -55,6 +55,9 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
1510
1511 *vblank_time = output->vblank_hrtimer.node.expires;
1512
1513 + if (!in_vblank_irq)
1514 + *vblank_time -= output->period_ns;
1515 +
1516 return true;
1517 }
1518
1519 diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
1520 index bfbca7ec54ce..e00b9dbe220f 100644
1521 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c
1522 +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
1523 @@ -91,7 +91,10 @@ static bool check_generated_interrupt(struct ishtp_device *dev)
1524 IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
1525 } else {
1526 pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
1527 - interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
1528 + interrupt_generated = !!pisr_val;
1529 + /* only busy-clear bit is RW, others are RO */
1530 + if (pisr_val)
1531 + ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
1532 }
1533
1534 return interrupt_generated;
1535 @@ -843,11 +846,11 @@ int ish_hw_start(struct ishtp_device *dev)
1536 {
1537 ish_set_host_rdy(dev);
1538
1539 + set_host_ready(dev);
1540 +
1541 /* After that we can enable ISH DMA operation and wakeup ISHFW */
1542 ish_wakeup(dev);
1543
1544 - set_host_ready(dev);
1545 -
1546 /* wait for FW-initiated reset flow */
1547 if (!dev->recvd_hw_ready)
1548 wait_event_interruptible_timeout(dev->wait_hw_ready,
1549 diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
1550 index 2623a567ffba..f546635e9ac9 100644
1551 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c
1552 +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
1553 @@ -623,7 +623,8 @@ int ishtp_cl_device_bind(struct ishtp_cl *cl)
1554 spin_lock_irqsave(&cl->dev->device_list_lock, flags);
1555 list_for_each_entry(cl_device, &cl->dev->device_list,
1556 device_link) {
1557 - if (cl_device->fw_client->client_id == cl->fw_client_id) {
1558 + if (cl_device->fw_client &&
1559 + cl_device->fw_client->client_id == cl->fw_client_id) {
1560 cl->device = cl_device;
1561 rv = 0;
1562 break;
1563 @@ -683,6 +684,7 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
1564 spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
1565 list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
1566 device_link) {
1567 + cl_device->fw_client = NULL;
1568 if (warm_reset && cl_device->reference_count)
1569 continue;
1570
1571 diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
1572 index 677695635211..0f5e03e4df22 100644
1573 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
1574 +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
1575 @@ -181,15 +181,15 @@ static void etm_free_aux(void *data)
1576 schedule_work(&event_data->work);
1577 }
1578
1579 -static void *etm_setup_aux(int event_cpu, void **pages,
1580 +static void *etm_setup_aux(struct perf_event *event, void **pages,
1581 int nr_pages, bool overwrite)
1582 {
1583 - int cpu;
1584 + int cpu = event->cpu;
1585 cpumask_t *mask;
1586 struct coresight_device *sink;
1587 struct etm_event_data *event_data = NULL;
1588
1589 - event_data = alloc_event_data(event_cpu);
1590 + event_data = alloc_event_data(cpu);
1591 if (!event_data)
1592 return NULL;
1593 INIT_WORK(&event_data->work, free_event_data);
1594 diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
1595 index 1d94ebec027b..2bce7cf0b0af 100644
1596 --- a/drivers/hwtracing/coresight/coresight-etm4x.c
1597 +++ b/drivers/hwtracing/coresight/coresight-etm4x.c
1598 @@ -54,7 +54,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
1599
1600 static bool etm4_arch_supported(u8 arch)
1601 {
1602 - switch (arch) {
1603 + /* Mask out the minor version number */
1604 + switch (arch & 0xf0) {
1605 case ETM_ARCH_V4:
1606 break;
1607 default:
1608 diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
1609 index 6cb7ad608bcd..0f01cdba9d2c 100644
1610 --- a/drivers/i2c/i2c-core-of.c
1611 +++ b/drivers/i2c/i2c-core-of.c
1612 @@ -121,6 +121,17 @@ static int of_dev_node_match(struct device *dev, void *data)
1613 return dev->of_node == data;
1614 }
1615
1616 +static int of_dev_or_parent_node_match(struct device *dev, void *data)
1617 +{
1618 + if (dev->of_node == data)
1619 + return 1;
1620 +
1621 + if (dev->parent)
1622 + return dev->parent->of_node == data;
1623 +
1624 + return 0;
1625 +}
1626 +
1627 /* must call put_device() when done with returned i2c_client device */
1628 struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
1629 {
1630 @@ -145,7 +156,8 @@ struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
1631 struct device *dev;
1632 struct i2c_adapter *adapter;
1633
1634 - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1635 + dev = bus_find_device(&i2c_bus_type, NULL, node,
1636 + of_dev_or_parent_node_match);
1637 if (!dev)
1638 return NULL;
1639
1640 diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
1641 index b093ecddf1a8..54db848f0bcd 100644
1642 --- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
1643 +++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
1644 @@ -423,18 +423,14 @@ static irqreturn_t pm8xxx_eoc_irq(int irq, void *d)
1645 static struct pm8xxx_chan_info *
1646 pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan)
1647 {
1648 - struct pm8xxx_chan_info *ch;
1649 int i;
1650
1651 for (i = 0; i < adc->nchans; i++) {
1652 - ch = &adc->chans[i];
1653 + struct pm8xxx_chan_info *ch = &adc->chans[i];
1654 if (ch->hwchan->amux_channel == chan)
1655 - break;
1656 + return ch;
1657 }
1658 - if (i == adc->nchans)
1659 - return NULL;
1660 -
1661 - return ch;
1662 + return NULL;
1663 }
1664
1665 static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,
1666 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
1667 index 0f83cbec33f3..a68569ec86bf 100644
1668 --- a/drivers/infiniband/hw/cxgb4/cm.c
1669 +++ b/drivers/infiniband/hw/cxgb4/cm.c
1670 @@ -1904,8 +1904,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1671 }
1672 mutex_unlock(&ep->com.mutex);
1673
1674 - if (release)
1675 + if (release) {
1676 + close_complete_upcall(ep, -ECONNRESET);
1677 release_ep_resources(ep);
1678 + }
1679 c4iw_put_ep(&ep->com);
1680 return 0;
1681 }
1682 @@ -3608,7 +3610,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
1683 if (close) {
1684 if (abrupt) {
1685 set_bit(EP_DISC_ABORT, &ep->com.history);
1686 - close_complete_upcall(ep, -ECONNRESET);
1687 ret = send_abort(ep);
1688 } else {
1689 set_bit(EP_DISC_CLOSE, &ep->com.history);
1690 diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
1691 index fedaf8260105..8c79a480f2b7 100644
1692 --- a/drivers/infiniband/hw/mlx4/cm.c
1693 +++ b/drivers/infiniband/hw/mlx4/cm.c
1694 @@ -39,7 +39,7 @@
1695
1696 #include "mlx4_ib.h"
1697
1698 -#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
1699 +#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)
1700
1701 struct id_map_entry {
1702 struct rb_node node;
1703 diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
1704 index 23520df7650f..55cd6e0b409c 100644
1705 --- a/drivers/input/misc/soc_button_array.c
1706 +++ b/drivers/input/misc/soc_button_array.c
1707 @@ -373,7 +373,7 @@ static struct soc_button_info soc_button_PNP0C40[] = {
1708 { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
1709 { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
1710 { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
1711 - { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
1712 + { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false },
1713 { }
1714 };
1715
1716 diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
1717 index fde728ea2900..48d4709a8e93 100644
1718 --- a/drivers/iommu/io-pgtable-arm-v7s.c
1719 +++ b/drivers/iommu/io-pgtable-arm-v7s.c
1720 @@ -228,7 +228,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
1721 if (dma != phys)
1722 goto out_unmap;
1723 }
1724 - kmemleak_ignore(table);
1725 + if (lvl == 2)
1726 + kmemleak_ignore(table);
1727 return table;
1728
1729 out_unmap:
1730 diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
1731 index 3d79a6380761..723f2f17497a 100644
1732 --- a/drivers/leds/leds-lp55xx-common.c
1733 +++ b/drivers/leds/leds-lp55xx-common.c
1734 @@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
1735
1736 if (!fw) {
1737 dev_err(dev, "firmware request failed\n");
1738 - goto out;
1739 + return;
1740 }
1741
1742 /* handling firmware data is chip dependent */
1743 @@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
1744
1745 mutex_unlock(&chip->lock);
1746
1747 -out:
1748 /* firmware should be released for other channel use */
1749 release_firmware(chip->fw);
1750 + chip->fw = NULL;
1751 }
1752
1753 static int lp55xx_request_firmware(struct lp55xx_chip *chip)
1754 diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
1755 index 26f035a0c5b9..d9481640b3e1 100644
1756 --- a/drivers/md/bcache/sysfs.c
1757 +++ b/drivers/md/bcache/sysfs.c
1758 @@ -283,8 +283,12 @@ STORE(__cached_dev)
1759 sysfs_strtoul_clamp(writeback_rate_update_seconds,
1760 dc->writeback_rate_update_seconds,
1761 1, WRITEBACK_RATE_UPDATE_SECS_MAX);
1762 - d_strtoul(writeback_rate_i_term_inverse);
1763 - d_strtoul_nonzero(writeback_rate_p_term_inverse);
1764 + sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
1765 + dc->writeback_rate_i_term_inverse,
1766 + 1, UINT_MAX);
1767 + sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
1768 + dc->writeback_rate_p_term_inverse,
1769 + 1, UINT_MAX);
1770 d_strtoul_nonzero(writeback_rate_minimum);
1771
1772 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
1773 @@ -295,7 +299,9 @@ STORE(__cached_dev)
1774 dc->io_disable = v ? 1 : 0;
1775 }
1776
1777 - d_strtoi_h(sequential_cutoff);
1778 + sysfs_strtoul_clamp(sequential_cutoff,
1779 + dc->sequential_cutoff,
1780 + 0, UINT_MAX);
1781 d_strtoi_h(readahead);
1782
1783 if (attr == &sysfs_clear_stats)
1784 @@ -766,8 +772,17 @@ STORE(__bch_cache_set)
1785 c->error_limit = strtoul_or_return(buf);
1786
1787 /* See count_io_errors() for why 88 */
1788 - if (attr == &sysfs_io_error_halflife)
1789 - c->error_decay = strtoul_or_return(buf) / 88;
1790 + if (attr == &sysfs_io_error_halflife) {
1791 + unsigned long v = 0;
1792 + ssize_t ret;
1793 +
1794 + ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
1795 + if (!ret) {
1796 + c->error_decay = v / 88;
1797 + return size;
1798 + }
1799 + return ret;
1800 + }
1801
1802 if (attr == &sysfs_io_disable) {
1803 v = strtoul_or_return(buf);
1804 diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
1805 index 3fe82425859c..0ad2715a884e 100644
1806 --- a/drivers/md/bcache/sysfs.h
1807 +++ b/drivers/md/bcache/sysfs.h
1808 @@ -81,9 +81,16 @@ do { \
1809
1810 #define sysfs_strtoul_clamp(file, var, min, max) \
1811 do { \
1812 - if (attr == &sysfs_ ## file) \
1813 - return strtoul_safe_clamp(buf, var, min, max) \
1814 - ?: (ssize_t) size; \
1815 + if (attr == &sysfs_ ## file) { \
1816 + unsigned long v = 0; \
1817 + ssize_t ret; \
1818 + ret = strtoul_safe_clamp(buf, v, min, max); \
1819 + if (!ret) { \
1820 + var = v; \
1821 + return size; \
1822 + } \
1823 + return ret; \
1824 + } \
1825 } while (0)
1826
1827 #define strtoul_or_return(cp) \
1828 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
1829 index cd4220ee7004..435a2ee4a392 100644
1830 --- a/drivers/md/dm-thin.c
1831 +++ b/drivers/md/dm-thin.c
1832 @@ -3283,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1833 as.argc = argc;
1834 as.argv = argv;
1835
1836 + /* make sure metadata and data are different devices */
1837 + if (!strcmp(argv[0], argv[1])) {
1838 + ti->error = "Error setting metadata or data device";
1839 + r = -EINVAL;
1840 + goto out_unlock;
1841 + }
1842 +
1843 /*
1844 * Set default pool features.
1845 */
1846 @@ -4167,6 +4174,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
1847 tc->sort_bio_list = RB_ROOT;
1848
1849 if (argc == 3) {
1850 + if (!strcmp(argv[0], argv[2])) {
1851 + ti->error = "Error setting origin device";
1852 + r = -EINVAL;
1853 + goto bad_origin_dev;
1854 + }
1855 +
1856 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
1857 if (r) {
1858 ti->error = "Error opening origin device";
1859 diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
1860 index efda1aa95ca0..7a7d3969af20 100644
1861 --- a/drivers/media/i2c/mt9m111.c
1862 +++ b/drivers/media/i2c/mt9m111.c
1863 @@ -1014,6 +1014,8 @@ static int mt9m111_probe(struct i2c_client *client,
1864 mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
1865 mt9m111->rect.width = MT9M111_MAX_WIDTH;
1866 mt9m111->rect.height = MT9M111_MAX_HEIGHT;
1867 + mt9m111->width = mt9m111->rect.width;
1868 + mt9m111->height = mt9m111->rect.height;
1869 mt9m111->fmt = &mt9m111_colour_fmts[0];
1870 mt9m111->lastpage = -1;
1871 mutex_init(&mt9m111->power_lock);
1872 diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
1873 index 605f3e25ad82..f5a1ee90a6c5 100644
1874 --- a/drivers/media/i2c/ov7740.c
1875 +++ b/drivers/media/i2c/ov7740.c
1876 @@ -1101,6 +1101,9 @@ static int ov7740_probe(struct i2c_client *client,
1877 if (ret)
1878 return ret;
1879
1880 + pm_runtime_set_active(&client->dev);
1881 + pm_runtime_enable(&client->dev);
1882 +
1883 ret = ov7740_detect(ov7740);
1884 if (ret)
1885 goto error_detect;
1886 @@ -1123,8 +1126,6 @@ static int ov7740_probe(struct i2c_client *client,
1887 if (ret)
1888 goto error_async_register;
1889
1890 - pm_runtime_set_active(&client->dev);
1891 - pm_runtime_enable(&client->dev);
1892 pm_runtime_idle(&client->dev);
1893
1894 return 0;
1895 @@ -1134,6 +1135,8 @@ error_async_register:
1896 error_init_controls:
1897 ov7740_free_controls(ov7740);
1898 error_detect:
1899 + pm_runtime_disable(&client->dev);
1900 + pm_runtime_set_suspended(&client->dev);
1901 ov7740_set_power(ov7740, 0);
1902 media_entity_cleanup(&ov7740->subdev.entity);
1903
1904 diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
1905 index 4f24da8afecc..11429633b2fb 100644
1906 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
1907 +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
1908 @@ -702,7 +702,7 @@ end:
1909 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
1910 }
1911
1912 -static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
1913 +static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
1914 enum v4l2_buf_type type)
1915 {
1916 if (V4L2_TYPE_IS_OUTPUT(type))
1917 @@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
1918 static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
1919 {
1920 struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
1921 - struct vb2_buffer *vb;
1922 + struct vb2_v4l2_buffer *vb;
1923 int ret = 0;
1924
1925 ret = pm_runtime_get_sync(ctx->jpeg->dev);
1926 @@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
1927 return 0;
1928 err:
1929 while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
1930 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED);
1931 + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
1932 return ret;
1933 }
1934
1935 static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
1936 {
1937 struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
1938 - struct vb2_buffer *vb;
1939 + struct vb2_v4l2_buffer *vb;
1940
1941 /*
1942 * STREAMOFF is an acknowledgment for source change event.
1943 @@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
1944 struct mtk_jpeg_src_buf *src_buf;
1945
1946 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1947 - src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
1948 + src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf);
1949 mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
1950 ctx->state = MTK_JPEG_RUNNING;
1951 } else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1952 @@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
1953 }
1954
1955 while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
1956 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
1957 + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
1958
1959 pm_runtime_put_sync(ctx->jpeg->dev);
1960 }
1961 @@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv)
1962 {
1963 struct mtk_jpeg_ctx *ctx = priv;
1964 struct mtk_jpeg_dev *jpeg = ctx->jpeg;
1965 - struct vb2_buffer *src_buf, *dst_buf;
1966 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
1967 enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
1968 unsigned long flags;
1969 struct mtk_jpeg_src_buf *jpeg_src_buf;
1970 @@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv)
1971
1972 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
1973 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
1974 - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
1975 + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
1976
1977 if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
1978 - for (i = 0; i < dst_buf->num_planes; i++)
1979 - vb2_set_plane_payload(dst_buf, i, 0);
1980 + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
1981 + vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
1982 buf_state = VB2_BUF_STATE_DONE;
1983 goto dec_end;
1984 }
1985 @@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv)
1986 return;
1987 }
1988
1989 - mtk_jpeg_set_dec_src(ctx, src_buf, &bs);
1990 - if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb))
1991 + mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
1992 + if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
1993 goto dec_end;
1994
1995 spin_lock_irqsave(&jpeg->hw_lock, flags);
1996 @@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv)
1997 dec_end:
1998 v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
1999 v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
2000 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
2001 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
2002 + v4l2_m2m_buf_done(src_buf, buf_state);
2003 + v4l2_m2m_buf_done(dst_buf, buf_state);
2004 v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
2005 }
2006
2007 @@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
2008 {
2009 struct mtk_jpeg_dev *jpeg = priv;
2010 struct mtk_jpeg_ctx *ctx;
2011 - struct vb2_buffer *src_buf, *dst_buf;
2012 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
2013 struct mtk_jpeg_src_buf *jpeg_src_buf;
2014 enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
2015 u32 dec_irq_ret;
2016 @@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
2017
2018 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
2019 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
2020 - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
2021 + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
2022
2023 if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
2024 mtk_jpeg_dec_reset(jpeg->dec_reg_base);
2025 @@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
2026 goto dec_end;
2027 }
2028
2029 - for (i = 0; i < dst_buf->num_planes; i++)
2030 - vb2_set_plane_payload(dst_buf, i,
2031 + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
2032 + vb2_set_plane_payload(&dst_buf->vb2_buf, i,
2033 jpeg_src_buf->dec_param.comp_size[i]);
2034
2035 buf_state = VB2_BUF_STATE_DONE;
2036
2037 dec_end:
2038 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
2039 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
2040 + v4l2_m2m_buf_done(src_buf, buf_state);
2041 + v4l2_m2m_buf_done(dst_buf, buf_state);
2042 v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
2043 return IRQ_HANDLED;
2044 }
2045 diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
2046 index 64195c4ddeaf..419e1cb10dc6 100644
2047 --- a/drivers/media/platform/mx2_emmaprp.c
2048 +++ b/drivers/media/platform/mx2_emmaprp.c
2049 @@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv)
2050 {
2051 struct emmaprp_ctx *ctx = priv;
2052 struct emmaprp_q_data *s_q_data, *d_q_data;
2053 - struct vb2_buffer *src_buf, *dst_buf;
2054 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
2055 struct emmaprp_dev *pcdev = ctx->dev;
2056 unsigned int s_width, s_height;
2057 unsigned int d_width, d_height;
2058 @@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv)
2059 d_height = d_q_data->height;
2060 d_size = d_width * d_height;
2061
2062 - p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
2063 - p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
2064 + p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
2065 + p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
2066 if (!p_in || !p_out) {
2067 v4l2_err(&pcdev->v4l2_dev,
2068 "Acquiring kernel pointers to buffers failed\n");
2069 diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
2070 index ce09799976ef..e1085e3ab3cc 100644
2071 --- a/drivers/media/platform/rcar-vin/rcar-core.c
2072 +++ b/drivers/media/platform/rcar-vin/rcar-core.c
2073 @@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
2074 !is_media_entity_v4l2_video_device(link->sink->entity))
2075 return 0;
2076
2077 - /* If any entity is in use don't allow link changes. */
2078 + /*
2079 + * Don't allow link changes if any entity in the graph is
2080 + * streaming, modifying the CHSEL register fields can disrupt
2081 + * running streams.
2082 + */
2083 media_device_for_each_entity(entity, &group->mdev)
2084 - if (entity->use_count)
2085 + if (entity->stream_count)
2086 return -EBUSY;
2087
2088 mutex_lock(&group->lock);
2089 diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
2090 index ab5a6f95044a..86a76f35a9a1 100644
2091 --- a/drivers/media/platform/rockchip/rga/rga.c
2092 +++ b/drivers/media/platform/rockchip/rga/rga.c
2093 @@ -43,7 +43,7 @@ static void device_run(void *prv)
2094 {
2095 struct rga_ctx *ctx = prv;
2096 struct rockchip_rga *rga = ctx->rga;
2097 - struct vb2_buffer *src, *dst;
2098 + struct vb2_v4l2_buffer *src, *dst;
2099 unsigned long flags;
2100
2101 spin_lock_irqsave(&rga->ctrl_lock, flags);
2102 @@ -53,8 +53,8 @@ static void device_run(void *prv)
2103 src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2104 dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
2105
2106 - rga_buf_map(src);
2107 - rga_buf_map(dst);
2108 + rga_buf_map(&src->vb2_buf);
2109 + rga_buf_map(&dst->vb2_buf);
2110
2111 rga_hw_start(rga);
2112
2113 diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
2114 index e901201b6fcc..1f58574d0b96 100644
2115 --- a/drivers/media/platform/s5p-g2d/g2d.c
2116 +++ b/drivers/media/platform/s5p-g2d/g2d.c
2117 @@ -487,7 +487,7 @@ static void device_run(void *prv)
2118 {
2119 struct g2d_ctx *ctx = prv;
2120 struct g2d_dev *dev = ctx->dev;
2121 - struct vb2_buffer *src, *dst;
2122 + struct vb2_v4l2_buffer *src, *dst;
2123 unsigned long flags;
2124 u32 cmd = 0;
2125
2126 @@ -502,10 +502,10 @@ static void device_run(void *prv)
2127 spin_lock_irqsave(&dev->ctrl_lock, flags);
2128
2129 g2d_set_src_size(dev, &ctx->in);
2130 - g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
2131 + g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
2132
2133 g2d_set_dst_size(dev, &ctx->out);
2134 - g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
2135 + g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
2136
2137 g2d_set_rop4(dev, ctx->rop);
2138 g2d_set_flip(dev, ctx->flip);
2139 diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
2140 index 04fd2e0493c0..350afaa29a62 100644
2141 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
2142 +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
2143 @@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len);
2144 static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
2145 {
2146 struct s5p_jpeg *jpeg = ctx->jpeg;
2147 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2148 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2149 struct s5p_jpeg_buffer jpeg_buffer;
2150 unsigned int word;
2151 int c, x, components;
2152
2153 jpeg_buffer.size = 2; /* Ls */
2154 jpeg_buffer.data =
2155 - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
2156 + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
2157 jpeg_buffer.curr = 0;
2158
2159 word = 0;
2160 @@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
2161 static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
2162 {
2163 struct s5p_jpeg *jpeg = ctx->jpeg;
2164 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2165 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2166 struct s5p_jpeg_buffer jpeg_buffer;
2167 unsigned int word;
2168 int c, i, n, j;
2169
2170 for (j = 0; j < ctx->out_q.dht.n; ++j) {
2171 jpeg_buffer.size = ctx->out_q.dht.len[j];
2172 - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
2173 + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
2174 ctx->out_q.dht.marker[j];
2175 jpeg_buffer.curr = 0;
2176
2177 @@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
2178 static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
2179 {
2180 struct s5p_jpeg *jpeg = ctx->jpeg;
2181 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2182 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2183 struct s5p_jpeg_buffer jpeg_buffer;
2184 int c, x, components;
2185
2186 jpeg_buffer.size = ctx->out_q.sof_len;
2187 jpeg_buffer.data =
2188 - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
2189 + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
2190 jpeg_buffer.curr = 0;
2191
2192 skip(&jpeg_buffer, 5); /* P, Y, X */
2193 @@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
2194 static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
2195 {
2196 struct s5p_jpeg *jpeg = ctx->jpeg;
2197 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2198 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2199 struct s5p_jpeg_buffer jpeg_buffer;
2200 unsigned int word;
2201 int c, i, j;
2202
2203 for (j = 0; j < ctx->out_q.dqt.n; ++j) {
2204 jpeg_buffer.size = ctx->out_q.dqt.len[j];
2205 - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
2206 + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
2207 ctx->out_q.dqt.marker[j];
2208 jpeg_buffer.curr = 0;
2209
2210 @@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
2211 return 0;
2212 }
2213
2214 -static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
2215 +static int enum_fmt(struct s5p_jpeg_ctx *ctx,
2216 + struct s5p_jpeg_fmt *sjpeg_formats, int n,
2217 struct v4l2_fmtdesc *f, u32 type)
2218 {
2219 int i, num = 0;
2220 + unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
2221
2222 for (i = 0; i < n; ++i) {
2223 - if (sjpeg_formats[i].flags & type) {
2224 + if (sjpeg_formats[i].flags & type &&
2225 + sjpeg_formats[i].flags & fmt_ver_flag) {
2226 /* index-th format of type type found ? */
2227 if (num == f->index)
2228 break;
2229 @@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
2230 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
2231
2232 if (ctx->mode == S5P_JPEG_ENCODE)
2233 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
2234 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
2235 SJPEG_FMT_FLAG_ENC_CAPTURE);
2236
2237 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
2238 - SJPEG_FMT_FLAG_DEC_CAPTURE);
2239 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
2240 + SJPEG_FMT_FLAG_DEC_CAPTURE);
2241 }
2242
2243 static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
2244 @@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
2245 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
2246
2247 if (ctx->mode == S5P_JPEG_ENCODE)
2248 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
2249 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
2250 SJPEG_FMT_FLAG_ENC_OUTPUT);
2251
2252 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
2253 - SJPEG_FMT_FLAG_DEC_OUTPUT);
2254 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
2255 + SJPEG_FMT_FLAG_DEC_OUTPUT);
2256 }
2257
2258 static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
2259 @@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv)
2260 {
2261 struct s5p_jpeg_ctx *ctx = priv;
2262 struct s5p_jpeg *jpeg = ctx->jpeg;
2263 - struct vb2_buffer *src_buf, *dst_buf;
2264 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
2265 unsigned long src_addr, dst_addr, flags;
2266
2267 spin_lock_irqsave(&ctx->jpeg->slock, flags);
2268
2269 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2270 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
2271 - src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
2272 - dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
2273 + src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
2274 + dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
2275
2276 s5p_jpeg_reset(jpeg->regs);
2277 s5p_jpeg_poweron(jpeg->regs);
2278 @@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
2279 {
2280 struct s5p_jpeg *jpeg = ctx->jpeg;
2281 struct s5p_jpeg_fmt *fmt;
2282 - struct vb2_buffer *vb;
2283 + struct vb2_v4l2_buffer *vb;
2284 struct s5p_jpeg_addr jpeg_addr = {};
2285 u32 pix_size, padding_bytes = 0;
2286
2287 @@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
2288 vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
2289 }
2290
2291 - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
2292 + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
2293
2294 if (fmt->colplanes == 2) {
2295 jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
2296 @@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
2297 static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
2298 {
2299 struct s5p_jpeg *jpeg = ctx->jpeg;
2300 - struct vb2_buffer *vb;
2301 + struct vb2_v4l2_buffer *vb;
2302 unsigned int jpeg_addr = 0;
2303
2304 if (ctx->mode == S5P_JPEG_ENCODE)
2305 @@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
2306 else
2307 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2308
2309 - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
2310 + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
2311 if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
2312 ctx->mode == S5P_JPEG_DECODE)
2313 jpeg_addr += ctx->out_q.sos;
2314 @@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
2315 {
2316 struct s5p_jpeg *jpeg = ctx->jpeg;
2317 struct s5p_jpeg_fmt *fmt;
2318 - struct vb2_buffer *vb;
2319 + struct vb2_v4l2_buffer *vb;
2320 struct s5p_jpeg_addr jpeg_addr = {};
2321 u32 pix_size;
2322
2323 @@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
2324 fmt = ctx->cap_q.fmt;
2325 }
2326
2327 - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
2328 + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
2329
2330 if (fmt->colplanes == 2) {
2331 jpeg_addr.cb = jpeg_addr.y + pix_size;
2332 @@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
2333 static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
2334 {
2335 struct s5p_jpeg *jpeg = ctx->jpeg;
2336 - struct vb2_buffer *vb;
2337 + struct vb2_v4l2_buffer *vb;
2338 unsigned int jpeg_addr = 0;
2339
2340 if (ctx->mode == S5P_JPEG_ENCODE)
2341 @@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
2342 else
2343 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
2344
2345 - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
2346 + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
2347 exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
2348 }
2349
2350 diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
2351 index 1d274c64de09..03ee9839a03e 100644
2352 --- a/drivers/media/platform/sh_veu.c
2353 +++ b/drivers/media/platform/sh_veu.c
2354 @@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu,
2355 static void sh_veu_device_run(void *priv)
2356 {
2357 struct sh_veu_dev *veu = priv;
2358 - struct vb2_buffer *src_buf, *dst_buf;
2359 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
2360
2361 src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
2362 dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
2363
2364 if (src_buf && dst_buf)
2365 - sh_veu_process(veu, src_buf, dst_buf);
2366 + sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
2367 }
2368
2369 /* ========== video ioctls ========== */
2370 diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
2371 index c60a7625b1fa..b2873a2432b6 100644
2372 --- a/drivers/mmc/host/omap.c
2373 +++ b/drivers/mmc/host/omap.c
2374 @@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
2375 reg &= ~(1 << 5);
2376 OMAP_MMC_WRITE(host, SDIO, reg);
2377 /* Set maximum timeout */
2378 - OMAP_MMC_WRITE(host, CTO, 0xff);
2379 + OMAP_MMC_WRITE(host, CTO, 0xfd);
2380 }
2381
2382 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
2383 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
2384 index c078c791f481..dabe89968a78 100644
2385 --- a/drivers/net/dsa/mv88e6xxx/chip.c
2386 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
2387 @@ -442,12 +442,20 @@ out_mapping:
2388
2389 static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
2390 {
2391 + static struct lock_class_key lock_key;
2392 + static struct lock_class_key request_key;
2393 int err;
2394
2395 err = mv88e6xxx_g1_irq_setup_common(chip);
2396 if (err)
2397 return err;
2398
2399 + /* These lock classes tells lockdep that global 1 irqs are in
2400 + * a different category than their parent GPIO, so it won't
2401 + * report false recursion.
2402 + */
2403 + irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
2404 +
2405 err = request_threaded_irq(chip->irq, NULL,
2406 mv88e6xxx_g1_irq_thread_fn,
2407 IRQF_ONESHOT,
2408 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
2409 index 9a7f70db20c7..733d9172425b 100644
2410 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
2411 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
2412 @@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic)
2413
2414 for (i = 0; i < enic->intr_count; i++) {
2415 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
2416 - (enic->msix[i].affinity_mask &&
2417 + (cpumask_available(enic->msix[i].affinity_mask) &&
2418 !cpumask_empty(enic->msix[i].affinity_mask)))
2419 continue;
2420 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
2421 @@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic)
2422 for (i = 0; i < enic->intr_count; i++) {
2423 if (enic_is_err_intr(enic, i) ||
2424 enic_is_notify_intr(enic, i) ||
2425 - !enic->msix[i].affinity_mask ||
2426 + !cpumask_available(enic->msix[i].affinity_mask) ||
2427 cpumask_empty(enic->msix[i].affinity_mask))
2428 continue;
2429 err = irq_set_affinity_hint(enic->msix_entry[i].vector,
2430 @@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic)
2431 for (i = 0; i < enic->wq_count; i++) {
2432 int wq_intr = enic_msix_wq_intr(enic, i);
2433
2434 - if (enic->msix[wq_intr].affinity_mask &&
2435 + if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
2436 !cpumask_empty(enic->msix[wq_intr].affinity_mask))
2437 netif_set_xps_queue(enic->netdev,
2438 enic->msix[wq_intr].affinity_mask,
2439 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
2440 index 3ba0c90e7055..8b11682ebba2 100644
2441 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
2442 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
2443 @@ -2106,7 +2106,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
2444 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2445 snprintf(adapter->rx_ring->name,
2446 sizeof(adapter->rx_ring->name) - 1,
2447 - "%s-rx-0", netdev->name);
2448 + "%.14s-rx-0", netdev->name);
2449 else
2450 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2451 err = request_irq(adapter->msix_entries[vector].vector,
2452 @@ -2122,7 +2122,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
2453 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2454 snprintf(adapter->tx_ring->name,
2455 sizeof(adapter->tx_ring->name) - 1,
2456 - "%s-tx-0", netdev->name);
2457 + "%.14s-tx-0", netdev->name);
2458 else
2459 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2460 err = request_irq(adapter->msix_entries[vector].vector,
2461 @@ -5286,8 +5286,13 @@ static void e1000_watchdog_task(struct work_struct *work)
2462 /* 8000ES2LAN requires a Rx packet buffer work-around
2463 * on link down event; reset the controller to flush
2464 * the Rx packet buffer.
2465 + *
2466 + * If the link is lost the controller stops DMA, but
2467 + * if there is queued Tx work it cannot be done. So
2468 + * reset the controller to flush the Tx packet buffers.
2469 */
2470 - if (adapter->flags & FLAG_RX_NEEDS_RESTART)
2471 + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
2472 + e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
2473 adapter->flags |= FLAG_RESTART_NOW;
2474 else
2475 pm_schedule_suspend(netdev->dev.parent,
2476 @@ -5310,14 +5315,6 @@ link_up:
2477 adapter->gotc_old = adapter->stats.gotc;
2478 spin_unlock(&adapter->stats64_lock);
2479
2480 - /* If the link is lost the controller stops DMA, but
2481 - * if there is queued Tx work it cannot be done. So
2482 - * reset the controller to flush the Tx packet buffers.
2483 - */
2484 - if (!netif_carrier_ok(netdev) &&
2485 - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
2486 - adapter->flags |= FLAG_RESTART_NOW;
2487 -
2488 /* If reset is necessary, do it outside of interrupt context. */
2489 if (adapter->flags & FLAG_RESTART_NOW) {
2490 schedule_work(&adapter->reset_task);
2491 @@ -7330,6 +7327,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2492
2493 e1000_print_device_info(adapter);
2494
2495 + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
2496 +
2497 if (pci_dev_run_wake(pdev))
2498 pm_runtime_put_noidle(&pdev->dev);
2499
2500 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2501 index f8e4808a8317..9988c89ed9fd 100644
2502 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2503 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
2504 @@ -1372,13 +1372,9 @@ static void mvpp2_port_reset(struct mvpp2_port *port)
2505 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
2506 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
2507
2508 - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2509 - ~MVPP2_GMAC_PORT_RESET_MASK;
2510 + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
2511 + MVPP2_GMAC_PORT_RESET_MASK;
2512 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
2513 -
2514 - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2515 - MVPP2_GMAC_PORT_RESET_MASK)
2516 - continue;
2517 }
2518
2519 /* Change maximum receive size of the port */
2520 @@ -4445,12 +4441,15 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
2521 const struct phylink_link_state *state)
2522 {
2523 u32 an, ctrl0, ctrl2, ctrl4;
2524 + u32 old_ctrl2;
2525
2526 an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2527 ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
2528 ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
2529 ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
2530
2531 + old_ctrl2 = ctrl2;
2532 +
2533 /* Force link down */
2534 an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
2535 an |= MVPP2_GMAC_FORCE_LINK_DOWN;
2536 @@ -4523,6 +4522,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
2537 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
2538 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
2539 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
2540 +
2541 + if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
2542 + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
2543 + MVPP2_GMAC_PORT_RESET_MASK)
2544 + continue;
2545 + }
2546 }
2547
2548 static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
2549 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2550 index d6706475a3ba..26c9f9421901 100644
2551 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2552 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
2553 @@ -1797,7 +1797,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
2554 u64 node_guid;
2555 int err = 0;
2556
2557 - if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
2558 + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
2559 return -EPERM;
2560 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
2561 return -EINVAL;
2562 @@ -1871,7 +1871,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
2563 {
2564 struct mlx5_vport *evport;
2565
2566 - if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
2567 + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
2568 return -EPERM;
2569 if (!LEGAL_VPORT(esw, vport))
2570 return -EINVAL;
2571 @@ -2044,19 +2044,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
2572 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
2573 u32 max_rate, u32 min_rate)
2574 {
2575 - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2576 - bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2577 - fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2578 - bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2579 struct mlx5_vport *evport;
2580 + u32 fw_max_bw_share;
2581 u32 previous_min_rate;
2582 u32 divider;
2583 + bool min_rate_supported;
2584 + bool max_rate_supported;
2585 int err = 0;
2586
2587 if (!ESW_ALLOWED(esw))
2588 return -EPERM;
2589 if (!LEGAL_VPORT(esw, vport))
2590 return -EINVAL;
2591 +
2592 + fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
2593 + min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
2594 + fw_max_bw_share >= MLX5_MIN_BW_SHARE;
2595 + max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
2596 +
2597 if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
2598 return -EOPNOTSUPP;
2599
2600 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2601 index a12b5710891e..f9bef030ee05 100644
2602 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2603 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
2604 @@ -1988,7 +1988,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2605 int i;
2606
2607 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2608 - snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2609 + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2610 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2611 *p += ETH_GSTRING_LEN;
2612 }
2613 @@ -1999,7 +1999,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2614 int i;
2615
2616 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2617 - snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
2618 + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2619 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2620 *p += ETH_GSTRING_LEN;
2621 }
2622 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2623 index 43ab9e905bed..886176be818e 100644
2624 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2625 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
2626 @@ -474,7 +474,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
2627 struct dma_desc *p, struct sk_buff *skb)
2628 {
2629 struct skb_shared_hwtstamps shhwtstamp;
2630 - u64 ns;
2631 + u64 ns = 0;
2632
2633 if (!priv->hwts_tx_en)
2634 return;
2635 @@ -513,7 +513,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
2636 {
2637 struct skb_shared_hwtstamps *shhwtstamp = NULL;
2638 struct dma_desc *desc = p;
2639 - u64 ns;
2640 + u64 ns = 0;
2641
2642 if (!priv->hwts_rx_en)
2643 return;
2644 @@ -558,8 +558,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
2645 u32 snap_type_sel = 0;
2646 u32 ts_master_en = 0;
2647 u32 ts_event_en = 0;
2648 + u32 sec_inc = 0;
2649 u32 value = 0;
2650 - u32 sec_inc;
2651 bool xmac;
2652
2653 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
2654 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
2655 index 2293e21f789f..cc60b3fb0892 100644
2656 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
2657 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
2658 @@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
2659 struct stmmac_priv *priv =
2660 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
2661 unsigned long flags;
2662 - u64 ns;
2663 + u64 ns = 0;
2664
2665 spin_lock_irqsave(&priv->ptp_lock, flags);
2666 stmmac_get_systime(priv, priv->ptpaddr, &ns);
2667 diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
2668 index e1225545362d..0ba3607585bd 100644
2669 --- a/drivers/net/phy/phy-c45.c
2670 +++ b/drivers/net/phy/phy-c45.c
2671 @@ -147,9 +147,15 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
2672 mmd_mask &= ~BIT(devad);
2673
2674 /* The link state is latched low so that momentary link
2675 - * drops can be detected. Do not double-read the status
2676 - * register if the link is down.
2677 + * drops can be detected. Do not double-read the status
2678 + * in polling mode to detect such short link drops.
2679 */
2680 + if (!phy_polling_mode(phydev)) {
2681 + val = phy_read_mmd(phydev, devad, MDIO_STAT1);
2682 + if (val < 0)
2683 + return val;
2684 + }
2685 +
2686 val = phy_read_mmd(phydev, devad, MDIO_STAT1);
2687 if (val < 0)
2688 return val;
2689 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
2690 index 2c32c795f5dd..8a96d985a52f 100644
2691 --- a/drivers/net/phy/phy_device.c
2692 +++ b/drivers/net/phy/phy_device.c
2693 @@ -1503,10 +1503,15 @@ int genphy_update_link(struct phy_device *phydev)
2694 {
2695 int status;
2696
2697 - /* Do a fake read */
2698 - status = phy_read(phydev, MII_BMSR);
2699 - if (status < 0)
2700 - return status;
2701 + /* The link state is latched low so that momentary link
2702 + * drops can be detected. Do not double-read the status
2703 + * in polling mode to detect such short link drops.
2704 + */
2705 + if (!phy_polling_mode(phydev)) {
2706 + status = phy_read(phydev, MII_BMSR);
2707 + if (status < 0)
2708 + return status;
2709 + }
2710
2711 /* Read link and autonegotiation status */
2712 status = phy_read(phydev, MII_BMSR);
2713 diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
2714 index 18c709c484e7..f761d651c16e 100644
2715 --- a/drivers/net/wireless/ath/ath10k/ce.c
2716 +++ b/drivers/net/wireless/ath/ath10k/ce.c
2717 @@ -500,14 +500,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
2718 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
2719
2720 /* WORKAROUND */
2721 - if (!(flags & CE_SEND_FLAG_GATHER)) {
2722 - if (ar->hw_params.shadow_reg_support)
2723 - ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
2724 - write_index);
2725 - else
2726 - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
2727 - write_index);
2728 - }
2729 + if (!(flags & CE_SEND_FLAG_GATHER))
2730 + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
2731
2732 src_ring->write_index = write_index;
2733 exit:
2734 @@ -581,8 +575,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
2735 /* Update Source Ring Write Index */
2736 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
2737
2738 - if (!(flags & CE_SEND_FLAG_GATHER))
2739 - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
2740 + if (!(flags & CE_SEND_FLAG_GATHER)) {
2741 + if (ar->hw_params.shadow_reg_support)
2742 + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
2743 + write_index);
2744 + else
2745 + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
2746 + write_index);
2747 + }
2748
2749 src_ring->write_index = write_index;
2750 exit:
2751 @@ -1394,12 +1394,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
2752 u32 nentries)
2753 {
2754 src_ring->shadow_base_unaligned = kcalloc(nentries,
2755 - sizeof(struct ce_desc),
2756 + sizeof(struct ce_desc_64),
2757 GFP_KERNEL);
2758 if (!src_ring->shadow_base_unaligned)
2759 return -ENOMEM;
2760
2761 - src_ring->shadow_base = (struct ce_desc *)
2762 + src_ring->shadow_base = (struct ce_desc_64 *)
2763 PTR_ALIGN(src_ring->shadow_base_unaligned,
2764 CE_DESC_RING_ALIGN);
2765 return 0;
2766 @@ -1453,7 +1453,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
2767 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
2768 if (ret) {
2769 dma_free_coherent(ar->dev,
2770 - (nentries * sizeof(struct ce_desc) +
2771 + (nentries * sizeof(struct ce_desc_64) +
2772 CE_DESC_RING_ALIGN),
2773 src_ring->base_addr_owner_space_unaligned,
2774 base_addr);
2775 diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
2776 index b8fb5382dede..8088f7a66426 100644
2777 --- a/drivers/net/wireless/ath/ath10k/ce.h
2778 +++ b/drivers/net/wireless/ath/ath10k/ce.h
2779 @@ -118,7 +118,7 @@ struct ath10k_ce_ring {
2780 u32 base_addr_ce_space;
2781
2782 char *shadow_base_unaligned;
2783 - struct ce_desc *shadow_base;
2784 + struct ce_desc_64 *shadow_base;
2785
2786 /* keep last */
2787 void *per_transfer_context[0];
2788 diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
2789 index f79c337105cb..2daf33342b23 100644
2790 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c
2791 +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
2792 @@ -1420,6 +1420,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
2793 u8 *buf, *dpos;
2794 const u8 *spos;
2795
2796 + if (!ies1)
2797 + ies1_len = 0;
2798 +
2799 + if (!ies2)
2800 + ies2_len = 0;
2801 +
2802 if (ies1_len == 0 && ies2_len == 0) {
2803 *merged_ies = NULL;
2804 *merged_len = 0;
2805 @@ -1429,17 +1435,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
2806 buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
2807 if (!buf)
2808 return -ENOMEM;
2809 - memcpy(buf, ies1, ies1_len);
2810 + if (ies1)
2811 + memcpy(buf, ies1, ies1_len);
2812 dpos = buf + ies1_len;
2813 spos = ies2;
2814 - while (spos + 1 < ies2 + ies2_len) {
2815 + while (spos && (spos + 1 < ies2 + ies2_len)) {
2816 /* IE tag at offset 0, length at offset 1 */
2817 u16 ielen = 2 + spos[1];
2818
2819 if (spos + ielen > ies2 + ies2_len)
2820 break;
2821 if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
2822 - !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
2823 + (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
2824 + spos, ielen))) {
2825 memcpy(dpos, spos, ielen);
2826 dpos += ielen;
2827 }
2828 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
2829 index cd3651069d0c..27893af63ebc 100644
2830 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
2831 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
2832 @@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
2833 return err;
2834 }
2835
2836 - err = request_firmware(&clm, clm_name, bus->dev);
2837 + err = firmware_request_nowarn(&clm, clm_name, bus->dev);
2838 if (err) {
2839 brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
2840 err);
2841 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2842 index 16c6c7f921a8..8b7d70e3a379 100644
2843 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2844 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
2845 @@ -132,13 +132,17 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
2846
2847 static int iwl_configure_rxq(struct iwl_mvm *mvm)
2848 {
2849 - int i, num_queues, size;
2850 + int i, num_queues, size, ret;
2851 struct iwl_rfh_queue_config *cmd;
2852 + struct iwl_host_cmd hcmd = {
2853 + .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
2854 + .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
2855 + };
2856
2857 /* Do not configure default queue, it is configured via context info */
2858 num_queues = mvm->trans->num_rx_queues - 1;
2859
2860 - size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
2861 + size = struct_size(cmd, data, num_queues);
2862
2863 cmd = kzalloc(size, GFP_KERNEL);
2864 if (!cmd)
2865 @@ -159,10 +163,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
2866 cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
2867 }
2868
2869 - return iwl_mvm_send_cmd_pdu(mvm,
2870 - WIDE_ID(DATA_PATH_GROUP,
2871 - RFH_QUEUE_CONFIG_CMD),
2872 - 0, size, cmd);
2873 + hcmd.data[0] = cmd;
2874 + hcmd.len[0] = size;
2875 +
2876 + ret = iwl_mvm_send_cmd(mvm, &hcmd);
2877 +
2878 + kfree(cmd);
2879 +
2880 + return ret;
2881 }
2882
2883 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
2884 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
2885 index d4a31e014c82..b2905f01b7df 100644
2886 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
2887 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
2888 @@ -502,7 +502,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
2889 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2890 struct iwl_rb_allocator *rba = &trans_pcie->rba;
2891 struct list_head local_empty;
2892 - int pending = atomic_xchg(&rba->req_pending, 0);
2893 + int pending = atomic_read(&rba->req_pending);
2894
2895 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
2896
2897 @@ -557,11 +557,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
2898 i++;
2899 }
2900
2901 + atomic_dec(&rba->req_pending);
2902 pending--;
2903 +
2904 if (!pending) {
2905 - pending = atomic_xchg(&rba->req_pending, 0);
2906 + pending = atomic_read(&rba->req_pending);
2907 IWL_DEBUG_RX(trans,
2908 - "Pending allocation requests = %d\n",
2909 + "Got more pending allocation requests = %d\n",
2910 pending);
2911 }
2912
2913 @@ -573,12 +575,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
2914 spin_unlock(&rba->lock);
2915
2916 atomic_inc(&rba->req_ready);
2917 +
2918 }
2919
2920 spin_lock(&rba->lock);
2921 /* return unused rbds to the allocator empty list */
2922 list_splice_tail(&local_empty, &rba->rbd_empty);
2923 spin_unlock(&rba->lock);
2924 +
2925 + IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
2926 }
2927
2928 /*
2929 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
2930 index adc88433faa8..2d87ebbfa4da 100644
2931 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
2932 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
2933 @@ -4282,11 +4282,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2934 wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
2935 wiphy->max_remain_on_channel_duration = 5000;
2936 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2937 - BIT(NL80211_IFTYPE_ADHOC) |
2938 BIT(NL80211_IFTYPE_P2P_CLIENT) |
2939 BIT(NL80211_IFTYPE_P2P_GO) |
2940 BIT(NL80211_IFTYPE_AP);
2941
2942 + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
2943 + wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
2944 +
2945 wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
2946 if (adapter->config_bands & BAND_A)
2947 wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
2948 @@ -4346,11 +4348,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
2949 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
2950 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
2951
2952 - wiphy->features |= NL80211_FEATURE_HT_IBSS |
2953 - NL80211_FEATURE_INACTIVITY_TIMER |
2954 + wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER |
2955 NL80211_FEATURE_LOW_PRIORITY_SCAN |
2956 NL80211_FEATURE_NEED_OBSS_SCAN;
2957
2958 + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
2959 + wiphy->features |= NL80211_FEATURE_HT_IBSS;
2960 +
2961 if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info))
2962 wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
2963 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
2964 diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
2965 index 530e5593765c..a1529920d877 100644
2966 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c
2967 +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
2968 @@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
2969 part = np->name;
2970
2971 mtd = get_mtd_device_nm(part);
2972 - if (IS_ERR(mtd))
2973 - return PTR_ERR(mtd);
2974 + if (IS_ERR(mtd)) {
2975 + ret = PTR_ERR(mtd);
2976 + goto out_put_node;
2977 + }
2978
2979 - if (size <= sizeof(*list))
2980 - return -EINVAL;
2981 + if (size <= sizeof(*list)) {
2982 + ret = -EINVAL;
2983 + goto out_put_node;
2984 + }
2985
2986 offset = be32_to_cpup(list);
2987 ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
2988 put_mtd_device(mtd);
2989 if (ret)
2990 - return ret;
2991 + goto out_put_node;
2992
2993 - if (retlen < len)
2994 - return -EINVAL;
2995 + if (retlen < len) {
2996 + ret = -EINVAL;
2997 + goto out_put_node;
2998 + }
2999
3000 - return 0;
3001 +out_put_node:
3002 + of_node_put(np);
3003 + return ret;
3004 #else
3005 return -ENOENT;
3006 #endif
3007 diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
3008 index 79e59f2379a2..8d40e92fb6f2 100644
3009 --- a/drivers/net/wireless/mediatek/mt76/usb.c
3010 +++ b/drivers/net/wireless/mediatek/mt76/usb.c
3011 @@ -796,16 +796,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
3012
3013 err = mt76u_alloc_rx(dev);
3014 if (err < 0)
3015 - goto err;
3016 -
3017 - err = mt76u_alloc_tx(dev);
3018 - if (err < 0)
3019 - goto err;
3020 + return err;
3021
3022 - return 0;
3023 -err:
3024 - mt76u_queues_deinit(dev);
3025 - return err;
3026 + return mt76u_alloc_tx(dev);
3027 }
3028 EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
3029
3030 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
3031 index 662d12703b69..57b503ae63f1 100644
3032 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
3033 +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
3034 @@ -17,7 +17,7 @@
3035
3036 struct mt7601u_dev;
3037
3038 -#define MT7601U_EE_MAX_VER 0x0c
3039 +#define MT7601U_EE_MAX_VER 0x0d
3040 #define MT7601U_EEPROM_SIZE 256
3041
3042 #define MT7601U_DEFAULT_TX_POWER 6
3043 diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
3044 index 19e3c5a0b715..2ca5658bbc2a 100644
3045 --- a/drivers/net/wireless/ti/wlcore/main.c
3046 +++ b/drivers/net/wireless/ti/wlcore/main.c
3047 @@ -1084,8 +1084,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
3048 goto out;
3049
3050 ret = wl12xx_fetch_firmware(wl, plt);
3051 - if (ret < 0)
3052 - goto out;
3053 + if (ret < 0) {
3054 + kfree(wl->fw_status);
3055 + kfree(wl->raw_fw_status);
3056 + kfree(wl->tx_res_if);
3057 + }
3058
3059 out:
3060 return ret;
3061 diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
3062 index 3ed67676ea2a..e85c5a8206c4 100644
3063 --- a/drivers/pci/pcie/pme.c
3064 +++ b/drivers/pci/pcie/pme.c
3065 @@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus)
3066 return false;
3067 }
3068
3069 +static void pcie_pme_disable_interrupt(struct pci_dev *port,
3070 + struct pcie_pme_service_data *data)
3071 +{
3072 + spin_lock_irq(&data->lock);
3073 + pcie_pme_interrupt_enable(port, false);
3074 + pcie_clear_root_pme_status(port);
3075 + data->noirq = true;
3076 + spin_unlock_irq(&data->lock);
3077 +}
3078 +
3079 /**
3080 * pcie_pme_suspend - Suspend PCIe PME service device.
3081 * @srv: PCIe service device to suspend.
3082 @@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
3083 return 0;
3084 }
3085
3086 - spin_lock_irq(&data->lock);
3087 - pcie_pme_interrupt_enable(port, false);
3088 - pcie_clear_root_pme_status(port);
3089 - data->noirq = true;
3090 - spin_unlock_irq(&data->lock);
3091 + pcie_pme_disable_interrupt(port, data);
3092
3093 synchronize_irq(srv->irq);
3094
3095 @@ -427,9 +433,11 @@ static int pcie_pme_resume(struct pcie_device *srv)
3096 */
3097 static void pcie_pme_remove(struct pcie_device *srv)
3098 {
3099 - pcie_pme_suspend(srv);
3100 + struct pcie_pme_service_data *data = get_service_data(srv);
3101 +
3102 + pcie_pme_disable_interrupt(srv->port, data);
3103 free_irq(srv->irq, srv);
3104 - kfree(get_service_data(srv));
3105 + kfree(data);
3106 }
3107
3108 static struct pcie_port_service_driver pcie_pme_driver = {
3109 diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
3110 index e1a77b2de78a..3623f6489f49 100644
3111 --- a/drivers/perf/arm_spe_pmu.c
3112 +++ b/drivers/perf/arm_spe_pmu.c
3113 @@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event)
3114 {
3115 }
3116
3117 -static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
3118 - bool snapshot)
3119 +static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
3120 + int nr_pages, bool snapshot)
3121 {
3122 - int i;
3123 + int i, cpu = event->cpu;
3124 struct page **pglist;
3125 struct arm_spe_pmu_buf *buf;
3126
3127 diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
3128 index ead4beb5f55f..036124fd363c 100644
3129 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c
3130 +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
3131 @@ -346,6 +346,8 @@ static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
3132 static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
3133 static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
3134 static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
3135 +static const unsigned int eth_rxd3_pins[] = { DIF_2_P };
3136 +static const unsigned int eth_rxd2_pins[] = { DIF_2_N };
3137 static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
3138 static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
3139 static const unsigned int eth_mdc_pins[] = { DIF_4_P };
3140 @@ -571,6 +573,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
3141 GROUP(eth_ref_clk, 6, 8),
3142 GROUP(eth_mdc, 6, 9),
3143 GROUP(eth_mdio_en, 6, 10),
3144 + GROUP(eth_rxd3, 7, 22),
3145 + GROUP(eth_rxd2, 7, 23),
3146 };
3147
3148 static struct meson_pmx_group meson8b_aobus_groups[] = {
3149 @@ -720,7 +724,7 @@ static const char * const ethernet_groups[] = {
3150 "eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
3151 "eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
3152 "eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
3153 - "eth_txd2", "eth_txd3"
3154 + "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
3155 };
3156
3157 static const char * const i2c_a_groups[] = {
3158 diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
3159 index b6d44550d98c..eca16d00e310 100644
3160 --- a/drivers/platform/mellanox/mlxreg-hotplug.c
3161 +++ b/drivers/platform/mellanox/mlxreg-hotplug.c
3162 @@ -248,7 +248,8 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
3163 struct mlxreg_core_item *item)
3164 {
3165 struct mlxreg_core_data *data;
3166 - u32 asserted, regval, bit;
3167 + unsigned long asserted;
3168 + u32 regval, bit;
3169 int ret;
3170
3171 /*
3172 @@ -281,7 +282,7 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
3173 asserted = item->cache ^ regval;
3174 item->cache = regval;
3175
3176 - for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
3177 + for_each_set_bit(bit, &asserted, 8) {
3178 data = item->data + bit;
3179 if (regval & BIT(bit)) {
3180 if (item->inversed)
3181 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
3182 index d4f1259ff5a2..62d4b94e2531 100644
3183 --- a/drivers/platform/x86/ideapad-laptop.c
3184 +++ b/drivers/platform/x86/ideapad-laptop.c
3185 @@ -989,7 +989,7 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
3186 .ident = "Lenovo RESCUER R720-15IKBN",
3187 .matches = {
3188 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
3189 - DMI_MATCH(DMI_BOARD_NAME, "80WW"),
3190 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
3191 },
3192 },
3193 {
3194 diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
3195 index 6cf9b7fa5bf0..3201a83073b5 100644
3196 --- a/drivers/platform/x86/intel-hid.c
3197 +++ b/drivers/platform/x86/intel-hid.c
3198 @@ -373,7 +373,7 @@ wakeup:
3199 * the 5-button array, but still send notifies with power button
3200 * event code to this device object on power button actions.
3201 *
3202 - * Report the power button press; catch and ignore the button release.
3203 + * Report the power button press and release.
3204 */
3205 if (!priv->array) {
3206 if (event == 0xce) {
3207 @@ -382,8 +382,11 @@ wakeup:
3208 return;
3209 }
3210
3211 - if (event == 0xcf)
3212 + if (event == 0xcf) {
3213 + input_report_key(priv->input_dev, KEY_POWER, 0);
3214 + input_sync(priv->input_dev);
3215 return;
3216 + }
3217 }
3218
3219 /* 0xC0 is for HID events, other values are for 5 button array */
3220 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
3221 index 2d272a3e0176..e0dcdb3cc070 100644
3222 --- a/drivers/platform/x86/intel_pmc_core.c
3223 +++ b/drivers/platform/x86/intel_pmc_core.c
3224 @@ -333,7 +333,8 @@ static int pmc_core_ppfear_sts_show(struct seq_file *s, void *unused)
3225 index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
3226 pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
3227
3228 - for (index = 0; map[index].name; index++)
3229 + for (index = 0; map[index].name &&
3230 + index < pmcdev->map->ppfear_buckets * 8; index++)
3231 pmc_core_display_map(s, index, pf_regs[index / 8], map);
3232
3233 return 0;
3234 diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
3235 index 93a7e99e1f8b..3f9711b03cb4 100644
3236 --- a/drivers/platform/x86/intel_pmc_core.h
3237 +++ b/drivers/platform/x86/intel_pmc_core.h
3238 @@ -39,7 +39,7 @@
3239 #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
3240 #define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
3241 #define MTPMC_MASK 0xffff0000
3242 -#define PPFEAR_MAX_NUM_ENTRIES 5
3243 +#define PPFEAR_MAX_NUM_ENTRIES 12
3244 #define SPT_PPFEAR_NUM_ENTRIES 5
3245 #define SPT_PMC_READ_DISABLE_BIT 0x16
3246 #define SPT_PMC_MSG_FULL_STS_BIT 0x18
3247 diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
3248 index 21e20483bd91..e0239cf3f56d 100644
3249 --- a/drivers/regulator/act8865-regulator.c
3250 +++ b/drivers/regulator/act8865-regulator.c
3251 @@ -131,7 +131,7 @@
3252 * ACT8865 voltage number
3253 */
3254 #define ACT8865_VOLTAGE_NUM 64
3255 -#define ACT8600_SUDCDC_VOLTAGE_NUM 255
3256 +#define ACT8600_SUDCDC_VOLTAGE_NUM 256
3257
3258 struct act8865 {
3259 struct regmap *regmap;
3260 @@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
3261 REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
3262 REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
3263 REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
3264 - REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
3265 + REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
3266 + REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
3267 };
3268
3269 static struct regulator_ops act8865_ops = {
3270 diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
3271 index 8684bcec8ff4..00cc96341411 100644
3272 --- a/drivers/s390/net/ism_drv.c
3273 +++ b/drivers/s390/net/ism_drv.c
3274 @@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism)
3275
3276 static int unregister_sba(struct ism_dev *ism)
3277 {
3278 + int ret;
3279 +
3280 if (!ism->sba)
3281 return 0;
3282
3283 - if (ism_cmd_simple(ism, ISM_UNREG_SBA))
3284 + ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
3285 + if (ret && ret != ISM_ERROR)
3286 return -EIO;
3287
3288 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
3289 @@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism)
3290
3291 static int unregister_ieq(struct ism_dev *ism)
3292 {
3293 + int ret;
3294 +
3295 if (!ism->ieq)
3296 return 0;
3297
3298 - if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
3299 + ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
3300 + if (ret && ret != ISM_ERROR)
3301 return -EIO;
3302
3303 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
3304 @@ -288,7 +294,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
3305 cmd.request.dmb_tok = dmb->dmb_tok;
3306
3307 ret = ism_cmd(ism, &cmd);
3308 - if (ret)
3309 + if (ret && ret != ISM_ERROR)
3310 goto out;
3311
3312 ism_free_dmb(ism, dmb);
3313 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
3314 index 3f97ec4aac4b..780651c4fc0c 100644
3315 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
3316 +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
3317 @@ -1445,7 +1445,7 @@ bind_err:
3318 static struct bnx2fc_interface *
3319 bnx2fc_interface_create(struct bnx2fc_hba *hba,
3320 struct net_device *netdev,
3321 - enum fip_state fip_mode)
3322 + enum fip_mode fip_mode)
3323 {
3324 struct fcoe_ctlr_device *ctlr_dev;
3325 struct bnx2fc_interface *interface;
3326 diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
3327 index f46b312d04bc..6768b2e8148a 100644
3328 --- a/drivers/scsi/fcoe/fcoe.c
3329 +++ b/drivers/scsi/fcoe/fcoe.c
3330 @@ -390,7 +390,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
3331 * Returns: pointer to a struct fcoe_interface or NULL on error
3332 */
3333 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
3334 - enum fip_state fip_mode)
3335 + enum fip_mode fip_mode)
3336 {
3337 struct fcoe_ctlr_device *ctlr_dev;
3338 struct fcoe_ctlr *ctlr;
3339 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
3340 index 54da3166da8d..7dc4ffa24430 100644
3341 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
3342 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
3343 @@ -147,7 +147,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
3344 * fcoe_ctlr_init() - Initialize the FCoE Controller instance
3345 * @fip: The FCoE controller to initialize
3346 */
3347 -void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
3348 +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode)
3349 {
3350 fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
3351 fip->mode = mode;
3352 @@ -454,7 +454,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
3353 mutex_unlock(&fip->ctlr_mutex);
3354 fc_linkup(fip->lp);
3355 } else if (fip->state == FIP_ST_LINK_WAIT) {
3356 - fcoe_ctlr_set_state(fip, fip->mode);
3357 + if (fip->mode == FIP_MODE_NON_FIP)
3358 + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
3359 + else
3360 + fcoe_ctlr_set_state(fip, FIP_ST_AUTO);
3361 switch (fip->mode) {
3362 default:
3363 LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
3364 diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
3365 index f4909cd206d3..f15d5e1d56b1 100644
3366 --- a/drivers/scsi/fcoe/fcoe_transport.c
3367 +++ b/drivers/scsi/fcoe/fcoe_transport.c
3368 @@ -873,7 +873,7 @@ static int fcoe_transport_create(const char *buffer,
3369 int rc = -ENODEV;
3370 struct net_device *netdev = NULL;
3371 struct fcoe_transport *ft = NULL;
3372 - enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
3373 + enum fip_mode fip_mode = (enum fip_mode)kp->arg;
3374
3375 mutex_lock(&ft_mutex);
3376
3377 diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
3378 index a4e2e6aa9a6b..fd9d82c9033d 100644
3379 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c
3380 +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
3381 @@ -10,6 +10,7 @@
3382 */
3383
3384 #include "hisi_sas.h"
3385 +#include "../libsas/sas_internal.h"
3386 #define DRV_NAME "hisi_sas"
3387
3388 #define DEV_IS_GONE(dev) \
3389 @@ -809,7 +810,8 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
3390 spin_lock_irqsave(&task->task_state_lock, flags);
3391 task->task_state_flags &=
3392 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
3393 - task->task_state_flags |= SAS_TASK_STATE_DONE;
3394 + if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
3395 + task->task_state_flags |= SAS_TASK_STATE_DONE;
3396 spin_unlock_irqrestore(&task->task_state_lock, flags);
3397 }
3398
3399 @@ -1879,9 +1881,18 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
3400
3401 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
3402 {
3403 + struct asd_sas_phy *sas_phy = &phy->sas_phy;
3404 + struct sas_phy *sphy = sas_phy->phy;
3405 + struct sas_phy_data *d = sphy->hostdata;
3406 +
3407 phy->phy_attached = 0;
3408 phy->phy_type = 0;
3409 phy->port = NULL;
3410 +
3411 + if (d->enable)
3412 + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
3413 + else
3414 + sphy->negotiated_linkrate = SAS_PHY_DISABLED;
3415 }
3416
3417 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
3418 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
3419 index f6de7526ded5..acb503ea8f0c 100644
3420 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
3421 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
3422 @@ -4155,6 +4155,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
3423 if (megasas_create_frame_pool(instance)) {
3424 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
3425 megasas_free_cmds(instance);
3426 + return -ENOMEM;
3427 }
3428
3429 return 0;
3430 diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
3431 index 0a5dd5595dd3..cd61905ca2f5 100644
3432 --- a/drivers/scsi/qedf/qedf_main.c
3433 +++ b/drivers/scsi/qedf/qedf_main.c
3434 @@ -1418,7 +1418,7 @@ static struct libfc_function_template qedf_lport_template = {
3435
3436 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
3437 {
3438 - fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
3439 + fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
3440
3441 qedf->ctlr.send = qedf_fip_send;
3442 qedf->ctlr.get_src_addr = qedf_get_src_mac;
3443 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
3444 index 78ca63dfba4a..9a7e3a3bd5ce 100644
3445 --- a/drivers/scsi/scsi_scan.c
3446 +++ b/drivers/scsi/scsi_scan.c
3447 @@ -220,7 +220,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
3448 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
3449
3450 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
3451 - GFP_ATOMIC);
3452 + GFP_KERNEL);
3453 if (!sdev)
3454 goto out;
3455
3456 @@ -796,7 +796,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
3457 */
3458 sdev->inquiry = kmemdup(inq_result,
3459 max_t(size_t, sdev->inquiry_len, 36),
3460 - GFP_ATOMIC);
3461 + GFP_KERNEL);
3462 if (sdev->inquiry == NULL)
3463 return SCSI_SCAN_NO_RESPONSE;
3464
3465 @@ -1087,7 +1087,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
3466 if (!sdev)
3467 goto out;
3468
3469 - result = kmalloc(result_len, GFP_ATOMIC |
3470 + result = kmalloc(result_len, GFP_KERNEL |
3471 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
3472 if (!result)
3473 goto out_free_sdev;
3474 diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
3475 index 09c669e70d63..038abc377fdb 100644
3476 --- a/drivers/soc/qcom/qcom_gsbi.c
3477 +++ b/drivers/soc/qcom/qcom_gsbi.c
3478 @@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
3479 struct resource *res;
3480 void __iomem *base;
3481 struct gsbi_info *gsbi;
3482 - int i;
3483 + int i, ret;
3484 u32 mask, gsbi_num;
3485 const struct crci_config *config = NULL;
3486
3487 @@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
3488
3489 platform_set_drvdata(pdev, gsbi);
3490
3491 - return of_platform_populate(node, NULL, NULL, &pdev->dev);
3492 + ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3493 + if (ret)
3494 + clk_disable_unprepare(gsbi->hclk);
3495 + return ret;
3496 }
3497
3498 static int gsbi_remove(struct platform_device *pdev)
3499 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
3500 index a33ee8ef8b6b..51625703399e 100644
3501 --- a/drivers/soc/tegra/fuse/fuse-tegra.c
3502 +++ b/drivers/soc/tegra/fuse/fuse-tegra.c
3503 @@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
3504 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3505 fuse->phys = res->start;
3506 fuse->base = devm_ioremap_resource(&pdev->dev, res);
3507 - if (IS_ERR(fuse->base))
3508 - return PTR_ERR(fuse->base);
3509 + if (IS_ERR(fuse->base)) {
3510 + err = PTR_ERR(fuse->base);
3511 + fuse->base = base;
3512 + return err;
3513 + }
3514
3515 fuse->clk = devm_clk_get(&pdev->dev, "fuse");
3516 if (IS_ERR(fuse->clk)) {
3517 dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
3518 PTR_ERR(fuse->clk));
3519 + fuse->base = base;
3520 return PTR_ERR(fuse->clk);
3521 }
3522
3523 @@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
3524
3525 if (fuse->soc->probe) {
3526 err = fuse->soc->probe(fuse);
3527 - if (err < 0)
3528 + if (err < 0) {
3529 + fuse->base = base;
3530 return err;
3531 + }
3532 }
3533
3534 if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
3535 diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
3536 index d045b5568e0f..578aa6824ad3 100644
3537 --- a/drivers/staging/mt7621-spi/spi-mt7621.c
3538 +++ b/drivers/staging/mt7621-spi/spi-mt7621.c
3539 @@ -429,6 +429,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
3540 int status = 0;
3541 struct clk *clk;
3542 struct mt7621_spi_ops *ops;
3543 + int ret;
3544
3545 match = of_match_device(mt7621_spi_match, &pdev->dev);
3546 if (!match)
3547 @@ -476,7 +477,11 @@ static int mt7621_spi_probe(struct platform_device *pdev)
3548 rs->pending_write = 0;
3549 dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
3550
3551 - device_reset(&pdev->dev);
3552 + ret = device_reset(&pdev->dev);
3553 + if (ret) {
3554 + dev_err(&pdev->dev, "SPI reset failed!\n");
3555 + return ret;
3556 + }
3557
3558 mt7621_spi_reset(rs, 0);
3559
3560 diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
3561 index b9bcbe20a2be..c47188860e32 100644
3562 --- a/drivers/tty/serial/8250/8250_pxa.c
3563 +++ b/drivers/tty/serial/8250/8250_pxa.c
3564 @@ -113,6 +113,10 @@ static int serial_pxa_probe(struct platform_device *pdev)
3565 if (ret)
3566 return ret;
3567
3568 + ret = of_alias_get_id(pdev->dev.of_node, "serial");
3569 + if (ret >= 0)
3570 + uart.port.line = ret;
3571 +
3572 uart.port.type = PORT_XSCALE;
3573 uart.port.iotype = UPIO_MEM32;
3574 uart.port.mapbase = mmres->start;
3575 diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
3576 index bfdd5ad4116f..1cb80fe5f95c 100644
3577 --- a/drivers/tty/serial/atmel_serial.c
3578 +++ b/drivers/tty/serial/atmel_serial.c
3579 @@ -163,6 +163,8 @@ struct atmel_uart_port {
3580 unsigned int pending_status;
3581 spinlock_t lock_suspended;
3582
3583 + bool hd_start_rx; /* can start RX during half-duplex operation */
3584 +
3585 #ifdef CONFIG_PM
3586 struct {
3587 u32 cr;
3588 @@ -225,6 +227,12 @@ static inline void atmel_uart_write_char(struct uart_port *port, u8 value)
3589 __raw_writeb(value, port->membase + ATMEL_US_THR);
3590 }
3591
3592 +static inline int atmel_uart_is_half_duplex(struct uart_port *port)
3593 +{
3594 + return (port->rs485.flags & SER_RS485_ENABLED) &&
3595 + !(port->rs485.flags & SER_RS485_RX_DURING_TX);
3596 +}
3597 +
3598 #ifdef CONFIG_SERIAL_ATMEL_PDC
3599 static bool atmel_use_pdc_rx(struct uart_port *port)
3600 {
3601 @@ -481,9 +489,9 @@ static void atmel_stop_tx(struct uart_port *port)
3602 /* Disable interrupts */
3603 atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
3604
3605 - if ((port->rs485.flags & SER_RS485_ENABLED) &&
3606 - !(port->rs485.flags & SER_RS485_RX_DURING_TX))
3607 + if (atmel_uart_is_half_duplex(port))
3608 atmel_start_rx(port);
3609 +
3610 }
3611
3612 /*
3613 @@ -500,8 +508,7 @@ static void atmel_start_tx(struct uart_port *port)
3614 return;
3615
3616 if (atmel_use_pdc_tx(port) || atmel_use_dma_tx(port))
3617 - if ((port->rs485.flags & SER_RS485_ENABLED) &&
3618 - !(port->rs485.flags & SER_RS485_RX_DURING_TX))
3619 + if (atmel_uart_is_half_duplex(port))
3620 atmel_stop_rx(port);
3621
3622 if (atmel_use_pdc_tx(port))
3623 @@ -799,10 +806,14 @@ static void atmel_complete_tx_dma(void *arg)
3624 */
3625 if (!uart_circ_empty(xmit))
3626 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
3627 - else if ((port->rs485.flags & SER_RS485_ENABLED) &&
3628 - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
3629 - /* DMA done, stop TX, start RX for RS485 */
3630 - atmel_start_rx(port);
3631 + else if (atmel_uart_is_half_duplex(port)) {
3632 + /*
3633 + * DMA done, re-enable TXEMPTY and signal that we can stop
3634 + * TX and start RX for RS485
3635 + */
3636 + atmel_port->hd_start_rx = true;
3637 + atmel_uart_writel(port, ATMEL_US_IER,
3638 + atmel_port->tx_done_mask);
3639 }
3640
3641 spin_unlock_irqrestore(&port->lock, flags);
3642 @@ -1248,9 +1259,20 @@ atmel_handle_transmit(struct uart_port *port, unsigned int pending)
3643 struct atmel_uart_port *atmel_port = to_atmel_uart_port(port);
3644
3645 if (pending & atmel_port->tx_done_mask) {
3646 - /* Either PDC or interrupt transmission */
3647 atmel_uart_writel(port, ATMEL_US_IDR,
3648 atmel_port->tx_done_mask);
3649 +
3650 + /* Start RX if flag was set and FIFO is empty */
3651 + if (atmel_port->hd_start_rx) {
3652 + if (!(atmel_uart_readl(port, ATMEL_US_CSR)
3653 + & ATMEL_US_TXEMPTY))
3654 + dev_warn(port->dev, "Should start RX, but TX fifo is not empty\n");
3655 +
3656 + atmel_port->hd_start_rx = false;
3657 + atmel_start_rx(port);
3658 + return;
3659 + }
3660 +
3661 atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
3662 }
3663 }
3664 @@ -1377,8 +1399,7 @@ static void atmel_tx_pdc(struct uart_port *port)
3665 atmel_uart_writel(port, ATMEL_US_IER,
3666 atmel_port->tx_done_mask);
3667 } else {
3668 - if ((port->rs485.flags & SER_RS485_ENABLED) &&
3669 - !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
3670 + if (atmel_uart_is_half_duplex(port)) {
3671 /* DMA done, stop TX, start RX for RS485 */
3672 atmel_start_rx(port);
3673 }
3674 diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
3675 index ae3ce330200e..ee3aa57bc0e7 100644
3676 --- a/drivers/tty/tty_buffer.c
3677 +++ b/drivers/tty/tty_buffer.c
3678 @@ -26,7 +26,7 @@
3679 * Byte threshold to limit memory consumption for flip buffers.
3680 * The actual memory limit is > 2x this amount.
3681 */
3682 -#define TTYB_DEFAULT_MEM_LIMIT 65536
3683 +#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
3684
3685 /*
3686 * We default to dicing tty buffer allocations to this many characters
3687 diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
3688 index 85fc6db48e44..159b897c5e80 100644
3689 --- a/drivers/usb/chipidea/core.c
3690 +++ b/drivers/usb/chipidea/core.c
3691 @@ -935,8 +935,15 @@ static int ci_hdrc_probe(struct platform_device *pdev)
3692 } else if (ci->platdata->usb_phy) {
3693 ci->usb_phy = ci->platdata->usb_phy;
3694 } else {
3695 + ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys",
3696 + 0);
3697 ci->phy = devm_phy_get(dev->parent, "usb-phy");
3698 - ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
3699 +
3700 + /* Fallback to grabbing any registered USB2 PHY */
3701 + if (IS_ERR(ci->usb_phy) &&
3702 + PTR_ERR(ci->usb_phy) != -EPROBE_DEFER)
3703 + ci->usb_phy = devm_usb_get_phy(dev->parent,
3704 + USB_PHY_TYPE_USB2);
3705
3706 /* if both generic PHY and USB PHY layers aren't enabled */
3707 if (PTR_ERR(ci->phy) == -ENOSYS &&
3708 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3709 index 700fb626ad03..524104eed8a7 100644
3710 --- a/drivers/usb/dwc3/gadget.c
3711 +++ b/drivers/usb/dwc3/gadget.c
3712 @@ -3233,6 +3233,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
3713 goto err4;
3714 }
3715
3716 + dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
3717 +
3718 return 0;
3719
3720 err4:
3721 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
3722 index 31e8bf3578c8..aa15593a3ac4 100644
3723 --- a/drivers/usb/gadget/function/f_fs.c
3724 +++ b/drivers/usb/gadget/function/f_fs.c
3725 @@ -1008,6 +1008,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
3726 * condition with req->complete callback.
3727 */
3728 usb_ep_dequeue(ep->ep, req);
3729 + wait_for_completion(&done);
3730 interrupted = ep->status < 0;
3731 }
3732
3733 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
3734 index 6bde543452f2..7ddc0930e98c 100644
3735 --- a/drivers/video/backlight/pwm_bl.c
3736 +++ b/drivers/video/backlight/pwm_bl.c
3737 @@ -425,7 +425,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
3738 */
3739
3740 /* if the enable GPIO is disabled, do not enable the backlight */
3741 - if (pb->enable_gpio && gpiod_get_value(pb->enable_gpio) == 0)
3742 + if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
3743 return FB_BLANK_POWERDOWN;
3744
3745 /* The regulator is disabled, do not enable the backlight */
3746 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
3747 index 77cee99fc36c..c48f083d522a 100644
3748 --- a/drivers/video/fbdev/core/fbmem.c
3749 +++ b/drivers/video/fbdev/core/fbmem.c
3750 @@ -427,6 +427,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
3751 {
3752 unsigned int x;
3753
3754 + if (image->width > info->var.xres || image->height > info->var.yres)
3755 + return;
3756 +
3757 if (rotate == FB_ROTATE_UR) {
3758 for (x = 0;
3759 x < num && image->dx + image->width <= info->var.xres;
3760 diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
3761 index cba6b586bfbd..d97fcfc5e558 100644
3762 --- a/drivers/xen/gntdev-dmabuf.c
3763 +++ b/drivers/xen/gntdev-dmabuf.c
3764 @@ -80,6 +80,12 @@ struct gntdev_dmabuf_priv {
3765 struct list_head imp_list;
3766 /* This is the lock which protects dma_buf_xxx lists. */
3767 struct mutex lock;
3768 + /*
3769 + * We reference this file while exporting dma-bufs, so
3770 + * the grant device context is not destroyed while there are
3771 + * external users alive.
3772 + */
3773 + struct file *filp;
3774 };
3775
3776 /* DMA buffer export support. */
3777 @@ -311,6 +317,7 @@ static void dmabuf_exp_release(struct kref *kref)
3778
3779 dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
3780 list_del(&gntdev_dmabuf->next);
3781 + fput(gntdev_dmabuf->priv->filp);
3782 kfree(gntdev_dmabuf);
3783 }
3784
3785 @@ -423,6 +430,7 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
3786 mutex_lock(&args->dmabuf_priv->lock);
3787 list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
3788 mutex_unlock(&args->dmabuf_priv->lock);
3789 + get_file(gntdev_dmabuf->priv->filp);
3790 return 0;
3791
3792 fail:
3793 @@ -834,7 +842,7 @@ long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
3794 return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
3795 }
3796
3797 -struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
3798 +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
3799 {
3800 struct gntdev_dmabuf_priv *priv;
3801
3802 @@ -847,6 +855,8 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
3803 INIT_LIST_HEAD(&priv->exp_wait_list);
3804 INIT_LIST_HEAD(&priv->imp_list);
3805
3806 + priv->filp = filp;
3807 +
3808 return priv;
3809 }
3810
3811 diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
3812 index 7220a53d0fc5..3d9b9cf9d5a1 100644
3813 --- a/drivers/xen/gntdev-dmabuf.h
3814 +++ b/drivers/xen/gntdev-dmabuf.h
3815 @@ -14,7 +14,7 @@
3816 struct gntdev_dmabuf_priv;
3817 struct gntdev_priv;
3818
3819 -struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
3820 +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp);
3821
3822 void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
3823
3824 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
3825 index b0b02a501167..9d8e02cfd480 100644
3826 --- a/drivers/xen/gntdev.c
3827 +++ b/drivers/xen/gntdev.c
3828 @@ -600,7 +600,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
3829 mutex_init(&priv->lock);
3830
3831 #ifdef CONFIG_XEN_GNTDEV_DMABUF
3832 - priv->dmabuf_priv = gntdev_dmabuf_init();
3833 + priv->dmabuf_priv = gntdev_dmabuf_init(flip);
3834 if (IS_ERR(priv->dmabuf_priv)) {
3835 ret = PTR_ERR(priv->dmabuf_priv);
3836 kfree(priv);
3837 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
3838 index e1fcb28ad4cc..e46e83e87600 100644
3839 --- a/fs/btrfs/qgroup.c
3840 +++ b/fs/btrfs/qgroup.c
3841 @@ -2427,16 +2427,15 @@ out:
3842 /*
3843 * Two limits to commit transaction in advance.
3844 *
3845 - * For RATIO, it will be 1/RATIO of the remaining limit
3846 - * (excluding data and prealloc meta) as threshold.
3847 + * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
3848 * For SIZE, it will be in byte unit as threshold.
3849 */
3850 -#define QGROUP_PERTRANS_RATIO 32
3851 -#define QGROUP_PERTRANS_SIZE SZ_32M
3852 +#define QGROUP_FREE_RATIO 32
3853 +#define QGROUP_FREE_SIZE SZ_32M
3854 static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
3855 const struct btrfs_qgroup *qg, u64 num_bytes)
3856 {
3857 - u64 limit;
3858 + u64 free;
3859 u64 threshold;
3860
3861 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
3862 @@ -2455,20 +2454,21 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
3863 */
3864 if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
3865 BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
3866 - if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
3867 - limit = qg->max_excl;
3868 - else
3869 - limit = qg->max_rfer;
3870 - threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
3871 - qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
3872 - QGROUP_PERTRANS_RATIO;
3873 - threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
3874 + if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
3875 + free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
3876 + threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
3877 + QGROUP_FREE_SIZE);
3878 + } else {
3879 + free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
3880 + threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
3881 + QGROUP_FREE_SIZE);
3882 + }
3883
3884 /*
3885 * Use transaction_kthread to commit transaction, so we no
3886 * longer need to bother nested transaction nor lock context.
3887 */
3888 - if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
3889 + if (free < threshold)
3890 btrfs_commit_transaction_locksafe(fs_info);
3891 }
3892
3893 diff --git a/fs/buffer.c b/fs/buffer.c
3894 index c083c4b3c1e7..a550e0d8e965 100644
3895 --- a/fs/buffer.c
3896 +++ b/fs/buffer.c
3897 @@ -3027,6 +3027,13 @@ void guard_bio_eod(int op, struct bio *bio)
3898 /* Uhhuh. We've got a bio that straddles the device size! */
3899 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
3900
3901 + /*
3902 + * The bio contains more than one segment which spans EOD, just return
3903 + * and let IO layer turn it into an EIO
3904 + */
3905 + if (truncated_bytes > bvec->bv_len)
3906 + return;
3907 +
3908 /* Truncate the bio.. */
3909 bio->bi_iter.bi_size -= truncated_bytes;
3910 bvec->bv_len -= truncated_bytes;
3911 diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
3912 index 6b61df117fd4..563e2f6268c3 100644
3913 --- a/fs/cifs/cifs_dfs_ref.c
3914 +++ b/fs/cifs/cifs_dfs_ref.c
3915 @@ -271,9 +271,9 @@ static void dump_referral(const struct dfs_info3_param *ref)
3916 {
3917 cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
3918 cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
3919 - cifs_dbg(FYI, "DFS: fl: %hd, srv_type: %hd\n",
3920 + cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
3921 ref->flags, ref->server_type);
3922 - cifs_dbg(FYI, "DFS: ref_flags: %hd, path_consumed: %hd\n",
3923 + cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
3924 ref->ref_flag, ref->path_consumed);
3925 }
3926
3927 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
3928 index a5ea742654aa..f31339db45fd 100644
3929 --- a/fs/cifs/connect.c
3930 +++ b/fs/cifs/connect.c
3931 @@ -1347,6 +1347,11 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
3932 const char *delims = "/\\";
3933 size_t len;
3934
3935 + if (unlikely(!devname || !*devname)) {
3936 + cifs_dbg(VFS, "Device name not specified.\n");
3937 + return -EINVAL;
3938 + }
3939 +
3940 /* make sure we have a valid UNC double delimiter prefix */
3941 len = strspn(devname, delims);
3942 if (len != 2)
3943 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
3944 index 08761a6a039d..d847132ab027 100644
3945 --- a/fs/cifs/file.c
3946 +++ b/fs/cifs/file.c
3947 @@ -1631,8 +1631,20 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
3948 rc = server->ops->mand_unlock_range(cfile, flock, xid);
3949
3950 out:
3951 - if (flock->fl_flags & FL_POSIX && !rc)
3952 + if (flock->fl_flags & FL_POSIX) {
3953 + /*
3954 + * If this is a request to remove all locks because we
3955 + * are closing the file, it doesn't matter if the
3956 + * unlocking failed as both cifs.ko and the SMB server
3957 + * remove the lock on file close
3958 + */
3959 + if (rc) {
3960 + cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
3961 + if (!(flock->fl_flags & FL_CLOSE))
3962 + return rc;
3963 + }
3964 rc = locks_lock_file_wait(file, flock);
3965 + }
3966 return rc;
3967 }
3968
3969 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
3970 index 378151e09e91..47db8eb6cbcf 100644
3971 --- a/fs/cifs/smb1ops.c
3972 +++ b/fs/cifs/smb1ops.c
3973 @@ -308,7 +308,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
3974 remaining = tgt_total_cnt - total_in_tgt;
3975
3976 if (remaining < 0) {
3977 - cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n",
3978 + cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
3979 tgt_total_cnt, total_in_tgt);
3980 return -EPROTO;
3981 }
3982 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
3983 index 3d0db37d64ad..71f32d983384 100644
3984 --- a/fs/cifs/smb2pdu.c
3985 +++ b/fs/cifs/smb2pdu.c
3986 @@ -881,8 +881,14 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
3987 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
3988 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
3989 (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
3990 -
3991 - if (rc != 0) {
3992 + if (rc == -EOPNOTSUPP) {
3993 + /*
3994 + * Old Windows versions or Netapp SMB server can return
3995 + * not supported error. Client should accept it.
3996 + */
3997 + cifs_dbg(VFS, "Server does not support validate negotiate\n");
3998 + return 0;
3999 + } else if (rc != 0) {
4000 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
4001 rc = -EIO;
4002 goto out_free_inbuf;
4003 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
4004 index 9e96a0bd08d9..e1801b288847 100644
4005 --- a/fs/ext4/indirect.c
4006 +++ b/fs/ext4/indirect.c
4007 @@ -1219,6 +1219,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
4008 ext4_lblk_t offsets[4], offsets2[4];
4009 Indirect chain[4], chain2[4];
4010 Indirect *partial, *partial2;
4011 + Indirect *p = NULL, *p2 = NULL;
4012 ext4_lblk_t max_block;
4013 __le32 nr = 0, nr2 = 0;
4014 int n = 0, n2 = 0;
4015 @@ -1260,7 +1261,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
4016 }
4017
4018
4019 - partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4020 + partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
4021 if (nr) {
4022 if (partial == chain) {
4023 /* Shared branch grows from the inode */
4024 @@ -1285,13 +1286,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
4025 partial->p + 1,
4026 (__le32 *)partial->bh->b_data+addr_per_block,
4027 (chain+n-1) - partial);
4028 - BUFFER_TRACE(partial->bh, "call brelse");
4029 - brelse(partial->bh);
4030 partial--;
4031 }
4032
4033 end_range:
4034 - partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
4035 + partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
4036 if (nr2) {
4037 if (partial2 == chain2) {
4038 /*
4039 @@ -1321,16 +1320,14 @@ end_range:
4040 (__le32 *)partial2->bh->b_data,
4041 partial2->p,
4042 (chain2+n2-1) - partial2);
4043 - BUFFER_TRACE(partial2->bh, "call brelse");
4044 - brelse(partial2->bh);
4045 partial2--;
4046 }
4047 goto do_indirects;
4048 }
4049
4050 /* Punch happened within the same level (n == n2) */
4051 - partial = ext4_find_shared(inode, n, offsets, chain, &nr);
4052 - partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
4053 + partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
4054 + partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
4055
4056 /* Free top, but only if partial2 isn't its subtree. */
4057 if (nr) {
4058 @@ -1387,15 +1384,7 @@ end_range:
4059 partial->p + 1,
4060 partial2->p,
4061 (chain+n-1) - partial);
4062 - while (partial > chain) {
4063 - BUFFER_TRACE(partial->bh, "call brelse");
4064 - brelse(partial->bh);
4065 - }
4066 - while (partial2 > chain2) {
4067 - BUFFER_TRACE(partial2->bh, "call brelse");
4068 - brelse(partial2->bh);
4069 - }
4070 - return 0;
4071 + goto cleanup;
4072 }
4073
4074 /*
4075 @@ -1410,8 +1399,6 @@ end_range:
4076 partial->p + 1,
4077 (__le32 *)partial->bh->b_data+addr_per_block,
4078 (chain+n-1) - partial);
4079 - BUFFER_TRACE(partial->bh, "call brelse");
4080 - brelse(partial->bh);
4081 partial--;
4082 }
4083 if (partial2 > chain2 && depth2 <= depth) {
4084 @@ -1419,11 +1406,21 @@ end_range:
4085 (__le32 *)partial2->bh->b_data,
4086 partial2->p,
4087 (chain2+n2-1) - partial2);
4088 - BUFFER_TRACE(partial2->bh, "call brelse");
4089 - brelse(partial2->bh);
4090 partial2--;
4091 }
4092 }
4093 +
4094 +cleanup:
4095 + while (p && p > chain) {
4096 + BUFFER_TRACE(p->bh, "call brelse");
4097 + brelse(p->bh);
4098 + p--;
4099 + }
4100 + while (p2 && p2 > chain2) {
4101 + BUFFER_TRACE(p2->bh, "call brelse");
4102 + brelse(p2->bh);
4103 + p2--;
4104 + }
4105 return 0;
4106
4107 do_indirects:
4108 @@ -1431,7 +1428,7 @@ do_indirects:
4109 switch (offsets[0]) {
4110 default:
4111 if (++n >= n2)
4112 - return 0;
4113 + break;
4114 nr = i_data[EXT4_IND_BLOCK];
4115 if (nr) {
4116 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
4117 @@ -1439,7 +1436,7 @@ do_indirects:
4118 }
4119 case EXT4_IND_BLOCK:
4120 if (++n >= n2)
4121 - return 0;
4122 + break;
4123 nr = i_data[EXT4_DIND_BLOCK];
4124 if (nr) {
4125 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
4126 @@ -1447,7 +1444,7 @@ do_indirects:
4127 }
4128 case EXT4_DIND_BLOCK:
4129 if (++n >= n2)
4130 - return 0;
4131 + break;
4132 nr = i_data[EXT4_TIND_BLOCK];
4133 if (nr) {
4134 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
4135 @@ -1456,5 +1453,5 @@ do_indirects:
4136 case EXT4_TIND_BLOCK:
4137 ;
4138 }
4139 - return 0;
4140 + goto cleanup;
4141 }
4142 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
4143 index 42aef5c94927..a3ba20e5946f 100644
4144 --- a/fs/f2fs/f2fs.h
4145 +++ b/fs/f2fs/f2fs.h
4146 @@ -450,7 +450,6 @@ struct f2fs_flush_device {
4147
4148 /* for inline stuff */
4149 #define DEF_INLINE_RESERVED_SIZE 1
4150 -#define DEF_MIN_INLINE_SIZE 1
4151 static inline int get_extra_isize(struct inode *inode);
4152 static inline int get_inline_xattr_addrs(struct inode *inode);
4153 #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
4154 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
4155 index 115dc219344b..92703efde36e 100644
4156 --- a/fs/f2fs/inline.c
4157 +++ b/fs/f2fs/inline.c
4158 @@ -661,6 +661,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4159 if (IS_ERR(ipage))
4160 return PTR_ERR(ipage);
4161
4162 + /*
4163 + * f2fs_readdir was protected by inode.i_rwsem, it is safe to access
4164 + * ipage without page's lock held.
4165 + */
4166 + unlock_page(ipage);
4167 +
4168 inline_dentry = inline_data_addr(inode, ipage);
4169
4170 make_dentry_ptr_inline(inode, &d, inline_dentry);
4171 @@ -669,7 +675,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
4172 if (!err)
4173 ctx->pos = d.max;
4174
4175 - f2fs_put_page(ipage, 1);
4176 + f2fs_put_page(ipage, 0);
4177 return err < 0 ? err : 0;
4178 }
4179
4180 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
4181 index c9639ef0e8d5..79370b7fa9d2 100644
4182 --- a/fs/f2fs/super.c
4183 +++ b/fs/f2fs/super.c
4184 @@ -822,12 +822,13 @@ static int parse_options(struct super_block *sb, char *options)
4185 "set with inline_xattr option");
4186 return -EINVAL;
4187 }
4188 - if (!F2FS_OPTION(sbi).inline_xattr_size ||
4189 - F2FS_OPTION(sbi).inline_xattr_size >=
4190 - DEF_ADDRS_PER_INODE -
4191 - F2FS_TOTAL_EXTRA_ATTR_SIZE -
4192 - DEF_INLINE_RESERVED_SIZE -
4193 - DEF_MIN_INLINE_SIZE) {
4194 + if (F2FS_OPTION(sbi).inline_xattr_size <
4195 + sizeof(struct f2fs_xattr_header) / sizeof(__le32) ||
4196 + F2FS_OPTION(sbi).inline_xattr_size >
4197 + DEF_ADDRS_PER_INODE -
4198 + F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) -
4199 + DEF_INLINE_RESERVED_SIZE -
4200 + MIN_INLINE_DENTRY_SIZE / sizeof(__le32)) {
4201 f2fs_msg(sb, KERN_ERR,
4202 "inline xattr size is out of range");
4203 return -EINVAL;
4204 diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
4205 index 81c0e5337443..98887187af4c 100644
4206 --- a/fs/f2fs/sysfs.c
4207 +++ b/fs/f2fs/sysfs.c
4208 @@ -273,10 +273,16 @@ out:
4209 return count;
4210 }
4211
4212 - *ui = t;
4213
4214 - if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
4215 - f2fs_reset_iostat(sbi);
4216 + if (!strcmp(a->attr.name, "iostat_enable")) {
4217 + sbi->iostat_enable = !!t;
4218 + if (!sbi->iostat_enable)
4219 + f2fs_reset_iostat(sbi);
4220 + return count;
4221 + }
4222 +
4223 + *ui = (unsigned int)t;
4224 +
4225 return count;
4226 }
4227
4228 diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
4229 index a1fcd00bbb2b..8ac1851a21c0 100644
4230 --- a/fs/f2fs/trace.c
4231 +++ b/fs/f2fs/trace.c
4232 @@ -17,7 +17,7 @@
4233 #include "trace.h"
4234
4235 static RADIX_TREE(pids, GFP_ATOMIC);
4236 -static struct mutex pids_lock;
4237 +static spinlock_t pids_lock;
4238 static struct last_io_info last_io;
4239
4240 static inline void __print_last_io(void)
4241 @@ -61,23 +61,29 @@ void f2fs_trace_pid(struct page *page)
4242
4243 set_page_private(page, (unsigned long)pid);
4244
4245 +retry:
4246 if (radix_tree_preload(GFP_NOFS))
4247 return;
4248
4249 - mutex_lock(&pids_lock);
4250 + spin_lock(&pids_lock);
4251 p = radix_tree_lookup(&pids, pid);
4252 if (p == current)
4253 goto out;
4254 if (p)
4255 radix_tree_delete(&pids, pid);
4256
4257 - f2fs_radix_tree_insert(&pids, pid, current);
4258 + if (radix_tree_insert(&pids, pid, current)) {
4259 + spin_unlock(&pids_lock);
4260 + radix_tree_preload_end();
4261 + cond_resched();
4262 + goto retry;
4263 + }
4264
4265 trace_printk("%3x:%3x %4x %-16s\n",
4266 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
4267 pid, current->comm);
4268 out:
4269 - mutex_unlock(&pids_lock);
4270 + spin_unlock(&pids_lock);
4271 radix_tree_preload_end();
4272 }
4273
4274 @@ -122,7 +128,7 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
4275
4276 void f2fs_build_trace_ios(void)
4277 {
4278 - mutex_init(&pids_lock);
4279 + spin_lock_init(&pids_lock);
4280 }
4281
4282 #define PIDVEC_SIZE 128
4283 @@ -150,7 +156,7 @@ void f2fs_destroy_trace_ios(void)
4284 pid_t next_pid = 0;
4285 unsigned int found;
4286
4287 - mutex_lock(&pids_lock);
4288 + spin_lock(&pids_lock);
4289 while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
4290 unsigned idx;
4291
4292 @@ -158,5 +164,5 @@ void f2fs_destroy_trace_ios(void)
4293 for (idx = 0; idx < found; idx++)
4294 radix_tree_delete(&pids, pid[idx]);
4295 }
4296 - mutex_unlock(&pids_lock);
4297 + spin_unlock(&pids_lock);
4298 }
4299 diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
4300 index 087e53a2d96c..409a637f7a92 100644
4301 --- a/fs/f2fs/xattr.c
4302 +++ b/fs/f2fs/xattr.c
4303 @@ -227,11 +227,11 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
4304 {
4305 struct f2fs_xattr_entry *entry;
4306 unsigned int inline_size = inline_xattr_size(inode);
4307 + void *max_addr = base_addr + inline_size;
4308
4309 list_for_each_xattr(entry, base_addr) {
4310 - if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
4311 - (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) >
4312 - base_addr + inline_size) {
4313 + if ((void *)entry + sizeof(__u32) > max_addr ||
4314 + (void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
4315 *last_addr = entry;
4316 return NULL;
4317 }
4318 @@ -242,6 +242,13 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
4319 if (!memcmp(entry->e_name, name, len))
4320 break;
4321 }
4322 +
4323 + /* inline xattr header or entry across max inline xattr size */
4324 + if (IS_XATTR_LAST_ENTRY(entry) &&
4325 + (void *)entry + sizeof(__u32) > max_addr) {
4326 + *last_addr = entry;
4327 + return NULL;
4328 + }
4329 return entry;
4330 }
4331
4332 diff --git a/fs/file.c b/fs/file.c
4333 index 7ffd6e9d103d..780d29e58847 100644
4334 --- a/fs/file.c
4335 +++ b/fs/file.c
4336 @@ -457,6 +457,7 @@ struct files_struct init_files = {
4337 .full_fds_bits = init_files.full_fds_bits_init,
4338 },
4339 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
4340 + .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
4341 };
4342
4343 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
4344 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
4345 index 150cc030b4d7..65ea0355a4f6 100644
4346 --- a/fs/jbd2/commit.c
4347 +++ b/fs/jbd2/commit.c
4348 @@ -691,9 +691,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
4349 the last tag we set up. */
4350
4351 tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
4352 -
4353 - jbd2_descriptor_block_csum_set(journal, descriptor);
4354 start_journal_io:
4355 + if (descriptor)
4356 + jbd2_descriptor_block_csum_set(journal,
4357 + descriptor);
4358 +
4359 for (i = 0; i < bufs; i++) {
4360 struct buffer_head *bh = wbuf[i];
4361 /*
4362 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
4363 index 8ef6b6daaa7a..88f2a49338a1 100644
4364 --- a/fs/jbd2/journal.c
4365 +++ b/fs/jbd2/journal.c
4366 @@ -1356,6 +1356,10 @@ static int journal_reset(journal_t *journal)
4367 return jbd2_journal_start_thread(journal);
4368 }
4369
4370 +/*
4371 + * This function expects that the caller will have locked the journal
4372 + * buffer head, and will return with it unlocked
4373 + */
4374 static int jbd2_write_superblock(journal_t *journal, int write_flags)
4375 {
4376 struct buffer_head *bh = journal->j_sb_buffer;
4377 @@ -1365,7 +1369,6 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
4378 trace_jbd2_write_superblock(journal, write_flags);
4379 if (!(journal->j_flags & JBD2_BARRIER))
4380 write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
4381 - lock_buffer(bh);
4382 if (buffer_write_io_error(bh)) {
4383 /*
4384 * Oh, dear. A previous attempt to write the journal
4385 @@ -1424,6 +1427,7 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
4386 jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
4387 tail_block, tail_tid);
4388
4389 + lock_buffer(journal->j_sb_buffer);
4390 sb->s_sequence = cpu_to_be32(tail_tid);
4391 sb->s_start = cpu_to_be32(tail_block);
4392
4393 @@ -1454,18 +1458,17 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
4394 journal_superblock_t *sb = journal->j_superblock;
4395
4396 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
4397 - read_lock(&journal->j_state_lock);
4398 - /* Is it already empty? */
4399 - if (sb->s_start == 0) {
4400 - read_unlock(&journal->j_state_lock);
4401 + lock_buffer(journal->j_sb_buffer);
4402 + if (sb->s_start == 0) { /* Is it already empty? */
4403 + unlock_buffer(journal->j_sb_buffer);
4404 return;
4405 }
4406 +
4407 jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
4408 journal->j_tail_sequence);
4409
4410 sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
4411 sb->s_start = cpu_to_be32(0);
4412 - read_unlock(&journal->j_state_lock);
4413
4414 jbd2_write_superblock(journal, write_op);
4415
4416 @@ -1488,9 +1491,8 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
4417 journal_superblock_t *sb = journal->j_superblock;
4418 int errcode;
4419
4420 - read_lock(&journal->j_state_lock);
4421 + lock_buffer(journal->j_sb_buffer);
4422 errcode = journal->j_errno;
4423 - read_unlock(&journal->j_state_lock);
4424 if (errcode == -ESHUTDOWN)
4425 errcode = 0;
4426 jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
4427 @@ -1894,28 +1896,27 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
4428
4429 sb = journal->j_superblock;
4430
4431 + /* Load the checksum driver if necessary */
4432 + if ((journal->j_chksum_driver == NULL) &&
4433 + INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
4434 + journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
4435 + if (IS_ERR(journal->j_chksum_driver)) {
4436 + printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
4437 + journal->j_chksum_driver = NULL;
4438 + return 0;
4439 + }
4440 + /* Precompute checksum seed for all metadata */
4441 + journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
4442 + sizeof(sb->s_uuid));
4443 + }
4444 +
4445 + lock_buffer(journal->j_sb_buffer);
4446 +
4447 /* If enabling v3 checksums, update superblock */
4448 if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
4449 sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
4450 sb->s_feature_compat &=
4451 ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
4452 -
4453 - /* Load the checksum driver */
4454 - if (journal->j_chksum_driver == NULL) {
4455 - journal->j_chksum_driver = crypto_alloc_shash("crc32c",
4456 - 0, 0);
4457 - if (IS_ERR(journal->j_chksum_driver)) {
4458 - printk(KERN_ERR "JBD2: Cannot load crc32c "
4459 - "driver.\n");
4460 - journal->j_chksum_driver = NULL;
4461 - return 0;
4462 - }
4463 -
4464 - /* Precompute checksum seed for all metadata */
4465 - journal->j_csum_seed = jbd2_chksum(journal, ~0,
4466 - sb->s_uuid,
4467 - sizeof(sb->s_uuid));
4468 - }
4469 }
4470
4471 /* If enabling v1 checksums, downgrade superblock */
4472 @@ -1927,6 +1928,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
4473 sb->s_feature_compat |= cpu_to_be32(compat);
4474 sb->s_feature_ro_compat |= cpu_to_be32(ro);
4475 sb->s_feature_incompat |= cpu_to_be32(incompat);
4476 + unlock_buffer(journal->j_sb_buffer);
4477
4478 return 1;
4479 #undef COMPAT_FEATURE_ON
4480 diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
4481 index 0e4166cc23a0..4ac775e32240 100644
4482 --- a/fs/ocfs2/cluster/nodemanager.c
4483 +++ b/fs/ocfs2/cluster/nodemanager.c
4484 @@ -621,13 +621,15 @@ static void o2nm_node_group_drop_item(struct config_group *group,
4485 struct o2nm_node *node = to_o2nm_node(item);
4486 struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
4487
4488 - o2net_disconnect_node(node);
4489 + if (cluster->cl_nodes[node->nd_num] == node) {
4490 + o2net_disconnect_node(node);
4491
4492 - if (cluster->cl_has_local &&
4493 - (cluster->cl_local_node == node->nd_num)) {
4494 - cluster->cl_has_local = 0;
4495 - cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
4496 - o2net_stop_listening(node);
4497 + if (cluster->cl_has_local &&
4498 + (cluster->cl_local_node == node->nd_num)) {
4499 + cluster->cl_has_local = 0;
4500 + cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
4501 + o2net_stop_listening(node);
4502 + }
4503 }
4504
4505 /* XXX call into net to stop this node from trading messages */
4506 diff --git a/fs/read_write.c b/fs/read_write.c
4507 index 8a2737f0d61d..562974a0616c 100644
4508 --- a/fs/read_write.c
4509 +++ b/fs/read_write.c
4510 @@ -1241,6 +1241,9 @@ COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
4511 const struct compat_iovec __user *,vec,
4512 unsigned long, vlen, loff_t, pos, rwf_t, flags)
4513 {
4514 + if (pos == -1)
4515 + return do_compat_readv(fd, vec, vlen, flags);
4516 +
4517 return do_compat_preadv64(fd, vec, vlen, pos, flags);
4518 }
4519 #endif
4520 @@ -1347,6 +1350,9 @@ COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
4521 const struct compat_iovec __user *,vec,
4522 unsigned long, vlen, loff_t, pos, rwf_t, flags)
4523 {
4524 + if (pos == -1)
4525 + return do_compat_writev(fd, vec, vlen, flags);
4526 +
4527 return do_compat_pwritev64(fd, vec, vlen, pos, flags);
4528 }
4529 #endif
4530 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
4531 index 22254c1fe1c5..6002275937f5 100644
4532 --- a/include/linux/cgroup-defs.h
4533 +++ b/include/linux/cgroup-defs.h
4534 @@ -597,7 +597,7 @@ struct cgroup_subsys {
4535 void (*cancel_fork)(struct task_struct *task);
4536 void (*fork)(struct task_struct *task);
4537 void (*exit)(struct task_struct *task);
4538 - void (*free)(struct task_struct *task);
4539 + void (*release)(struct task_struct *task);
4540 void (*bind)(struct cgroup_subsys_state *root_css);
4541
4542 bool early_init:1;
4543 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
4544 index 32c553556bbd..ca51b2c15bcc 100644
4545 --- a/include/linux/cgroup.h
4546 +++ b/include/linux/cgroup.h
4547 @@ -119,6 +119,7 @@ extern int cgroup_can_fork(struct task_struct *p);
4548 extern void cgroup_cancel_fork(struct task_struct *p);
4549 extern void cgroup_post_fork(struct task_struct *p);
4550 void cgroup_exit(struct task_struct *p);
4551 +void cgroup_release(struct task_struct *p);
4552 void cgroup_free(struct task_struct *p);
4553
4554 int cgroup_init_early(void);
4555 @@ -699,6 +700,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
4556 static inline void cgroup_cancel_fork(struct task_struct *p) {}
4557 static inline void cgroup_post_fork(struct task_struct *p) {}
4558 static inline void cgroup_exit(struct task_struct *p) {}
4559 +static inline void cgroup_release(struct task_struct *p) {}
4560 static inline void cgroup_free(struct task_struct *p) {}
4561
4562 static inline int cgroup_init_early(void) { return 0; }
4563 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
4564 index 08b1aa70a38d..d1b6d2c3ada6 100644
4565 --- a/include/linux/clk-provider.h
4566 +++ b/include/linux/clk-provider.h
4567 @@ -782,6 +782,9 @@ unsigned int __clk_get_enable_count(struct clk *clk);
4568 unsigned long clk_hw_get_rate(const struct clk_hw *hw);
4569 unsigned long __clk_get_flags(struct clk *clk);
4570 unsigned long clk_hw_get_flags(const struct clk_hw *hw);
4571 +#define clk_hw_can_set_rate_parent(hw) \
4572 + (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
4573 +
4574 bool clk_hw_is_prepared(const struct clk_hw *hw);
4575 bool clk_hw_rate_is_protected(const struct clk_hw *hw);
4576 bool clk_hw_is_enabled(const struct clk_hw *hw);
4577 diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
4578 index f70f8ac9c4f4..40fec5f94949 100644
4579 --- a/include/linux/f2fs_fs.h
4580 +++ b/include/linux/f2fs_fs.h
4581 @@ -489,12 +489,12 @@ typedef __le32 f2fs_hash_t;
4582
4583 /*
4584 * space utilization of regular dentry and inline dentry (w/o extra reservation)
4585 - * regular dentry inline dentry
4586 - * bitmap 1 * 27 = 27 1 * 23 = 23
4587 - * reserved 1 * 3 = 3 1 * 7 = 7
4588 - * dentry 11 * 214 = 2354 11 * 182 = 2002
4589 - * filename 8 * 214 = 1712 8 * 182 = 1456
4590 - * total 4096 3488
4591 + * regular dentry inline dentry (def) inline dentry (min)
4592 + * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
4593 + * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
4594 + * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22
4595 + * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16
4596 + * total 4096 3488 40
4597 *
4598 * Note: there are more reserved space in inline dentry than in regular
4599 * dentry, when converting inline dentry we should handle this carefully.
4600 @@ -506,6 +506,7 @@ typedef __le32 f2fs_hash_t;
4601 #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
4602 F2FS_SLOT_LEN) * \
4603 NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
4604 +#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
4605
4606 /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
4607 struct f2fs_dir_entry {
4608 diff --git a/include/linux/filter.h b/include/linux/filter.h
4609 index 1a39d57eb88f..037610845892 100644
4610 --- a/include/linux/filter.h
4611 +++ b/include/linux/filter.h
4612 @@ -844,7 +844,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
4613 unsigned int alignment,
4614 bpf_jit_fill_hole_t bpf_fill_ill_insns);
4615 void bpf_jit_binary_free(struct bpf_binary_header *hdr);
4616 -
4617 +u64 bpf_jit_alloc_exec_limit(void);
4618 +void *bpf_jit_alloc_exec(unsigned long size);
4619 +void bpf_jit_free_exec(void *addr);
4620 void bpf_jit_free(struct bpf_prog *fp);
4621
4622 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *fp);
4623 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
4624 index dd1e40ddac7d..875c41b23f20 100644
4625 --- a/include/linux/irqdesc.h
4626 +++ b/include/linux/irqdesc.h
4627 @@ -65,6 +65,7 @@ struct irq_desc {
4628 unsigned int core_internal_state__do_not_mess_with_it;
4629 unsigned int depth; /* nested irq disables */
4630 unsigned int wake_depth; /* nested wake enables */
4631 + unsigned int tot_count;
4632 unsigned int irq_count; /* For detecting broken IRQs */
4633 unsigned long last_unhandled; /* Aging timer for unhandled count */
4634 unsigned int irqs_unhandled;
4635 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
4636 index c2876e740514..42fc852bf512 100644
4637 --- a/include/linux/perf_event.h
4638 +++ b/include/linux/perf_event.h
4639 @@ -409,7 +409,7 @@ struct pmu {
4640 /*
4641 * Set up pmu-private data structures for an AUX area
4642 */
4643 - void *(*setup_aux) (int cpu, void **pages,
4644 + void *(*setup_aux) (struct perf_event *event, void **pages,
4645 int nr_pages, bool overwrite);
4646 /* optional */
4647
4648 diff --git a/include/linux/relay.h b/include/linux/relay.h
4649 index e1bdf01a86e2..c759f96e39c1 100644
4650 --- a/include/linux/relay.h
4651 +++ b/include/linux/relay.h
4652 @@ -66,7 +66,7 @@ struct rchan
4653 struct kref kref; /* channel refcount */
4654 void *private_data; /* for user-defined data */
4655 size_t last_toobig; /* tried to log event > subbuf size */
4656 - struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
4657 + struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
4658 int is_global; /* One global buffer ? */
4659 struct list_head list; /* for channel list */
4660 struct dentry *parent; /* parent dentry passed to open */
4661 diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
4662 index 0940fda59872..941bfd9b3c89 100644
4663 --- a/include/linux/ring_buffer.h
4664 +++ b/include/linux/ring_buffer.h
4665 @@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
4666 unsigned long *lost_events);
4667
4668 struct ring_buffer_iter *
4669 -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
4670 +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
4671 void ring_buffer_read_prepare_sync(void);
4672 void ring_buffer_read_start(struct ring_buffer_iter *iter);
4673 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
4674 diff --git a/include/linux/sched.h b/include/linux/sched.h
4675 index 4abb5bd74b04..5dc024e28397 100644
4676 --- a/include/linux/sched.h
4677 +++ b/include/linux/sched.h
4678 @@ -1737,9 +1737,9 @@ static __always_inline bool need_resched(void)
4679 static inline unsigned int task_cpu(const struct task_struct *p)
4680 {
4681 #ifdef CONFIG_THREAD_INFO_IN_TASK
4682 - return p->cpu;
4683 + return READ_ONCE(p->cpu);
4684 #else
4685 - return task_thread_info(p)->cpu;
4686 + return READ_ONCE(task_thread_info(p)->cpu);
4687 #endif
4688 }
4689
4690 diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
4691 index 26347741ba50..15f3f61f7e3b 100644
4692 --- a/include/linux/sched/topology.h
4693 +++ b/include/linux/sched/topology.h
4694 @@ -177,10 +177,10 @@ typedef int (*sched_domain_flags_f)(void);
4695 #define SDTL_OVERLAP 0x01
4696
4697 struct sd_data {
4698 - struct sched_domain **__percpu sd;
4699 - struct sched_domain_shared **__percpu sds;
4700 - struct sched_group **__percpu sg;
4701 - struct sched_group_capacity **__percpu sgc;
4702 + struct sched_domain *__percpu *sd;
4703 + struct sched_domain_shared *__percpu *sds;
4704 + struct sched_group *__percpu *sg;
4705 + struct sched_group_capacity *__percpu *sgc;
4706 };
4707
4708 struct sched_domain_topology_level {
4709 diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
4710 index 74af19c3a8f7..a4ba601b5d04 100644
4711 --- a/include/net/netfilter/br_netfilter.h
4712 +++ b/include/net/netfilter/br_netfilter.h
4713 @@ -49,7 +49,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
4714 }
4715
4716 struct net_device *setup_pre_routing(struct sk_buff *skb);
4717 -void br_netfilter_enable(void);
4718
4719 #if IS_ENABLED(CONFIG_IPV6)
4720 int br_validate_ipv6(struct net *net, struct sk_buff *skb);
4721 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
4722 index cb8a273732cf..bb8092fa1e36 100644
4723 --- a/include/scsi/libfcoe.h
4724 +++ b/include/scsi/libfcoe.h
4725 @@ -79,7 +79,7 @@ enum fip_state {
4726 * It must not change after fcoe_ctlr_init() sets it.
4727 */
4728 enum fip_mode {
4729 - FIP_MODE_AUTO = FIP_ST_AUTO,
4730 + FIP_MODE_AUTO,
4731 FIP_MODE_NON_FIP,
4732 FIP_MODE_FABRIC,
4733 FIP_MODE_VN2VN,
4734 @@ -250,7 +250,7 @@ struct fcoe_rport {
4735 };
4736
4737 /* FIP API functions */
4738 -void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state);
4739 +void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_mode);
4740 void fcoe_ctlr_destroy(struct fcoe_ctlr *);
4741 void fcoe_ctlr_link_up(struct fcoe_ctlr *);
4742 int fcoe_ctlr_link_down(struct fcoe_ctlr *);
4743 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
4744 index e710ac7fbbbf..63dae7e0ccae 100644
4745 --- a/kernel/cgroup/cgroup.c
4746 +++ b/kernel/cgroup/cgroup.c
4747 @@ -195,7 +195,7 @@ static u64 css_serial_nr_next = 1;
4748 */
4749 static u16 have_fork_callback __read_mostly;
4750 static u16 have_exit_callback __read_mostly;
4751 -static u16 have_free_callback __read_mostly;
4752 +static u16 have_release_callback __read_mostly;
4753 static u16 have_canfork_callback __read_mostly;
4754
4755 /* cgroup namespace for init task */
4756 @@ -5240,7 +5240,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
4757
4758 have_fork_callback |= (bool)ss->fork << ss->id;
4759 have_exit_callback |= (bool)ss->exit << ss->id;
4760 - have_free_callback |= (bool)ss->free << ss->id;
4761 + have_release_callback |= (bool)ss->release << ss->id;
4762 have_canfork_callback |= (bool)ss->can_fork << ss->id;
4763
4764 /* At system boot, before all subsystems have been
4765 @@ -5676,16 +5676,19 @@ void cgroup_exit(struct task_struct *tsk)
4766 } while_each_subsys_mask();
4767 }
4768
4769 -void cgroup_free(struct task_struct *task)
4770 +void cgroup_release(struct task_struct *task)
4771 {
4772 - struct css_set *cset = task_css_set(task);
4773 struct cgroup_subsys *ss;
4774 int ssid;
4775
4776 - do_each_subsys_mask(ss, ssid, have_free_callback) {
4777 - ss->free(task);
4778 + do_each_subsys_mask(ss, ssid, have_release_callback) {
4779 + ss->release(task);
4780 } while_each_subsys_mask();
4781 +}
4782
4783 +void cgroup_free(struct task_struct *task)
4784 +{
4785 + struct css_set *cset = task_css_set(task);
4786 put_css_set(cset);
4787 }
4788
4789 diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
4790 index 9829c67ebc0a..c9960baaa14f 100644
4791 --- a/kernel/cgroup/pids.c
4792 +++ b/kernel/cgroup/pids.c
4793 @@ -247,7 +247,7 @@ static void pids_cancel_fork(struct task_struct *task)
4794 pids_uncharge(pids, 1);
4795 }
4796
4797 -static void pids_free(struct task_struct *task)
4798 +static void pids_release(struct task_struct *task)
4799 {
4800 struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
4801
4802 @@ -342,7 +342,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
4803 .cancel_attach = pids_cancel_attach,
4804 .can_fork = pids_can_fork,
4805 .cancel_fork = pids_cancel_fork,
4806 - .free = pids_free,
4807 + .release = pids_release,
4808 .legacy_cftypes = pids_files,
4809 .dfl_cftypes = pids_files,
4810 .threaded = true,
4811 diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
4812 index d503d1a9007c..bb95a35e8c2d 100644
4813 --- a/kernel/cgroup/rstat.c
4814 +++ b/kernel/cgroup/rstat.c
4815 @@ -87,7 +87,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
4816 struct cgroup *root, int cpu)
4817 {
4818 struct cgroup_rstat_cpu *rstatc;
4819 - struct cgroup *parent;
4820
4821 if (pos == root)
4822 return NULL;
4823 @@ -115,8 +114,8 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
4824 * However, due to the way we traverse, @pos will be the first
4825 * child in most cases. The only exception is @root.
4826 */
4827 - parent = cgroup_parent(pos);
4828 - if (parent && rstatc->updated_next) {
4829 + if (rstatc->updated_next) {
4830 + struct cgroup *parent = cgroup_parent(pos);
4831 struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
4832 struct cgroup_rstat_cpu *nrstatc;
4833 struct cgroup **nextp;
4834 @@ -140,9 +139,12 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
4835 * updated stat.
4836 */
4837 smp_mb();
4838 +
4839 + return pos;
4840 }
4841
4842 - return pos;
4843 + /* only happens for @root */
4844 + return NULL;
4845 }
4846
4847 /* see cgroup_rstat_flush() */
4848 diff --git a/kernel/cpu.c b/kernel/cpu.c
4849 index 9d0ecc4a0e79..dc250ec2c096 100644
4850 --- a/kernel/cpu.c
4851 +++ b/kernel/cpu.c
4852 @@ -313,6 +313,15 @@ void cpus_write_unlock(void)
4853
4854 void lockdep_assert_cpus_held(void)
4855 {
4856 + /*
4857 + * We can't have hotplug operations before userspace starts running,
4858 + * and some init codepaths will knowingly not take the hotplug lock.
4859 + * This is all valid, so mute lockdep until it makes sense to report
4860 + * unheld locks.
4861 + */
4862 + if (system_state < SYSTEM_RUNNING)
4863 + return;
4864 +
4865 percpu_rwsem_assert_held(&cpu_hotplug_lock);
4866 }
4867
4868 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
4869 index 5631af940316..474b2ccdbe69 100644
4870 --- a/kernel/events/ring_buffer.c
4871 +++ b/kernel/events/ring_buffer.c
4872 @@ -648,7 +648,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
4873 goto out;
4874 }
4875
4876 - rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
4877 + rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
4878 overwrite);
4879 if (!rb->aux_priv)
4880 goto out;
4881 diff --git a/kernel/exit.c b/kernel/exit.c
4882 index d607e23fd0c3..5c0964dc805a 100644
4883 --- a/kernel/exit.c
4884 +++ b/kernel/exit.c
4885 @@ -219,6 +219,7 @@ repeat:
4886 }
4887
4888 write_unlock_irq(&tasklist_lock);
4889 + cgroup_release(p);
4890 release_thread(p);
4891 call_rcu(&p->rcu, delayed_put_task_struct);
4892
4893 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
4894 index a2b3d9de999c..811009ebacd4 100644
4895 --- a/kernel/irq/chip.c
4896 +++ b/kernel/irq/chip.c
4897 @@ -855,7 +855,11 @@ void handle_percpu_irq(struct irq_desc *desc)
4898 {
4899 struct irq_chip *chip = irq_desc_get_chip(desc);
4900
4901 - kstat_incr_irqs_this_cpu(desc);
4902 + /*
4903 + * PER CPU interrupts are not serialized. Do not touch
4904 + * desc->tot_count.
4905 + */
4906 + __kstat_incr_irqs_this_cpu(desc);
4907
4908 if (chip->irq_ack)
4909 chip->irq_ack(&desc->irq_data);
4910 @@ -884,7 +888,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
4911 unsigned int irq = irq_desc_get_irq(desc);
4912 irqreturn_t res;
4913
4914 - kstat_incr_irqs_this_cpu(desc);
4915 + /*
4916 + * PER CPU interrupts are not serialized. Do not touch
4917 + * desc->tot_count.
4918 + */
4919 + __kstat_incr_irqs_this_cpu(desc);
4920
4921 if (chip->irq_ack)
4922 chip->irq_ack(&desc->irq_data);
4923 diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
4924 index ca6afa267070..e74e7eea76cf 100644
4925 --- a/kernel/irq/internals.h
4926 +++ b/kernel/irq/internals.h
4927 @@ -242,12 +242,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
4928
4929 #undef __irqd_to_state
4930
4931 -static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
4932 +static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
4933 {
4934 __this_cpu_inc(*desc->kstat_irqs);
4935 __this_cpu_inc(kstat.irqs_sum);
4936 }
4937
4938 +static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
4939 +{
4940 + __kstat_incr_irqs_this_cpu(desc);
4941 + desc->tot_count++;
4942 +}
4943 +
4944 static inline int irq_desc_get_node(struct irq_desc *desc)
4945 {
4946 return irq_common_data_get_node(&desc->irq_common_data);
4947 diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
4948 index 578d0e5f1b5b..ba454cba4069 100644
4949 --- a/kernel/irq/irqdesc.c
4950 +++ b/kernel/irq/irqdesc.c
4951 @@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
4952 desc->depth = 1;
4953 desc->irq_count = 0;
4954 desc->irqs_unhandled = 0;
4955 + desc->tot_count = 0;
4956 desc->name = NULL;
4957 desc->owner = owner;
4958 for_each_possible_cpu(cpu)
4959 @@ -915,11 +916,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
4960 unsigned int kstat_irqs(unsigned int irq)
4961 {
4962 struct irq_desc *desc = irq_to_desc(irq);
4963 - int cpu;
4964 unsigned int sum = 0;
4965 + int cpu;
4966
4967 if (!desc || !desc->kstat_irqs)
4968 return 0;
4969 + if (!irq_settings_is_per_cpu_devid(desc) &&
4970 + !irq_settings_is_per_cpu(desc))
4971 + return desc->tot_count;
4972 +
4973 for_each_possible_cpu(cpu)
4974 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
4975 return sum;
4976 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
4977 index 39cb23d22109..81688a133552 100644
4978 --- a/kernel/rcu/update.c
4979 +++ b/kernel/rcu/update.c
4980 @@ -52,6 +52,7 @@
4981 #include <linux/tick.h>
4982 #include <linux/rcupdate_wait.h>
4983 #include <linux/sched/isolation.h>
4984 +#include <linux/kprobes.h>
4985
4986 #define CREATE_TRACE_POINTS
4987
4988 @@ -253,6 +254,7 @@ int notrace debug_lockdep_rcu_enabled(void)
4989 current->lockdep_recursion == 0;
4990 }
4991 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
4992 +NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
4993
4994 /**
4995 * rcu_read_lock_held() - might we be in RCU read-side critical section?
4996 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
4997 index 152a0b0c91bb..9a4f57d7e931 100644
4998 --- a/kernel/sched/core.c
4999 +++ b/kernel/sched/core.c
5000 @@ -107,11 +107,12 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
5001 * [L] ->on_rq
5002 * RELEASE (rq->lock)
5003 *
5004 - * If we observe the old CPU in task_rq_lock, the acquire of
5005 + * If we observe the old CPU in task_rq_lock(), the acquire of
5006 * the old rq->lock will fully serialize against the stores.
5007 *
5008 - * If we observe the new CPU in task_rq_lock, the acquire will
5009 - * pair with the WMB to ensure we must then also see migrating.
5010 + * If we observe the new CPU in task_rq_lock(), the address
5011 + * dependency headed by '[L] rq = task_rq()' and the acquire
5012 + * will pair with the WMB to ensure we then also see migrating.
5013 */
5014 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
5015 rq_pin_lock(rq, rf);
5016 @@ -910,7 +911,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
5017 {
5018 lockdep_assert_held(&rq->lock);
5019
5020 - p->on_rq = TASK_ON_RQ_MIGRATING;
5021 + WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
5022 dequeue_task(rq, p, DEQUEUE_NOCLOCK);
5023 set_task_cpu(p, new_cpu);
5024 rq_unlock(rq, rf);
5025 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
5026 index 6383aa6a60ca..141ea9ff210e 100644
5027 --- a/kernel/sched/debug.c
5028 +++ b/kernel/sched/debug.c
5029 @@ -315,6 +315,7 @@ void register_sched_domain_sysctl(void)
5030 {
5031 static struct ctl_table *cpu_entries;
5032 static struct ctl_table **cpu_idx;
5033 + static bool init_done = false;
5034 char buf[32];
5035 int i;
5036
5037 @@ -344,7 +345,10 @@ void register_sched_domain_sysctl(void)
5038 if (!cpumask_available(sd_sysctl_cpus)) {
5039 if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
5040 return;
5041 + }
5042
5043 + if (!init_done) {
5044 + init_done = true;
5045 /* init to possible to not have holes in @cpu_entries */
5046 cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
5047 }
5048 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
5049 index b63172288f7b..4c7a837d7c14 100644
5050 --- a/kernel/sched/sched.h
5051 +++ b/kernel/sched/sched.h
5052 @@ -1331,9 +1331,9 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
5053 */
5054 smp_wmb();
5055 #ifdef CONFIG_THREAD_INFO_IN_TASK
5056 - p->cpu = cpu;
5057 + WRITE_ONCE(p->cpu, cpu);
5058 #else
5059 - task_thread_info(p)->cpu = cpu;
5060 + WRITE_ONCE(task_thread_info(p)->cpu, cpu);
5061 #endif
5062 p->wake_cpu = cpu;
5063 #endif
5064 @@ -1434,7 +1434,7 @@ static inline int task_on_rq_queued(struct task_struct *p)
5065
5066 static inline int task_on_rq_migrating(struct task_struct *p)
5067 {
5068 - return p->on_rq == TASK_ON_RQ_MIGRATING;
5069 + return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
5070 }
5071
5072 /*
5073 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
5074 index 505a41c42b96..c0a751464971 100644
5075 --- a/kernel/sched/topology.c
5076 +++ b/kernel/sched/topology.c
5077 @@ -477,7 +477,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
5078 }
5079
5080 struct s_data {
5081 - struct sched_domain ** __percpu sd;
5082 + struct sched_domain * __percpu *sd;
5083 struct root_domain *rd;
5084 };
5085
5086 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
5087 index 3b86acd5de4e..9e22660153ff 100644
5088 --- a/kernel/sysctl.c
5089 +++ b/kernel/sysctl.c
5090 @@ -126,6 +126,7 @@ static int __maybe_unused one = 1;
5091 static int __maybe_unused two = 2;
5092 static int __maybe_unused four = 4;
5093 static unsigned long one_ul = 1;
5094 +static unsigned long long_max = LONG_MAX;
5095 static int one_hundred = 100;
5096 static int one_thousand = 1000;
5097 #ifdef CONFIG_PRINTK
5098 @@ -1695,6 +1696,8 @@ static struct ctl_table fs_table[] = {
5099 .maxlen = sizeof(files_stat.max_files),
5100 .mode = 0644,
5101 .proc_handler = proc_doulongvec_minmax,
5102 + .extra1 = &zero,
5103 + .extra2 = &long_max,
5104 },
5105 {
5106 .procname = "nr_open",
5107 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
5108 index 65bd4616220d..34b4c32b0692 100644
5109 --- a/kernel/trace/ring_buffer.c
5110 +++ b/kernel/trace/ring_buffer.c
5111 @@ -4141,6 +4141,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
5112 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
5113 * @buffer: The ring buffer to read from
5114 * @cpu: The cpu buffer to iterate over
5115 + * @flags: gfp flags to use for memory allocation
5116 *
5117 * This performs the initial preparations necessary to iterate
5118 * through the buffer. Memory is allocated, buffer recording
5119 @@ -4158,7 +4159,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
5120 * This overall must be paired with ring_buffer_read_finish.
5121 */
5122 struct ring_buffer_iter *
5123 -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
5124 +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
5125 {
5126 struct ring_buffer_per_cpu *cpu_buffer;
5127 struct ring_buffer_iter *iter;
5128 @@ -4166,7 +4167,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
5129 if (!cpumask_test_cpu(cpu, buffer->cpumask))
5130 return NULL;
5131
5132 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
5133 + iter = kmalloc(sizeof(*iter), flags);
5134 if (!iter)
5135 return NULL;
5136
5137 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
5138 index 1f96b292df31..c65cea71d1ee 100644
5139 --- a/kernel/trace/trace.c
5140 +++ b/kernel/trace/trace.c
5141 @@ -3903,7 +3903,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
5142 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
5143 for_each_tracing_cpu(cpu) {
5144 iter->buffer_iter[cpu] =
5145 - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
5146 + ring_buffer_read_prepare(iter->trace_buffer->buffer,
5147 + cpu, GFP_KERNEL);
5148 }
5149 ring_buffer_read_prepare_sync();
5150 for_each_tracing_cpu(cpu) {
5151 @@ -3913,7 +3914,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
5152 } else {
5153 cpu = iter->cpu_file;
5154 iter->buffer_iter[cpu] =
5155 - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
5156 + ring_buffer_read_prepare(iter->trace_buffer->buffer,
5157 + cpu, GFP_KERNEL);
5158 ring_buffer_read_prepare_sync();
5159 ring_buffer_read_start(iter->buffer_iter[cpu]);
5160 tracing_iter_reset(iter, cpu);
5161 diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
5162 index d953c163a079..810d78a8d14c 100644
5163 --- a/kernel/trace/trace_kdb.c
5164 +++ b/kernel/trace/trace_kdb.c
5165 @@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
5166 if (cpu_file == RING_BUFFER_ALL_CPUS) {
5167 for_each_tracing_cpu(cpu) {
5168 iter.buffer_iter[cpu] =
5169 - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
5170 + ring_buffer_read_prepare(iter.trace_buffer->buffer,
5171 + cpu, GFP_ATOMIC);
5172 ring_buffer_read_start(iter.buffer_iter[cpu]);
5173 tracing_iter_reset(&iter, cpu);
5174 }
5175 } else {
5176 iter.cpu_file = cpu_file;
5177 iter.buffer_iter[cpu_file] =
5178 - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
5179 + ring_buffer_read_prepare(iter.trace_buffer->buffer,
5180 + cpu_file, GFP_ATOMIC);
5181 ring_buffer_read_start(iter.buffer_iter[cpu_file]);
5182 tracing_iter_reset(&iter, cpu_file);
5183 }
5184 diff --git a/lib/bsearch.c b/lib/bsearch.c
5185 index 18b445b010c3..82512fe7b33c 100644
5186 --- a/lib/bsearch.c
5187 +++ b/lib/bsearch.c
5188 @@ -11,6 +11,7 @@
5189
5190 #include <linux/export.h>
5191 #include <linux/bsearch.h>
5192 +#include <linux/kprobes.h>
5193
5194 /*
5195 * bsearch - binary search an array of elements
5196 @@ -53,3 +54,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size,
5197 return NULL;
5198 }
5199 EXPORT_SYMBOL(bsearch);
5200 +NOKPROBE_SYMBOL(bsearch);
5201 diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
5202 index 7ed43eaa02ef..5e0d55c54100 100644
5203 --- a/lib/raid6/Makefile
5204 +++ b/lib/raid6/Makefile
5205 @@ -40,7 +40,7 @@ endif
5206 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
5207 NEON_FLAGS := -ffreestanding
5208 ifeq ($(ARCH),arm)
5209 -NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
5210 +NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
5211 endif
5212 CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
5213 ifeq ($(ARCH),arm64)
5214 diff --git a/mm/cma.c b/mm/cma.c
5215 index 4cb76121a3ab..bfe9f5397165 100644
5216 --- a/mm/cma.c
5217 +++ b/mm/cma.c
5218 @@ -353,12 +353,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
5219
5220 ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
5221 if (ret)
5222 - goto err;
5223 + goto free_mem;
5224
5225 pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
5226 &base);
5227 return 0;
5228
5229 +free_mem:
5230 + memblock_free(base, size);
5231 err:
5232 pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
5233 return ret;
5234 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
5235 index 9518aefd8cbb..7c712c4565e6 100644
5236 --- a/mm/memcontrol.c
5237 +++ b/mm/memcontrol.c
5238 @@ -248,6 +248,12 @@ enum res_type {
5239 iter != NULL; \
5240 iter = mem_cgroup_iter(NULL, iter, NULL))
5241
5242 +static inline bool should_force_charge(void)
5243 +{
5244 + return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
5245 + (current->flags & PF_EXITING);
5246 +}
5247 +
5248 /* Some nice accessors for the vmpressure. */
5249 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
5250 {
5251 @@ -1382,8 +1388,13 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
5252 };
5253 bool ret;
5254
5255 - mutex_lock(&oom_lock);
5256 - ret = out_of_memory(&oc);
5257 + if (mutex_lock_killable(&oom_lock))
5258 + return true;
5259 + /*
5260 + * A few threads which were not waiting at mutex_lock_killable() can
5261 + * fail to bail out. Therefore, check again after holding oom_lock.
5262 + */
5263 + ret = should_force_charge() || out_of_memory(&oc);
5264 mutex_unlock(&oom_lock);
5265 return ret;
5266 }
5267 @@ -2200,9 +2211,7 @@ retry:
5268 * bypass the last charges so that they can exit quickly and
5269 * free their memory.
5270 */
5271 - if (unlikely(tsk_is_oom_victim(current) ||
5272 - fatal_signal_pending(current) ||
5273 - current->flags & PF_EXITING))
5274 + if (unlikely(should_force_charge()))
5275 goto force;
5276
5277 /*
5278 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
5279 index f32d0a5be4fb..360b24bc69e5 100644
5280 --- a/mm/mempolicy.c
5281 +++ b/mm/mempolicy.c
5282 @@ -350,7 +350,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
5283 {
5284 if (!pol)
5285 return;
5286 - if (!mpol_store_user_nodemask(pol) &&
5287 + if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
5288 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
5289 return;
5290
5291 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
5292 index e66ac8a47dd6..dbddb7a409dd 100644
5293 --- a/mm/oom_kill.c
5294 +++ b/mm/oom_kill.c
5295 @@ -915,7 +915,8 @@ static void __oom_kill_process(struct task_struct *victim)
5296 */
5297 static int oom_kill_memcg_member(struct task_struct *task, void *unused)
5298 {
5299 - if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
5300 + if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
5301 + !is_global_init(task)) {
5302 get_task_struct(task);
5303 __oom_kill_process(task);
5304 }
5305 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
5306 index ef99971c13dd..8e6932a140b8 100644
5307 --- a/mm/page_alloc.c
5308 +++ b/mm/page_alloc.c
5309 @@ -1922,8 +1922,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
5310
5311 arch_alloc_page(page, order);
5312 kernel_map_pages(page, 1 << order, 1);
5313 - kernel_poison_pages(page, 1 << order, 1);
5314 kasan_alloc_pages(page, order);
5315 + kernel_poison_pages(page, 1 << order, 1);
5316 set_page_owner(page, order, gfp_flags);
5317 }
5318
5319 diff --git a/mm/page_ext.c b/mm/page_ext.c
5320 index 4961f13b6ec1..aad120123688 100644
5321 --- a/mm/page_ext.c
5322 +++ b/mm/page_ext.c
5323 @@ -273,6 +273,7 @@ static void free_page_ext(void *addr)
5324 table_size = get_entry_size() * PAGES_PER_SECTION;
5325
5326 BUG_ON(PageReserved(page));
5327 + kmemleak_free(addr);
5328 free_pages_exact(addr, table_size);
5329 }
5330 }
5331 diff --git a/mm/page_poison.c b/mm/page_poison.c
5332 index aa2b3d34e8ea..6cfa8e7d7213 100644
5333 --- a/mm/page_poison.c
5334 +++ b/mm/page_poison.c
5335 @@ -6,6 +6,7 @@
5336 #include <linux/page_ext.h>
5337 #include <linux/poison.h>
5338 #include <linux/ratelimit.h>
5339 +#include <linux/kasan.h>
5340
5341 static bool want_page_poisoning __read_mostly;
5342
5343 @@ -34,7 +35,10 @@ static void poison_page(struct page *page)
5344 {
5345 void *addr = kmap_atomic(page);
5346
5347 + /* KASAN still think the page is in-use, so skip it. */
5348 + kasan_disable_current();
5349 memset(addr, PAGE_POISON, PAGE_SIZE);
5350 + kasan_enable_current();
5351 kunmap_atomic(addr);
5352 }
5353
5354 diff --git a/mm/slab.c b/mm/slab.c
5355 index 364e42d5a399..b8e0ec74330f 100644
5356 --- a/mm/slab.c
5357 +++ b/mm/slab.c
5358 @@ -563,14 +563,6 @@ static void start_cpu_timer(int cpu)
5359
5360 static void init_arraycache(struct array_cache *ac, int limit, int batch)
5361 {
5362 - /*
5363 - * The array_cache structures contain pointers to free object.
5364 - * However, when such objects are allocated or transferred to another
5365 - * cache the pointers are not cleared and they could be counted as
5366 - * valid references during a kmemleak scan. Therefore, kmemleak must
5367 - * not scan such objects.
5368 - */
5369 - kmemleak_no_scan(ac);
5370 if (ac) {
5371 ac->avail = 0;
5372 ac->limit = limit;
5373 @@ -586,6 +578,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
5374 struct array_cache *ac = NULL;
5375
5376 ac = kmalloc_node(memsize, gfp, node);
5377 + /*
5378 + * The array_cache structures contain pointers to free object.
5379 + * However, when such objects are allocated or transferred to another
5380 + * cache the pointers are not cleared and they could be counted as
5381 + * valid references during a kmemleak scan. Therefore, kmemleak must
5382 + * not scan such objects.
5383 + */
5384 + kmemleak_no_scan(ac);
5385 init_arraycache(ac, entries, batchcount);
5386 return ac;
5387 }
5388 @@ -680,6 +680,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
5389
5390 alc = kmalloc_node(memsize, gfp, node);
5391 if (alc) {
5392 + kmemleak_no_scan(alc);
5393 init_arraycache(&alc->ac, entries, batch);
5394 spin_lock_init(&alc->lock);
5395 }
5396 diff --git a/mm/sparse.c b/mm/sparse.c
5397 index 10b07eea9a6e..45950a074bdb 100644
5398 --- a/mm/sparse.c
5399 +++ b/mm/sparse.c
5400 @@ -196,7 +196,7 @@ static inline int next_present_section_nr(int section_nr)
5401 }
5402 #define for_each_present_section_nr(start, section_nr) \
5403 for (section_nr = next_present_section_nr(start-1); \
5404 - ((section_nr >= 0) && \
5405 + ((section_nr != -1) && \
5406 (section_nr <= __highest_present_section_nr)); \
5407 section_nr = next_present_section_nr(section_nr))
5408
5409 diff --git a/mm/swapfile.c b/mm/swapfile.c
5410 index 340ef3177686..0047dcaf9369 100644
5411 --- a/mm/swapfile.c
5412 +++ b/mm/swapfile.c
5413 @@ -98,6 +98,15 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
5414
5415 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
5416
5417 +static struct swap_info_struct *swap_type_to_swap_info(int type)
5418 +{
5419 + if (type >= READ_ONCE(nr_swapfiles))
5420 + return NULL;
5421 +
5422 + smp_rmb(); /* Pairs with smp_wmb in alloc_swap_info. */
5423 + return READ_ONCE(swap_info[type]);
5424 +}
5425 +
5426 static inline unsigned char swap_count(unsigned char ent)
5427 {
5428 return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
5429 @@ -1030,12 +1039,14 @@ noswap:
5430 /* The only caller of this function is now suspend routine */
5431 swp_entry_t get_swap_page_of_type(int type)
5432 {
5433 - struct swap_info_struct *si;
5434 + struct swap_info_struct *si = swap_type_to_swap_info(type);
5435 pgoff_t offset;
5436
5437 - si = swap_info[type];
5438 + if (!si)
5439 + goto fail;
5440 +
5441 spin_lock(&si->lock);
5442 - if (si && (si->flags & SWP_WRITEOK)) {
5443 + if (si->flags & SWP_WRITEOK) {
5444 atomic_long_dec(&nr_swap_pages);
5445 /* This is called for allocating swap entry, not cache */
5446 offset = scan_swap_map(si, 1);
5447 @@ -1046,6 +1057,7 @@ swp_entry_t get_swap_page_of_type(int type)
5448 atomic_long_inc(&nr_swap_pages);
5449 }
5450 spin_unlock(&si->lock);
5451 +fail:
5452 return (swp_entry_t) {0};
5453 }
5454
5455 @@ -1057,9 +1069,9 @@ static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
5456 if (!entry.val)
5457 goto out;
5458 type = swp_type(entry);
5459 - if (type >= nr_swapfiles)
5460 + p = swap_type_to_swap_info(type);
5461 + if (!p)
5462 goto bad_nofile;
5463 - p = swap_info[type];
5464 if (!(p->flags & SWP_USED))
5465 goto bad_device;
5466 offset = swp_offset(entry);
5467 @@ -1708,10 +1720,9 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
5468 sector_t swapdev_block(int type, pgoff_t offset)
5469 {
5470 struct block_device *bdev;
5471 + struct swap_info_struct *si = swap_type_to_swap_info(type);
5472
5473 - if ((unsigned int)type >= nr_swapfiles)
5474 - return 0;
5475 - if (!(swap_info[type]->flags & SWP_WRITEOK))
5476 + if (!si || !(si->flags & SWP_WRITEOK))
5477 return 0;
5478 return map_swap_entry(swp_entry(type, offset), &bdev);
5479 }
5480 @@ -2269,7 +2280,7 @@ static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
5481 struct swap_extent *se;
5482 pgoff_t offset;
5483
5484 - sis = swap_info[swp_type(entry)];
5485 + sis = swp_swap_info(entry);
5486 *bdev = sis->bdev;
5487
5488 offset = swp_offset(entry);
5489 @@ -2707,9 +2718,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
5490 if (!l)
5491 return SEQ_START_TOKEN;
5492
5493 - for (type = 0; type < nr_swapfiles; type++) {
5494 - smp_rmb(); /* read nr_swapfiles before swap_info[type] */
5495 - si = swap_info[type];
5496 + for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
5497 if (!(si->flags & SWP_USED) || !si->swap_map)
5498 continue;
5499 if (!--l)
5500 @@ -2729,9 +2738,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
5501 else
5502 type = si->type + 1;
5503
5504 - for (; type < nr_swapfiles; type++) {
5505 - smp_rmb(); /* read nr_swapfiles before swap_info[type] */
5506 - si = swap_info[type];
5507 + for (; (si = swap_type_to_swap_info(type)); type++) {
5508 if (!(si->flags & SWP_USED) || !si->swap_map)
5509 continue;
5510 ++*pos;
5511 @@ -2838,14 +2845,14 @@ static struct swap_info_struct *alloc_swap_info(void)
5512 }
5513 if (type >= nr_swapfiles) {
5514 p->type = type;
5515 - swap_info[type] = p;
5516 + WRITE_ONCE(swap_info[type], p);
5517 /*
5518 * Write swap_info[type] before nr_swapfiles, in case a
5519 * racing procfs swap_start() or swap_next() is reading them.
5520 * (We never shrink nr_swapfiles, we never free this entry.)
5521 */
5522 smp_wmb();
5523 - nr_swapfiles++;
5524 + WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
5525 } else {
5526 kvfree(p);
5527 p = swap_info[type];
5528 @@ -3365,7 +3372,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
5529 {
5530 struct swap_info_struct *p;
5531 struct swap_cluster_info *ci;
5532 - unsigned long offset, type;
5533 + unsigned long offset;
5534 unsigned char count;
5535 unsigned char has_cache;
5536 int err = -EINVAL;
5537 @@ -3373,10 +3380,10 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
5538 if (non_swap_entry(entry))
5539 goto out;
5540
5541 - type = swp_type(entry);
5542 - if (type >= nr_swapfiles)
5543 + p = swp_swap_info(entry);
5544 + if (!p)
5545 goto bad_file;
5546 - p = swap_info[type];
5547 +
5548 offset = swp_offset(entry);
5549 if (unlikely(offset >= p->max))
5550 goto out;
5551 @@ -3473,7 +3480,7 @@ int swapcache_prepare(swp_entry_t entry)
5552
5553 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
5554 {
5555 - return swap_info[swp_type(entry)];
5556 + return swap_type_to_swap_info(swp_type(entry));
5557 }
5558
5559 struct swap_info_struct *page_swap_info(struct page *page)
5560 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
5561 index 91a789a46b12..a46ec261a44e 100644
5562 --- a/mm/vmalloc.c
5563 +++ b/mm/vmalloc.c
5564 @@ -498,7 +498,11 @@ nocache:
5565 }
5566
5567 found:
5568 - if (addr + size > vend)
5569 + /*
5570 + * Check also calculated address against the vstart,
5571 + * because it can be 0 because of big align request.
5572 + */
5573 + if (addr + size > vend || addr < vstart)
5574 goto overflow;
5575
5576 va->va_start = addr;
5577 diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
5578 index e07a7e62c705..3b0a03b92080 100644
5579 --- a/net/bridge/br_netfilter_hooks.c
5580 +++ b/net/bridge/br_netfilter_hooks.c
5581 @@ -884,11 +884,6 @@ static const struct nf_br_ops br_ops = {
5582 .br_dev_xmit_hook = br_nf_dev_xmit,
5583 };
5584
5585 -void br_netfilter_enable(void)
5586 -{
5587 -}
5588 -EXPORT_SYMBOL_GPL(br_netfilter_enable);
5589 -
5590 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
5591 * br_dev_queue_push_xmit is called afterwards */
5592 static const struct nf_hook_ops br_nf_ops[] = {
5593 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
5594 index 895171a2e1f1..9a249478abf2 100644
5595 --- a/net/netfilter/nf_conntrack_core.c
5596 +++ b/net/netfilter/nf_conntrack_core.c
5597 @@ -901,10 +901,18 @@ __nf_conntrack_confirm(struct sk_buff *skb)
5598 * REJECT will give spurious warnings here.
5599 */
5600
5601 - /* No external references means no one else could have
5602 - * confirmed us.
5603 + /* Another skb with the same unconfirmed conntrack may
5604 + * win the race. This may happen for bridge(br_flood)
5605 + * or broadcast/multicast packets do skb_clone with
5606 + * unconfirmed conntrack.
5607 */
5608 - WARN_ON(nf_ct_is_confirmed(ct));
5609 + if (unlikely(nf_ct_is_confirmed(ct))) {
5610 + WARN_ON_ONCE(1);
5611 + nf_conntrack_double_unlock(hash, reply_hash);
5612 + local_bh_enable();
5613 + return NF_DROP;
5614 + }
5615 +
5616 pr_debug("Confirming conntrack %p\n", ct);
5617 /* We have to check the DYING flag after unlink to prevent
5618 * a race against nf_ct_get_next_corpse() possibly called from
5619 diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
5620 index 247b89784a6f..842f3f86fb2e 100644
5621 --- a/net/netfilter/nf_conntrack_proto_tcp.c
5622 +++ b/net/netfilter/nf_conntrack_proto_tcp.c
5623 @@ -769,6 +769,12 @@ static int tcp_error(struct net *net, struct nf_conn *tmpl,
5624 return NF_ACCEPT;
5625 }
5626
5627 +static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
5628 +{
5629 + return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
5630 + test_bit(IPS_ASSURED_BIT, &ct->status);
5631 +}
5632 +
5633 /* Returns verdict for packet, or -1 for invalid. */
5634 static int tcp_packet(struct nf_conn *ct,
5635 const struct sk_buff *skb,
5636 @@ -963,16 +969,38 @@ static int tcp_packet(struct nf_conn *ct,
5637 new_state = TCP_CONNTRACK_ESTABLISHED;
5638 break;
5639 case TCP_CONNTRACK_CLOSE:
5640 - if (index == TCP_RST_SET
5641 - && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
5642 - && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
5643 - /* Invalid RST */
5644 - spin_unlock_bh(&ct->lock);
5645 - nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
5646 - return -NF_ACCEPT;
5647 + if (index != TCP_RST_SET)
5648 + break;
5649 +
5650 + if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
5651 + u32 seq = ntohl(th->seq);
5652 +
5653 + if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
5654 + /* Invalid RST */
5655 + spin_unlock_bh(&ct->lock);
5656 + nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
5657 + return -NF_ACCEPT;
5658 + }
5659 +
5660 + if (!nf_conntrack_tcp_established(ct) ||
5661 + seq == ct->proto.tcp.seen[!dir].td_maxack)
5662 + break;
5663 +
5664 + /* Check if rst is part of train, such as
5665 + * foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
5666 + * foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
5667 + */
5668 + if (ct->proto.tcp.last_index == TCP_ACK_SET &&
5669 + ct->proto.tcp.last_dir == dir &&
5670 + seq == ct->proto.tcp.last_end)
5671 + break;
5672 +
5673 + /* ... RST sequence number doesn't match exactly, keep
5674 + * established state to allow a possible challenge ACK.
5675 + */
5676 + new_state = old_state;
5677 }
5678 - if (index == TCP_RST_SET
5679 - && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
5680 + if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
5681 && ct->proto.tcp.last_index == TCP_SYN_SET)
5682 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
5683 && ct->proto.tcp.last_index == TCP_ACK_SET))
5684 @@ -988,7 +1016,7 @@ static int tcp_packet(struct nf_conn *ct,
5685 * segments we ignored. */
5686 goto in_window;
5687 }
5688 - /* Just fall through */
5689 + break;
5690 default:
5691 /* Keep compilers happy. */
5692 break;
5693 @@ -1023,6 +1051,8 @@ static int tcp_packet(struct nf_conn *ct,
5694 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
5695 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
5696 timeout = timeouts[TCP_CONNTRACK_RETRANS];
5697 + else if (unlikely(index == TCP_RST_SET))
5698 + timeout = timeouts[TCP_CONNTRACK_CLOSE];
5699 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
5700 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
5701 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
5702 diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
5703 index 60f258f2c707..a3850414dba2 100644
5704 --- a/net/netfilter/nf_tables_core.c
5705 +++ b/net/netfilter/nf_tables_core.c
5706 @@ -98,21 +98,23 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
5707 const struct nft_pktinfo *pkt)
5708 {
5709 struct nft_base_chain *base_chain;
5710 + struct nft_stats __percpu *pstats;
5711 struct nft_stats *stats;
5712
5713 base_chain = nft_base_chain(chain);
5714 - if (!rcu_access_pointer(base_chain->stats))
5715 - return;
5716
5717 - local_bh_disable();
5718 - stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
5719 - if (stats) {
5720 + rcu_read_lock();
5721 + pstats = READ_ONCE(base_chain->stats);
5722 + if (pstats) {
5723 + local_bh_disable();
5724 + stats = this_cpu_ptr(pstats);
5725 u64_stats_update_begin(&stats->syncp);
5726 stats->pkts++;
5727 stats->bytes += pkt->skb->len;
5728 u64_stats_update_end(&stats->syncp);
5729 + local_bh_enable();
5730 }
5731 - local_bh_enable();
5732 + rcu_read_unlock();
5733 }
5734
5735 struct nft_jumpstack {
5736 diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
5737 index 9d6d67b953ac..05f00fb20b04 100644
5738 --- a/net/netfilter/xt_physdev.c
5739 +++ b/net/netfilter/xt_physdev.c
5740 @@ -96,8 +96,7 @@ match_outdev:
5741 static int physdev_mt_check(const struct xt_mtchk_param *par)
5742 {
5743 const struct xt_physdev_info *info = par->matchinfo;
5744 -
5745 - br_netfilter_enable();
5746 + static bool brnf_probed __read_mostly;
5747
5748 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
5749 info->bitmask & ~XT_PHYSDEV_OP_MASK)
5750 @@ -111,6 +110,12 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
5751 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
5752 return -EINVAL;
5753 }
5754 +
5755 + if (!brnf_probed) {
5756 + brnf_probed = true;
5757 + request_module("br_netfilter");
5758 + }
5759 +
5760 return 0;
5761 }
5762
5763 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
5764 index 6ea3d3aa1a1e..4337b6d9369e 100644
5765 --- a/security/selinux/hooks.c
5766 +++ b/security/selinux/hooks.c
5767 @@ -3458,12 +3458,16 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
5768 const void *value, size_t size, int flags)
5769 {
5770 struct inode_security_struct *isec = inode_security_novalidate(inode);
5771 + struct superblock_security_struct *sbsec = inode->i_sb->s_security;
5772 u32 newsid;
5773 int rc;
5774
5775 if (strcmp(name, XATTR_SELINUX_SUFFIX))
5776 return -EOPNOTSUPP;
5777
5778 + if (!(sbsec->flags & SBLABEL_MNT))
5779 + return -EOPNOTSUPP;
5780 +
5781 if (!value || !size)
5782 return -EACCES;
5783
5784 @@ -6612,7 +6616,10 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)
5785 */
5786 static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
5787 {
5788 - return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
5789 + int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
5790 + ctx, ctxlen, 0);
5791 + /* Do not return error when suppressing label (SBLABEL_MNT not set). */
5792 + return rc == -EOPNOTSUPP ? 0 : rc;
5793 }
5794
5795 /*
5796 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
5797 index b67f6fe08a1b..e08c6c6ca029 100644
5798 --- a/sound/core/pcm_native.c
5799 +++ b/sound/core/pcm_native.c
5800 @@ -1513,6 +1513,14 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
5801 /* FIXME: the open/close code should lock this as well */
5802 if (substream->runtime == NULL)
5803 continue;
5804 +
5805 + /*
5806 + * Skip BE dai link PCM's that are internal and may
5807 + * not have their substream ops set.
5808 + */
5809 + if (!substream->ops)
5810 + continue;
5811 +
5812 err = snd_pcm_suspend(substream);
5813 if (err < 0 && err != -EBUSY)
5814 return err;
5815 diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
5816 index 774eb2205668..3d600f498914 100644
5817 --- a/sound/firewire/dice/dice.c
5818 +++ b/sound/firewire/dice/dice.c
5819 @@ -18,6 +18,7 @@ MODULE_LICENSE("GPL v2");
5820 #define OUI_ALESIS 0x000595
5821 #define OUI_MAUDIO 0x000d6c
5822 #define OUI_MYTEK 0x001ee8
5823 +#define OUI_SSL 0x0050c2 // Actually ID reserved by IEEE.
5824
5825 #define DICE_CATEGORY_ID 0x04
5826 #define WEISS_CATEGORY_ID 0x00
5827 @@ -216,7 +217,7 @@ static int dice_probe(struct fw_unit *unit,
5828 struct snd_dice *dice;
5829 int err;
5830
5831 - if (!entry->driver_data) {
5832 + if (!entry->driver_data && entry->vendor_id != OUI_SSL) {
5833 err = check_dice_category(unit);
5834 if (err < 0)
5835 return -ENODEV;
5836 @@ -382,6 +383,15 @@ static const struct ieee1394_device_id dice_id_table[] = {
5837 .model_id = 0x000002,
5838 .driver_data = (kernel_ulong_t)snd_dice_detect_mytek_formats,
5839 },
5840 + // Solid State Logic, Duende Classic and Mini.
5841 + // NOTE: each field of GUID in config ROM is not compliant to standard
5842 + // DICE scheme.
5843 + {
5844 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
5845 + IEEE1394_MATCH_MODEL_ID,
5846 + .vendor_id = OUI_SSL,
5847 + .model_id = 0x000070,
5848 + },
5849 {
5850 .match_flags = IEEE1394_MATCH_VERSION,
5851 .version = DICE_INTERFACE,
5852 diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
5853 index 44433b20435c..600d9be9706e 100644
5854 --- a/sound/soc/fsl/fsl-asoc-card.c
5855 +++ b/sound/soc/fsl/fsl-asoc-card.c
5856 @@ -689,6 +689,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
5857 asrc_fail:
5858 of_node_put(asrc_np);
5859 of_node_put(codec_np);
5860 + put_device(&cpu_pdev->dev);
5861 fail:
5862 of_node_put(cpu_np);
5863
5864 diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
5865 index c29200cf755a..9b9a7ec52905 100644
5866 --- a/sound/soc/fsl/imx-sgtl5000.c
5867 +++ b/sound/soc/fsl/imx-sgtl5000.c
5868 @@ -108,6 +108,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
5869 ret = -EPROBE_DEFER;
5870 goto fail;
5871 }
5872 + put_device(&ssi_pdev->dev);
5873 codec_dev = of_find_i2c_device_by_node(codec_np);
5874 if (!codec_dev) {
5875 dev_err(&pdev->dev, "failed to find codec platform device\n");
5876 diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
5877 index 4715527054e5..5661025e8cec 100644
5878 --- a/sound/soc/qcom/common.c
5879 +++ b/sound/soc/qcom/common.c
5880 @@ -42,6 +42,9 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
5881 link = card->dai_link;
5882 for_each_child_of_node(dev->of_node, np) {
5883 cpu = of_get_child_by_name(np, "cpu");
5884 + platform = of_get_child_by_name(np, "platform");
5885 + codec = of_get_child_by_name(np, "codec");
5886 +
5887 if (!cpu) {
5888 dev_err(dev, "Can't find cpu DT node\n");
5889 ret = -EINVAL;
5890 @@ -63,8 +66,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
5891 goto err;
5892 }
5893
5894 - platform = of_get_child_by_name(np, "platform");
5895 - codec = of_get_child_by_name(np, "codec");
5896 if (codec && platform) {
5897 link->platform_of_node = of_parse_phandle(platform,
5898 "sound-dai",
5899 @@ -100,10 +101,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
5900 link->dpcm_capture = 1;
5901 link->stream_name = link->name;
5902 link++;
5903 +
5904 + of_node_put(cpu);
5905 + of_node_put(codec);
5906 + of_node_put(platform);
5907 }
5908
5909 return 0;
5910 err:
5911 + of_node_put(np);
5912 of_node_put(cpu);
5913 of_node_put(codec);
5914 of_node_put(platform);
5915 diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
5916 index d49902e818b5..3624557550a1 100644
5917 --- a/tools/lib/bpf/Makefile
5918 +++ b/tools/lib/bpf/Makefile
5919 @@ -149,7 +149,8 @@ CMD_TARGETS = $(LIB_FILE)
5920
5921 TARGETS = $(CMD_TARGETS)
5922
5923 -all: fixdep all_cmd
5924 +all: fixdep
5925 + $(Q)$(MAKE) all_cmd
5926
5927 all_cmd: $(CMD_TARGETS)
5928
5929 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
5930 index 75de355a63d6..10985d991ed2 100644
5931 --- a/tools/lib/traceevent/event-parse.c
5932 +++ b/tools/lib/traceevent/event-parse.c
5933 @@ -2416,7 +2416,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
5934 static char *arg_eval (struct print_arg *arg)
5935 {
5936 long long val;
5937 - static char buf[20];
5938 + static char buf[24];
5939
5940 switch (arg->type) {
5941 case PRINT_ATOM:
5942 diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
5943 index f3aa9d02a5ab..763c2edf52e7 100644
5944 --- a/tools/perf/builtin-c2c.c
5945 +++ b/tools/perf/builtin-c2c.c
5946 @@ -2055,6 +2055,12 @@ static int setup_nodes(struct perf_session *session)
5947 if (!set)
5948 return -ENOMEM;
5949
5950 + nodes[node] = set;
5951 +
5952 + /* empty node, skip */
5953 + if (cpu_map__empty(map))
5954 + continue;
5955 +
5956 for (cpu = 0; cpu < map->nr; cpu++) {
5957 set_bit(map->map[cpu], set);
5958
5959 @@ -2063,8 +2069,6 @@ static int setup_nodes(struct perf_session *session)
5960
5961 cpu2node[map->map[cpu]] = node;
5962 }
5963 -
5964 - nodes[node] = set;
5965 }
5966
5967 setup_nodes_header();
5968 diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
5969 index 67bcbf876776..d0406116c905 100644
5970 --- a/tools/perf/tests/evsel-tp-sched.c
5971 +++ b/tools/perf/tests/evsel-tp-sched.c
5972 @@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
5973 return -1;
5974 }
5975
5976 - if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
5977 + if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
5978 ret = -1;
5979
5980 if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
5981 @@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
5982 if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
5983 ret = -1;
5984
5985 - if (perf_evsel__test_field(evsel, "next_comm", 16, true))
5986 + if (perf_evsel__test_field(evsel, "next_comm", 16, false))
5987 ret = -1;
5988
5989 if (perf_evsel__test_field(evsel, "next_pid", 4, true))
5990 @@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
5991 return -1;
5992 }
5993
5994 - if (perf_evsel__test_field(evsel, "comm", 16, true))
5995 + if (perf_evsel__test_field(evsel, "comm", 16, false))
5996 ret = -1;
5997
5998 if (perf_evsel__test_field(evsel, "pid", 4, true))
5999 diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
6000 index 28cd6a17491b..dfee110b3a58 100644
6001 --- a/tools/perf/util/annotate.c
6002 +++ b/tools/perf/util/annotate.c
6003 @@ -1862,6 +1862,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
6004 struct annotation_options *options,
6005 struct arch **parch)
6006 {
6007 + struct annotation *notes = symbol__annotation(sym);
6008 struct annotate_args args = {
6009 .privsize = privsize,
6010 .evsel = evsel,
6011 @@ -1892,6 +1893,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
6012
6013 args.ms.map = map;
6014 args.ms.sym = sym;
6015 + notes->start = map__rip_2objdump(map, sym->start);
6016
6017 return symbol__disassemble(sym, &args);
6018 }
6019 @@ -2746,8 +2748,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
6020
6021 symbol__calc_percent(sym, evsel);
6022
6023 - notes->start = map__rip_2objdump(map, sym->start);
6024 -
6025 annotation__set_offsets(notes, size);
6026 annotation__mark_jump_targets(notes, sym);
6027 annotation__compute_ipc(notes, size);
6028 diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
6029 index aa7f8c11fbb7..910f2621d211 100644
6030 --- a/tools/perf/util/s390-cpumsf.c
6031 +++ b/tools/perf/util/s390-cpumsf.c
6032 @@ -294,6 +294,11 @@ static bool s390_cpumsf_validate(int machine_type,
6033 *dsdes = 85;
6034 *bsdes = 32;
6035 break;
6036 + case 2964:
6037 + case 2965:
6038 + *dsdes = 112;
6039 + *bsdes = 32;
6040 + break;
6041 default:
6042 /* Illegal trailer entry */
6043 return false;
6044 diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
6045 index 05d95de14e20..9569cc06e0a7 100644
6046 --- a/tools/perf/util/scripting-engines/trace-event-python.c
6047 +++ b/tools/perf/util/scripting-engines/trace-event-python.c
6048 @@ -733,8 +733,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
6049 Py_FatalError("couldn't create Python dictionary");
6050
6051 pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
6052 - pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
6053 - (const char *)&evsel->attr, sizeof(evsel->attr)));
6054 + pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
6055
6056 pydict_set_item_string_decref(dict_sample, "pid",
6057 _PyLong_FromLong(sample->pid));
6058 @@ -1494,34 +1493,40 @@ static void _free_command_line(wchar_t **command_line, int num)
6059 static int python_start_script(const char *script, int argc, const char **argv)
6060 {
6061 struct tables *tables = &tables_global;
6062 + PyMODINIT_FUNC (*initfunc)(void);
6063 #if PY_MAJOR_VERSION < 3
6064 const char **command_line;
6065 #else
6066 wchar_t **command_line;
6067 #endif
6068 - char buf[PATH_MAX];
6069 + /*
6070 + * Use a non-const name variable to cope with python 2.6's
6071 + * PyImport_AppendInittab prototype
6072 + */
6073 + char buf[PATH_MAX], name[19] = "perf_trace_context";
6074 int i, err = 0;
6075 FILE *fp;
6076
6077 #if PY_MAJOR_VERSION < 3
6078 + initfunc = initperf_trace_context;
6079 command_line = malloc((argc + 1) * sizeof(const char *));
6080 command_line[0] = script;
6081 for (i = 1; i < argc + 1; i++)
6082 command_line[i] = argv[i - 1];
6083 #else
6084 + initfunc = PyInit_perf_trace_context;
6085 command_line = malloc((argc + 1) * sizeof(wchar_t *));
6086 command_line[0] = Py_DecodeLocale(script, NULL);
6087 for (i = 1; i < argc + 1; i++)
6088 command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
6089 #endif
6090
6091 + PyImport_AppendInittab(name, initfunc);
6092 Py_Initialize();
6093
6094 #if PY_MAJOR_VERSION < 3
6095 - initperf_trace_context();
6096 PySys_SetArgv(argc + 1, (char **)command_line);
6097 #else
6098 - PyInit_perf_trace_context();
6099 PySys_SetArgv(argc + 1, command_line);
6100 #endif
6101
6102 diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
6103 index b284276ec963..46daa22b86e3 100644
6104 --- a/tools/perf/util/sort.c
6105 +++ b/tools/perf/util/sort.c
6106 @@ -229,8 +229,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
6107 if (sym_l == sym_r)
6108 return 0;
6109
6110 - if (sym_l->inlined || sym_r->inlined)
6111 - return strcmp(sym_l->name, sym_r->name);
6112 + if (sym_l->inlined || sym_r->inlined) {
6113 + int ret = strcmp(sym_l->name, sym_r->name);
6114 +
6115 + if (ret)
6116 + return ret;
6117 + if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
6118 + return 0;
6119 + }
6120
6121 if (sym_l->start != sym_r->start)
6122 return (int64_t)(sym_r->start - sym_l->start);
6123 diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
6124 index e767c4a9d4d2..af3f9b9f1e8b 100644
6125 --- a/tools/perf/util/srcline.c
6126 +++ b/tools/perf/util/srcline.c
6127 @@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
6128 } else {
6129 /* create a fake symbol for the inline frame */
6130 inline_sym = symbol__new(base_sym ? base_sym->start : 0,
6131 - base_sym ? base_sym->end : 0,
6132 + base_sym ? (base_sym->end - base_sym->start) : 0,
6133 base_sym ? base_sym->binding : 0,
6134 base_sym ? base_sym->type : 0,
6135 funcname);
6136 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
6137 index 9db5a7378f40..294fc18aba2a 100644
6138 --- a/tools/testing/selftests/bpf/test_verifier.c
6139 +++ b/tools/testing/selftests/bpf/test_verifier.c
6140 @@ -32,6 +32,7 @@
6141 #include <linux/if_ether.h>
6142
6143 #include <bpf/bpf.h>
6144 +#include <bpf/libbpf.h>
6145
6146 #ifdef HAVE_GENHDR
6147 # include "autoconf.h"
6148 @@ -56,6 +57,7 @@
6149
6150 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
6151 static bool unpriv_disabled = false;
6152 +static int skips;
6153
6154 struct bpf_test {
6155 const char *descr;
6156 @@ -12770,6 +12772,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
6157 fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
6158 prog, prog_len, test->flags & F_LOAD_WITH_STRICT_ALIGNMENT,
6159 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
6160 + if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
6161 + printf("SKIP (unsupported program type %d)\n", prog_type);
6162 + skips++;
6163 + goto close_fds;
6164 + }
6165
6166 expected_ret = unpriv && test->result_unpriv != UNDEF ?
6167 test->result_unpriv : test->result;
6168 @@ -12905,7 +12912,7 @@ static void get_unpriv_disabled()
6169
6170 static int do_test(bool unpriv, unsigned int from, unsigned int to)
6171 {
6172 - int i, passes = 0, errors = 0, skips = 0;
6173 + int i, passes = 0, errors = 0;
6174
6175 for (i = from; i < to; i++) {
6176 struct bpf_test *test = &tests[i];
6177 diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
6178 index 83057fa9d391..14cad657bc6a 100644
6179 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
6180 +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
6181 @@ -2920,6 +2920,12 @@ TEST(get_metadata)
6182 struct seccomp_metadata md;
6183 long ret;
6184
6185 + /* Only real root can get metadata. */
6186 + if (geteuid()) {
6187 + XFAIL(return, "get_metadata requires real root");
6188 + return;
6189 + }
6190 +
6191 ASSERT_EQ(0, pipe(pipefd));
6192
6193 pid = fork();