Magellan Linux

Contents of /trunk/kernel-magellan/patches-5.0/0106-5.0.7-all-fixes.patch



Revision 3333
Fri Apr 26 12:20:33 2019 UTC by niro
File size: 307346 bytes
-linux-5.0.7
1 diff --git a/Documentation/arm/kernel_mode_neon.txt b/Documentation/arm/kernel_mode_neon.txt
2 index 525452726d31..b9e060c5b61e 100644
3 --- a/Documentation/arm/kernel_mode_neon.txt
4 +++ b/Documentation/arm/kernel_mode_neon.txt
5 @@ -6,7 +6,7 @@ TL;DR summary
6 * Use only NEON instructions, or VFP instructions that don't rely on support
7 code
8 * Isolate your NEON code in a separate compilation unit, and compile it with
9 - '-mfpu=neon -mfloat-abi=softfp'
10 + '-march=armv7-a -mfpu=neon -mfloat-abi=softfp'
11 * Put kernel_neon_begin() and kernel_neon_end() calls around the calls into your
12 NEON code
13 * Don't sleep in your NEON code, and be aware that it will be executed with
14 @@ -87,7 +87,7 @@ instructions appearing in unexpected places if no special care is taken.
15 Therefore, the recommended and only supported way of using NEON/VFP in the
16 kernel is by adhering to the following rules:
17 * isolate the NEON code in a separate compilation unit and compile it with
18 - '-mfpu=neon -mfloat-abi=softfp';
19 + '-march=armv7-a -mfpu=neon -mfloat-abi=softfp';
20 * issue the calls to kernel_neon_begin(), kernel_neon_end() as well as the calls
21 into the unit containing the NEON code from a compilation unit which is *not*
22 built with the GCC flag '-mfpu=neon' set.
23 diff --git a/Makefile b/Makefile
24 index 3ee390feea61..af99c77c7066 100644
25 --- a/Makefile
26 +++ b/Makefile
27 @@ -1,7 +1,7 @@
28 # SPDX-License-Identifier: GPL-2.0
29 VERSION = 5
30 PATCHLEVEL = 0
31 -SUBLEVEL = 6
32 +SUBLEVEL = 7
33 EXTRAVERSION =
34 NAME = Shy Crocodile
35
36 @@ -15,19 +15,6 @@ NAME = Shy Crocodile
37 PHONY := _all
38 _all:
39
40 -# Do not use make's built-in rules and variables
41 -# (this increases performance and avoids hard-to-debug behaviour)
42 -MAKEFLAGS += -rR
43 -
44 -# Avoid funny character set dependencies
45 -unexport LC_ALL
46 -LC_COLLATE=C
47 -LC_NUMERIC=C
48 -export LC_COLLATE LC_NUMERIC
49 -
50 -# Avoid interference with shell env settings
51 -unexport GREP_OPTIONS
52 -
53 # We are using a recursive build, so we need to do a little thinking
54 # to get the ordering right.
55 #
56 @@ -44,6 +31,21 @@ unexport GREP_OPTIONS
57 # descending is started. They are now explicitly listed as the
58 # prepare rule.
59
60 +ifneq ($(sub-make-done),1)
61 +
62 +# Do not use make's built-in rules and variables
63 +# (this increases performance and avoids hard-to-debug behaviour)
64 +MAKEFLAGS += -rR
65 +
66 +# Avoid funny character set dependencies
67 +unexport LC_ALL
68 +LC_COLLATE=C
69 +LC_NUMERIC=C
70 +export LC_COLLATE LC_NUMERIC
71 +
72 +# Avoid interference with shell env settings
73 +unexport GREP_OPTIONS
74 +
75 # Beautify output
76 # ---------------------------------------------------------------------------
77 #
78 @@ -112,7 +114,6 @@ export quiet Q KBUILD_VERBOSE
79
80 # KBUILD_SRC is not intended to be used by the regular user (for now),
81 # it is set on invocation of make with KBUILD_OUTPUT or O= specified.
82 -ifeq ($(KBUILD_SRC),)
83
84 # OK, Make called in directory where kernel src resides
85 # Do we want to locate output files in a separate directory?
86 @@ -142,6 +143,24 @@ $(if $(KBUILD_OUTPUT),, \
87 # 'sub-make' below.
88 MAKEFLAGS += --include-dir=$(CURDIR)
89
90 +need-sub-make := 1
91 +else
92 +
93 +# Do not print "Entering directory ..." at all for in-tree build.
94 +MAKEFLAGS += --no-print-directory
95 +
96 +endif # ifneq ($(KBUILD_OUTPUT),)
97 +
98 +ifneq ($(filter 3.%,$(MAKE_VERSION)),)
99 +# 'MAKEFLAGS += -rR' does not immediately become effective for GNU Make 3.x
100 +# We need to invoke sub-make to avoid implicit rules in the top Makefile.
101 +need-sub-make := 1
102 +# Cancel implicit rules for this Makefile.
103 +$(lastword $(MAKEFILE_LIST)): ;
104 +endif
105 +
106 +ifeq ($(need-sub-make),1)
107 +
108 PHONY += $(MAKECMDGOALS) sub-make
109
110 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
111 @@ -149,16 +168,15 @@ $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
112
113 # Invoke a second make in the output directory, passing relevant variables
114 sub-make:
115 - $(Q)$(MAKE) -C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR) \
116 + $(Q)$(MAKE) sub-make-done=1 \
117 + $(if $(KBUILD_OUTPUT),-C $(KBUILD_OUTPUT) KBUILD_SRC=$(CURDIR)) \
118 -f $(CURDIR)/Makefile $(filter-out _all sub-make,$(MAKECMDGOALS))
119
120 -# Leave processing to above invocation of make
121 -skip-makefile := 1
122 -endif # ifneq ($(KBUILD_OUTPUT),)
123 -endif # ifeq ($(KBUILD_SRC),)
124 +endif # need-sub-make
125 +endif # sub-make-done
126
127 # We process the rest of the Makefile if this is the final invocation of make
128 -ifeq ($(skip-makefile),)
129 +ifeq ($(need-sub-make),)
130
131 # Do not print "Entering directory ...",
132 # but we want to display it when entering to the output directory
133 @@ -625,12 +643,15 @@ ifeq ($(may-sync-config),1)
134 -include include/config/auto.conf.cmd
135
136 # To avoid any implicit rule to kick in, define an empty command
137 -$(KCONFIG_CONFIG) include/config/auto.conf.cmd: ;
138 +$(KCONFIG_CONFIG): ;
139
140 # The actual configuration files used during the build are stored in
141 # include/generated/ and include/config/. Update them if .config is newer than
142 # include/config/auto.conf (which mirrors .config).
143 -include/config/%.conf: $(KCONFIG_CONFIG) include/config/auto.conf.cmd
144 +#
145 +# This exploits the 'multi-target pattern rule' trick.
146 +# The syncconfig should be executed only once to make all the targets.
147 +%/auto.conf %/auto.conf.cmd %/tristate.conf: $(KCONFIG_CONFIG)
148 $(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
149 else
150 # External modules and some install targets need include/generated/autoconf.h
151 @@ -1756,7 +1777,7 @@ $(cmd_files): ; # Do not try to update included dependency files
152
153 endif # ifeq ($(config-targets),1)
154 endif # ifeq ($(mixed-targets),1)
155 -endif # skip-makefile
156 +endif # need-sub-make
157
158 PHONY += FORCE
159 FORCE:
160 diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
161 index b7303a4e4236..ed0d6fb20122 100644
162 --- a/arch/arm/boot/dts/lpc32xx.dtsi
163 +++ b/arch/arm/boot/dts/lpc32xx.dtsi
164 @@ -230,7 +230,7 @@
165 status = "disabled";
166 };
167
168 - i2s1: i2s@2009C000 {
169 + i2s1: i2s@2009c000 {
170 compatible = "nxp,lpc3220-i2s";
171 reg = <0x2009C000 0x1000>;
172 };
173 @@ -273,7 +273,7 @@
174 status = "disabled";
175 };
176
177 - i2c1: i2c@400A0000 {
178 + i2c1: i2c@400a0000 {
179 compatible = "nxp,pnx-i2c";
180 reg = <0x400A0000 0x100>;
181 interrupt-parent = <&sic1>;
182 @@ -284,7 +284,7 @@
183 clocks = <&clk LPC32XX_CLK_I2C1>;
184 };
185
186 - i2c2: i2c@400A8000 {
187 + i2c2: i2c@400a8000 {
188 compatible = "nxp,pnx-i2c";
189 reg = <0x400A8000 0x100>;
190 interrupt-parent = <&sic1>;
191 @@ -295,7 +295,7 @@
192 clocks = <&clk LPC32XX_CLK_I2C2>;
193 };
194
195 - mpwm: mpwm@400E8000 {
196 + mpwm: mpwm@400e8000 {
197 compatible = "nxp,lpc3220-motor-pwm";
198 reg = <0x400E8000 0x78>;
199 status = "disabled";
200 @@ -394,7 +394,7 @@
201 #gpio-cells = <3>; /* bank, pin, flags */
202 };
203
204 - timer4: timer@4002C000 {
205 + timer4: timer@4002c000 {
206 compatible = "nxp,lpc3220-timer";
207 reg = <0x4002C000 0x1000>;
208 interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
209 @@ -412,7 +412,7 @@
210 status = "disabled";
211 };
212
213 - watchdog: watchdog@4003C000 {
214 + watchdog: watchdog@4003c000 {
215 compatible = "nxp,pnx4008-wdt";
216 reg = <0x4003C000 0x1000>;
217 clocks = <&clk LPC32XX_CLK_WDOG>;
218 @@ -451,7 +451,7 @@
219 status = "disabled";
220 };
221
222 - timer1: timer@4004C000 {
223 + timer1: timer@4004c000 {
224 compatible = "nxp,lpc3220-timer";
225 reg = <0x4004C000 0x1000>;
226 interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
227 @@ -475,7 +475,7 @@
228 status = "disabled";
229 };
230
231 - pwm1: pwm@4005C000 {
232 + pwm1: pwm@4005c000 {
233 compatible = "nxp,lpc3220-pwm";
234 reg = <0x4005C000 0x4>;
235 clocks = <&clk LPC32XX_CLK_PWM1>;
236 @@ -484,7 +484,7 @@
237 status = "disabled";
238 };
239
240 - pwm2: pwm@4005C004 {
241 + pwm2: pwm@4005c004 {
242 compatible = "nxp,lpc3220-pwm";
243 reg = <0x4005C004 0x4>;
244 clocks = <&clk LPC32XX_CLK_PWM2>;
245 diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi
246 index 22d775460767..dc125769fe85 100644
247 --- a/arch/arm/boot/dts/meson8b.dtsi
248 +++ b/arch/arm/boot/dts/meson8b.dtsi
249 @@ -270,9 +270,7 @@
250 groups = "eth_tx_clk",
251 "eth_tx_en",
252 "eth_txd1_0",
253 - "eth_txd1_1",
254 "eth_txd0_0",
255 - "eth_txd0_1",
256 "eth_rx_clk",
257 "eth_rx_dv",
258 "eth_rxd1",
259 @@ -281,7 +279,9 @@
260 "eth_mdc",
261 "eth_ref_clk",
262 "eth_txd2",
263 - "eth_txd3";
264 + "eth_txd3",
265 + "eth_rxd3",
266 + "eth_rxd2";
267 function = "ethernet";
268 bias-disable;
269 };
270 diff --git a/arch/arm/include/asm/barrier.h b/arch/arm/include/asm/barrier.h
271 index 69772e742a0a..83ae97c049d9 100644
272 --- a/arch/arm/include/asm/barrier.h
273 +++ b/arch/arm/include/asm/barrier.h
274 @@ -11,6 +11,8 @@
275 #define sev() __asm__ __volatile__ ("sev" : : : "memory")
276 #define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
277 #define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
278 +#else
279 +#define wfe() do { } while (0)
280 #endif
281
282 #if __LINUX_ARM_ARCH__ >= 7
283 diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
284 index 120f4c9bbfde..57fe73ea0f72 100644
285 --- a/arch/arm/include/asm/processor.h
286 +++ b/arch/arm/include/asm/processor.h
287 @@ -89,7 +89,11 @@ extern void release_thread(struct task_struct *);
288 unsigned long get_wchan(struct task_struct *p);
289
290 #if __LINUX_ARM_ARCH__ == 6 || defined(CONFIG_ARM_ERRATA_754327)
291 -#define cpu_relax() smp_mb()
292 +#define cpu_relax() \
293 + do { \
294 + smp_mb(); \
295 + __asm__ __volatile__("nop; nop; nop; nop; nop; nop; nop; nop; nop; nop;"); \
296 + } while (0)
297 #else
298 #define cpu_relax() barrier()
299 #endif
300 diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
301 index 187ccf6496ad..2cb00d15831b 100644
302 --- a/arch/arm/include/asm/v7m.h
303 +++ b/arch/arm/include/asm/v7m.h
304 @@ -49,7 +49,7 @@
305 * (0 -> msp; 1 -> psp). Bits [1:0] are fixed to 0b01.
306 */
307 #define EXC_RET_STACK_MASK 0x00000004
308 -#define EXC_RET_THREADMODE_PROCESSSTACK 0xfffffffd
309 +#define EXC_RET_THREADMODE_PROCESSSTACK (3 << 2)
310
311 /* Cache related definitions */
312
313 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
314 index 773424843d6e..62db1c9746cb 100644
315 --- a/arch/arm/kernel/entry-header.S
316 +++ b/arch/arm/kernel/entry-header.S
317 @@ -127,7 +127,8 @@
318 */
319 .macro v7m_exception_slow_exit ret_r0
320 cpsid i
321 - ldr lr, =EXC_RET_THREADMODE_PROCESSSTACK
322 + ldr lr, =exc_ret
323 + ldr lr, [lr]
324
325 @ read original r12, sp, lr, pc and xPSR
326 add r12, sp, #S_IP
327 diff --git a/arch/arm/kernel/entry-v7m.S b/arch/arm/kernel/entry-v7m.S
328 index abcf47848525..19d2dcd6530d 100644
329 --- a/arch/arm/kernel/entry-v7m.S
330 +++ b/arch/arm/kernel/entry-v7m.S
331 @@ -146,3 +146,7 @@ ENTRY(vector_table)
332 .rept CONFIG_CPU_V7M_NUM_IRQ
333 .long __irq_entry @ External Interrupts
334 .endr
335 + .align 2
336 + .globl exc_ret
337 +exc_ret:
338 + .space 4
339 diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
340 index dd2eb5f76b9f..76300f3813e8 100644
341 --- a/arch/arm/kernel/machine_kexec.c
342 +++ b/arch/arm/kernel/machine_kexec.c
343 @@ -91,8 +91,11 @@ void machine_crash_nonpanic_core(void *unused)
344
345 set_cpu_online(smp_processor_id(), false);
346 atomic_dec(&waiting_for_crash_ipi);
347 - while (1)
348 +
349 + while (1) {
350 cpu_relax();
351 + wfe();
352 + }
353 }
354
355 void crash_smp_send_stop(void)
356 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
357 index 1d6f5ea522f4..a3ce7c5365fa 100644
358 --- a/arch/arm/kernel/smp.c
359 +++ b/arch/arm/kernel/smp.c
360 @@ -604,8 +604,10 @@ static void ipi_cpu_stop(unsigned int cpu)
361 local_fiq_disable();
362 local_irq_disable();
363
364 - while (1)
365 + while (1) {
366 cpu_relax();
367 + wfe();
368 + }
369 }
370
371 static DEFINE_PER_CPU(struct completion *, cpu_completion);
372 diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
373 index 0bee233fef9a..314cfb232a63 100644
374 --- a/arch/arm/kernel/unwind.c
375 +++ b/arch/arm/kernel/unwind.c
376 @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[];
377 static const struct unwind_idx *__origin_unwind_idx;
378 extern const struct unwind_idx __stop_unwind_idx[];
379
380 -static DEFINE_SPINLOCK(unwind_lock);
381 +static DEFINE_RAW_SPINLOCK(unwind_lock);
382 static LIST_HEAD(unwind_tables);
383
384 /* Convert a prel31 symbol to an absolute address */
385 @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
386 /* module unwind tables */
387 struct unwind_table *table;
388
389 - spin_lock_irqsave(&unwind_lock, flags);
390 + raw_spin_lock_irqsave(&unwind_lock, flags);
391 list_for_each_entry(table, &unwind_tables, list) {
392 if (addr >= table->begin_addr &&
393 addr < table->end_addr) {
394 @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr)
395 break;
396 }
397 }
398 - spin_unlock_irqrestore(&unwind_lock, flags);
399 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
400 }
401
402 pr_debug("%s: idx = %p\n", __func__, idx);
403 @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
404 tab->begin_addr = text_addr;
405 tab->end_addr = text_addr + text_size;
406
407 - spin_lock_irqsave(&unwind_lock, flags);
408 + raw_spin_lock_irqsave(&unwind_lock, flags);
409 list_add_tail(&tab->list, &unwind_tables);
410 - spin_unlock_irqrestore(&unwind_lock, flags);
411 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
412
413 return tab;
414 }
415 @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab)
416 if (!tab)
417 return;
418
419 - spin_lock_irqsave(&unwind_lock, flags);
420 + raw_spin_lock_irqsave(&unwind_lock, flags);
421 list_del(&tab->list);
422 - spin_unlock_irqrestore(&unwind_lock, flags);
423 + raw_spin_unlock_irqrestore(&unwind_lock, flags);
424
425 kfree(tab);
426 }
427 diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
428 index ad25fd1872c7..0bff0176db2c 100644
429 --- a/arch/arm/lib/Makefile
430 +++ b/arch/arm/lib/Makefile
431 @@ -39,7 +39,7 @@ $(obj)/csumpartialcopy.o: $(obj)/csumpartialcopygeneric.S
432 $(obj)/csumpartialcopyuser.o: $(obj)/csumpartialcopygeneric.S
433
434 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
435 - NEON_FLAGS := -mfloat-abi=softfp -mfpu=neon
436 + NEON_FLAGS := -march=armv7-a -mfloat-abi=softfp -mfpu=neon
437 CFLAGS_xor-neon.o += $(NEON_FLAGS)
438 obj-$(CONFIG_XOR_BLOCKS) += xor-neon.o
439 endif
440 diff --git a/arch/arm/lib/xor-neon.c b/arch/arm/lib/xor-neon.c
441 index 2c40aeab3eaa..c691b901092f 100644
442 --- a/arch/arm/lib/xor-neon.c
443 +++ b/arch/arm/lib/xor-neon.c
444 @@ -14,7 +14,7 @@
445 MODULE_LICENSE("GPL");
446
447 #ifndef __ARM_NEON__
448 -#error You should compile this file with '-mfloat-abi=softfp -mfpu=neon'
449 +#error You should compile this file with '-march=armv7-a -mfloat-abi=softfp -mfpu=neon'
450 #endif
451
452 /*
453 diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
454 index 058a37e6d11c..fd6e0671f957 100644
455 --- a/arch/arm/mach-omap2/prm_common.c
456 +++ b/arch/arm/mach-omap2/prm_common.c
457 @@ -523,8 +523,10 @@ void omap_prm_reset_system(void)
458
459 prm_ll_data->reset_system();
460
461 - while (1)
462 + while (1) {
463 cpu_relax();
464 + wfe();
465 + }
466 }
467
468 /**
469 diff --git a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
470 index 8e50daa99151..dc526ef2e9b3 100644
471 --- a/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
472 +++ b/arch/arm/mach-shmobile/regulator-quirk-rcar-gen2.c
473 @@ -40,6 +40,7 @@
474 struct regulator_quirk {
475 struct list_head list;
476 const struct of_device_id *id;
477 + struct device_node *np;
478 struct of_phandle_args irq_args;
479 struct i2c_msg i2c_msg;
480 bool shared; /* IRQ line is shared */
481 @@ -101,6 +102,9 @@ static int regulator_quirk_notify(struct notifier_block *nb,
482 if (!pos->shared)
483 continue;
484
485 + if (pos->np->parent != client->dev.parent->of_node)
486 + continue;
487 +
488 dev_info(&client->dev, "clearing %s@0x%02x interrupts\n",
489 pos->id->compatible, pos->i2c_msg.addr);
490
491 @@ -165,6 +169,7 @@ static int __init rcar_gen2_regulator_quirk(void)
492 memcpy(&quirk->i2c_msg, id->data, sizeof(quirk->i2c_msg));
493
494 quirk->id = id;
495 + quirk->np = np;
496 quirk->i2c_msg.addr = addr;
497
498 ret = of_irq_parse_one(np, 0, argsa);
499 diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
500 index b03202cddddb..f74cdce6d4da 100644
501 --- a/arch/arm/mm/copypage-v4mc.c
502 +++ b/arch/arm/mm/copypage-v4mc.c
503 @@ -45,6 +45,7 @@ static void mc_copy_user_page(void *from, void *to)
504 int tmp;
505
506 asm volatile ("\
507 + .syntax unified\n\
508 ldmia %0!, {r2, r3, ip, lr} @ 4\n\
509 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
510 stmia %1!, {r2, r3, ip, lr} @ 4\n\
511 @@ -56,7 +57,7 @@ static void mc_copy_user_page(void *from, void *to)
512 ldmia %0!, {r2, r3, ip, lr} @ 4\n\
513 subs %2, %2, #1 @ 1\n\
514 stmia %1!, {r2, r3, ip, lr} @ 4\n\
515 - ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
516 + ldmiane %0!, {r2, r3, ip, lr} @ 4\n\
517 bne 1b @ "
518 : "+&r" (from), "+&r" (to), "=&r" (tmp)
519 : "2" (PAGE_SIZE / 64)
520 diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
521 index cd3e165afeed..6d336740aae4 100644
522 --- a/arch/arm/mm/copypage-v4wb.c
523 +++ b/arch/arm/mm/copypage-v4wb.c
524 @@ -27,6 +27,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
525 int tmp;
526
527 asm volatile ("\
528 + .syntax unified\n\
529 ldmia %1!, {r3, r4, ip, lr} @ 4\n\
530 1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
531 stmia %0!, {r3, r4, ip, lr} @ 4\n\
532 @@ -38,7 +39,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
533 ldmia %1!, {r3, r4, ip, lr} @ 4\n\
534 subs %2, %2, #1 @ 1\n\
535 stmia %0!, {r3, r4, ip, lr} @ 4\n\
536 - ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
537 + ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
538 bne 1b @ 1\n\
539 mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB"
540 : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
541 diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
542 index 8614572e1296..3851bb396442 100644
543 --- a/arch/arm/mm/copypage-v4wt.c
544 +++ b/arch/arm/mm/copypage-v4wt.c
545 @@ -25,6 +25,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
546 int tmp;
547
548 asm volatile ("\
549 + .syntax unified\n\
550 ldmia %1!, {r3, r4, ip, lr} @ 4\n\
551 1: stmia %0!, {r3, r4, ip, lr} @ 4\n\
552 ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
553 @@ -34,7 +35,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
554 ldmia %1!, {r3, r4, ip, lr} @ 4\n\
555 subs %2, %2, #1 @ 1\n\
556 stmia %0!, {r3, r4, ip, lr} @ 4\n\
557 - ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
558 + ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
559 bne 1b @ 1\n\
560 mcr p15, 0, %2, c7, c7, 0 @ flush ID cache"
561 : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
562 diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
563 index 47a5acc64433..92e84181933a 100644
564 --- a/arch/arm/mm/proc-v7m.S
565 +++ b/arch/arm/mm/proc-v7m.S
566 @@ -139,6 +139,9 @@ __v7m_setup_cont:
567 cpsie i
568 svc #0
569 1: cpsid i
570 + ldr r0, =exc_ret
571 + orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
572 + str lr, [r0]
573 ldmia sp, {r0-r3, r12}
574 str r5, [r12, #11 * 4] @ restore the original SVC vector entry
575 mov lr, r6 @ restore LR
576 diff --git a/arch/h8300/Makefile b/arch/h8300/Makefile
577 index f801f3708a89..ba0f26cfad61 100644
578 --- a/arch/h8300/Makefile
579 +++ b/arch/h8300/Makefile
580 @@ -27,7 +27,7 @@ KBUILD_LDFLAGS += $(ldflags-y)
581 CHECKFLAGS += -msize-long
582
583 ifeq ($(CROSS_COMPILE),)
584 -CROSS_COMPILE := h8300-unknown-linux-
585 +CROSS_COMPILE := $(call cc-cross-prefix, h8300-unknown-linux- h8300-linux-)
586 endif
587
588 core-y += arch/$(ARCH)/kernel/ arch/$(ARCH)/mm/
589 diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
590 index a4a718dbfec6..f85e2b01c3df 100644
591 --- a/arch/powerpc/include/asm/topology.h
592 +++ b/arch/powerpc/include/asm/topology.h
593 @@ -132,6 +132,8 @@ static inline void shared_proc_topology_init(void) {}
594 #define topology_sibling_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
595 #define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
596 #define topology_core_id(cpu) (cpu_to_core_id(cpu))
597 +
598 +int dlpar_cpu_readd(int cpu);
599 #endif
600 #endif
601
602 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
603 index 435927f549c4..a2c168b395d2 100644
604 --- a/arch/powerpc/kernel/entry_64.S
605 +++ b/arch/powerpc/kernel/entry_64.S
606 @@ -1002,6 +1002,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
607 ld r2,_NIP(r1)
608 mtspr SPRN_SRR0,r2
609
610 + /*
611 + * Leaving a stale exception_marker on the stack can confuse
612 + * the reliable stack unwinder later on. Clear it.
613 + */
614 + li r2,0
615 + std r2,STACK_FRAME_OVERHEAD-16(r1)
616 +
617 ld r0,GPR0(r1)
618 ld r2,GPR2(r1)
619 ld r3,GPR3(r1)
620 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
621 index 53151698bfe0..d9ac7d94656e 100644
622 --- a/arch/powerpc/kernel/ptrace.c
623 +++ b/arch/powerpc/kernel/ptrace.c
624 @@ -33,6 +33,7 @@
625 #include <linux/hw_breakpoint.h>
626 #include <linux/perf_event.h>
627 #include <linux/context_tracking.h>
628 +#include <linux/nospec.h>
629
630 #include <linux/uaccess.h>
631 #include <linux/pkeys.h>
632 @@ -274,6 +275,8 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
633 */
634 int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
635 {
636 + unsigned int regs_max;
637 +
638 if ((task->thread.regs == NULL) || !data)
639 return -EIO;
640
641 @@ -297,7 +300,9 @@ int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
642 }
643 #endif
644
645 - if (regno < (sizeof(struct user_pt_regs) / sizeof(unsigned long))) {
646 + regs_max = sizeof(struct user_pt_regs) / sizeof(unsigned long);
647 + if (regno < regs_max) {
648 + regno = array_index_nospec(regno, regs_max);
649 *data = ((unsigned long *)task->thread.regs)[regno];
650 return 0;
651 }
652 @@ -321,6 +326,7 @@ int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
653 return set_user_dscr(task, data);
654
655 if (regno <= PT_MAX_PUT_REG) {
656 + regno = array_index_nospec(regno, PT_MAX_PUT_REG + 1);
657 ((unsigned long *)task->thread.regs)[regno] = data;
658 return 0;
659 }
660 diff --git a/arch/powerpc/mm/hugetlbpage-radix.c b/arch/powerpc/mm/hugetlbpage-radix.c
661 index 2486bee0f93e..97c7a39ebc00 100644
662 --- a/arch/powerpc/mm/hugetlbpage-radix.c
663 +++ b/arch/powerpc/mm/hugetlbpage-radix.c
664 @@ -1,6 +1,7 @@
665 // SPDX-License-Identifier: GPL-2.0
666 #include <linux/mm.h>
667 #include <linux/hugetlb.h>
668 +#include <linux/security.h>
669 #include <asm/pgtable.h>
670 #include <asm/pgalloc.h>
671 #include <asm/cacheflush.h>
672 @@ -73,7 +74,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
673 if (addr) {
674 addr = ALIGN(addr, huge_page_size(h));
675 vma = find_vma(mm, addr);
676 - if (high_limit - len >= addr &&
677 + if (high_limit - len >= addr && addr >= mmap_min_addr &&
678 (!vma || addr + len <= vm_start_gap(vma)))
679 return addr;
680 }
681 @@ -83,7 +84,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
682 */
683 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
684 info.length = len;
685 - info.low_limit = PAGE_SIZE;
686 + info.low_limit = max(PAGE_SIZE, mmap_min_addr);
687 info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
688 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
689 info.align_offset = 0;
690 diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
691 index 87f0dd004295..b5d1c45c1475 100644
692 --- a/arch/powerpc/mm/numa.c
693 +++ b/arch/powerpc/mm/numa.c
694 @@ -1460,13 +1460,6 @@ static void reset_topology_timer(void)
695
696 #ifdef CONFIG_SMP
697
698 -static void stage_topology_update(int core_id)
699 -{
700 - cpumask_or(&cpu_associativity_changes_mask,
701 - &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
702 - reset_topology_timer();
703 -}
704 -
705 static int dt_update_callback(struct notifier_block *nb,
706 unsigned long action, void *data)
707 {
708 @@ -1479,7 +1472,7 @@ static int dt_update_callback(struct notifier_block *nb,
709 !of_prop_cmp(update->prop->name, "ibm,associativity")) {
710 u32 core_id;
711 of_property_read_u32(update->dn, "reg", &core_id);
712 - stage_topology_update(core_id);
713 + rc = dlpar_cpu_readd(core_id);
714 rc = NOTIFY_OK;
715 }
716 break;
717 diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
718 index 4a9a72d01c3c..35be81fd2dc2 100644
719 --- a/arch/powerpc/platforms/44x/Kconfig
720 +++ b/arch/powerpc/platforms/44x/Kconfig
721 @@ -180,6 +180,7 @@ config CURRITUCK
722 depends on PPC_47x
723 select SWIOTLB
724 select 476FPE
725 + select FORCE_PCI
726 select PPC4xx_PCI_EXPRESS
727 help
728 This option enables support for the IBM Currituck (476fpe) evaluation board
729 diff --git a/arch/powerpc/platforms/powernv/pci-ioda-tce.c b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
730 index 697449afb3f7..e28f03e1eb5e 100644
731 --- a/arch/powerpc/platforms/powernv/pci-ioda-tce.c
732 +++ b/arch/powerpc/platforms/powernv/pci-ioda-tce.c
733 @@ -313,7 +313,6 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
734 page_shift);
735 tbl->it_level_size = 1ULL << (level_shift - 3);
736 tbl->it_indirect_levels = levels - 1;
737 - tbl->it_allocated_size = total_allocated;
738 tbl->it_userspace = uas;
739 tbl->it_nid = nid;
740
741 diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
742 index 145373f0e5dc..2d62c58f9a4c 100644
743 --- a/arch/powerpc/platforms/powernv/pci-ioda.c
744 +++ b/arch/powerpc/platforms/powernv/pci-ioda.c
745 @@ -2594,8 +2594,13 @@ static long pnv_pci_ioda2_create_table_userspace(
746 int num, __u32 page_shift, __u64 window_size, __u32 levels,
747 struct iommu_table **ptbl)
748 {
749 - return pnv_pci_ioda2_create_table(table_group,
750 + long ret = pnv_pci_ioda2_create_table(table_group,
751 num, page_shift, window_size, levels, true, ptbl);
752 +
753 + if (!ret)
754 + (*ptbl)->it_allocated_size = pnv_pci_ioda2_get_table_size(
755 + page_shift, window_size, levels);
756 + return ret;
757 }
758
759 static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)
760 diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
761 index 2f8e62163602..97feb6e79f1a 100644
762 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
763 +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
764 @@ -802,6 +802,25 @@ static int dlpar_cpu_add_by_count(u32 cpus_to_add)
765 return rc;
766 }
767
768 +int dlpar_cpu_readd(int cpu)
769 +{
770 + struct device_node *dn;
771 + struct device *dev;
772 + u32 drc_index;
773 + int rc;
774 +
775 + dev = get_cpu_device(cpu);
776 + dn = dev->of_node;
777 +
778 + rc = of_property_read_u32(dn, "ibm,my-drc-index", &drc_index);
779 +
780 + rc = dlpar_cpu_remove_by_index(drc_index);
781 + if (!rc)
782 + rc = dlpar_cpu_add(drc_index);
783 +
784 + return rc;
785 +}
786 +
787 int dlpar_cpu(struct pseries_hp_errorlog *hp_elog)
788 {
789 u32 count, drc_index;
790 diff --git a/arch/powerpc/xmon/ppc-dis.c b/arch/powerpc/xmon/ppc-dis.c
791 index 9deea5ee13f6..27f1e6415036 100644
792 --- a/arch/powerpc/xmon/ppc-dis.c
793 +++ b/arch/powerpc/xmon/ppc-dis.c
794 @@ -158,7 +158,7 @@ int print_insn_powerpc (unsigned long insn, unsigned long memaddr)
795 dialect |= (PPC_OPCODE_POWER5 | PPC_OPCODE_POWER6 | PPC_OPCODE_POWER7
796 | PPC_OPCODE_POWER8 | PPC_OPCODE_POWER9 | PPC_OPCODE_HTM
797 | PPC_OPCODE_ALTIVEC | PPC_OPCODE_ALTIVEC2
798 - | PPC_OPCODE_VSX | PPC_OPCODE_VSX3),
799 + | PPC_OPCODE_VSX | PPC_OPCODE_VSX3);
800
801 /* Get the major opcode of the insn. */
802 opcode = NULL;
803 diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
804 index bfabeb1889cc..1266194afb02 100644
805 --- a/arch/s390/kernel/perf_cpum_sf.c
806 +++ b/arch/s390/kernel/perf_cpum_sf.c
807 @@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb)
808
809 /*
810 * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling
811 - * @cpu: On which to allocate, -1 means current
812 + * @event: Event the buffer is setup for, event->cpu == -1 means current
813 * @pages: Array of pointers to buffer pages passed from perf core
814 * @nr_pages: Total pages
815 * @snapshot: Flag for snapshot mode
816 @@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb)
817 *
818 * Return the private AUX buffer structure if success or NULL if fails.
819 */
820 -static void *aux_buffer_setup(int cpu, void **pages, int nr_pages,
821 - bool snapshot)
822 +static void *aux_buffer_setup(struct perf_event *event, void **pages,
823 + int nr_pages, bool snapshot)
824 {
825 struct sf_buffer *sfb;
826 struct aux_buffer *aux;
827 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
828 index 9b5adae9cc40..e2839b5c246c 100644
829 --- a/arch/x86/boot/Makefile
830 +++ b/arch/x86/boot/Makefile
831 @@ -100,7 +100,7 @@ $(obj)/zoffset.h: $(obj)/compressed/vmlinux FORCE
832 AFLAGS_header.o += -I$(objtree)/$(obj)
833 $(obj)/header.o: $(obj)/zoffset.h
834
835 -LDFLAGS_setup.elf := -T
836 +LDFLAGS_setup.elf := -m elf_i386 -T
837 $(obj)/setup.elf: $(src)/setup.ld $(SETUP_OBJS) FORCE
838 $(call if_changed,ld)
839
840 diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
841 index a01ef1b0f883..7cdd7b13bbda 100644
842 --- a/arch/x86/events/intel/bts.c
843 +++ b/arch/x86/events/intel/bts.c
844 @@ -77,10 +77,12 @@ static size_t buf_size(struct page *page)
845 }
846
847 static void *
848 -bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite)
849 +bts_buffer_setup_aux(struct perf_event *event, void **pages,
850 + int nr_pages, bool overwrite)
851 {
852 struct bts_buffer *buf;
853 struct page *page;
854 + int cpu = event->cpu;
855 int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
856 unsigned long offset;
857 size_t size = nr_pages << PAGE_SHIFT;
858 diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
859 index 9494ca68fd9d..c0e86ff21f81 100644
860 --- a/arch/x86/events/intel/pt.c
861 +++ b/arch/x86/events/intel/pt.c
862 @@ -1114,10 +1114,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages,
863 * Return: Our private PT buffer structure.
864 */
865 static void *
866 -pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot)
867 +pt_buffer_setup_aux(struct perf_event *event, void **pages,
868 + int nr_pages, bool snapshot)
869 {
870 struct pt_buffer *buf;
871 - int node, ret;
872 + int node, ret, cpu = event->cpu;
873
874 if (!nr_pages)
875 return NULL;
876 diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
877 index 7abb09e2eeb8..d3f42b6bbdac 100644
878 --- a/arch/x86/hyperv/hv_init.c
879 +++ b/arch/x86/hyperv/hv_init.c
880 @@ -406,6 +406,13 @@ void hyperv_cleanup(void)
881 /* Reset our OS id */
882 wrmsrl(HV_X64_MSR_GUEST_OS_ID, 0);
883
884 + /*
885 + * Reset hypercall page reference before reset the page,
886 + * let hypercall operations fail safely rather than
887 + * panic the kernel for using invalid hypercall page
888 + */
889 + hv_hypercall_pg = NULL;
890 +
891 /* Reset the hypercall page */
892 hypercall_msr.as_uint64 = 0;
893 wrmsrl(HV_X64_MSR_HYPERCALL, hypercall_msr.as_uint64);
894 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
895 index c1334aaaa78d..f3aed639dccd 100644
896 --- a/arch/x86/include/asm/uaccess.h
897 +++ b/arch/x86/include/asm/uaccess.h
898 @@ -76,7 +76,7 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
899 #endif
900
901 /**
902 - * access_ok: - Checks if a user space pointer is valid
903 + * access_ok - Checks if a user space pointer is valid
904 * @addr: User space pointer to start of block to check
905 * @size: Size of block to check
906 *
907 @@ -85,12 +85,12 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
908 *
909 * Checks if a pointer to a block of memory in user space is valid.
910 *
911 - * Returns true (nonzero) if the memory block may be valid, false (zero)
912 - * if it is definitely invalid.
913 - *
914 * Note that, depending on architecture, this function probably just
915 * checks that the pointer is in the user space range - after calling
916 * this function, memory access functions may still return -EFAULT.
917 + *
918 + * Return: true (nonzero) if the memory block may be valid, false (zero)
919 + * if it is definitely invalid.
920 */
921 #define access_ok(addr, size) \
922 ({ \
923 @@ -135,7 +135,7 @@ extern int __get_user_bad(void);
924 __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
925
926 /**
927 - * get_user: - Get a simple variable from user space.
928 + * get_user - Get a simple variable from user space.
929 * @x: Variable to store result.
930 * @ptr: Source address, in user space.
931 *
932 @@ -149,7 +149,7 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
933 * @ptr must have pointer-to-simple-variable type, and the result of
934 * dereferencing @ptr must be assignable to @x without a cast.
935 *
936 - * Returns zero on success, or -EFAULT on error.
937 + * Return: zero on success, or -EFAULT on error.
938 * On error, the variable @x is set to zero.
939 */
940 /*
941 @@ -227,7 +227,7 @@ extern void __put_user_4(void);
942 extern void __put_user_8(void);
943
944 /**
945 - * put_user: - Write a simple value into user space.
946 + * put_user - Write a simple value into user space.
947 * @x: Value to copy to user space.
948 * @ptr: Destination address, in user space.
949 *
950 @@ -241,7 +241,7 @@ extern void __put_user_8(void);
951 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
952 * to the result of dereferencing @ptr.
953 *
954 - * Returns zero on success, or -EFAULT on error.
955 + * Return: zero on success, or -EFAULT on error.
956 */
957 #define put_user(x, ptr) \
958 ({ \
959 @@ -503,7 +503,7 @@ struct __large_struct { unsigned long buf[100]; };
960 } while (0)
961
962 /**
963 - * __get_user: - Get a simple variable from user space, with less checking.
964 + * __get_user - Get a simple variable from user space, with less checking.
965 * @x: Variable to store result.
966 * @ptr: Source address, in user space.
967 *
968 @@ -520,7 +520,7 @@ struct __large_struct { unsigned long buf[100]; };
969 * Caller must check the pointer with access_ok() before calling this
970 * function.
971 *
972 - * Returns zero on success, or -EFAULT on error.
973 + * Return: zero on success, or -EFAULT on error.
974 * On error, the variable @x is set to zero.
975 */
976
977 @@ -528,7 +528,7 @@ struct __large_struct { unsigned long buf[100]; };
978 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
979
980 /**
981 - * __put_user: - Write a simple value into user space, with less checking.
982 + * __put_user - Write a simple value into user space, with less checking.
983 * @x: Value to copy to user space.
984 * @ptr: Destination address, in user space.
985 *
986 @@ -545,7 +545,7 @@ struct __large_struct { unsigned long buf[100]; };
987 * Caller must check the pointer with access_ok() before calling this
988 * function.
989 *
990 - * Returns zero on success, or -EFAULT on error.
991 + * Return: zero on success, or -EFAULT on error.
992 */
993
994 #define __put_user(x, ptr) \
995 diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
996 index 53917a3ebf94..1f3b77367948 100644
997 --- a/arch/x86/kernel/kexec-bzimage64.c
998 +++ b/arch/x86/kernel/kexec-bzimage64.c
999 @@ -218,6 +218,9 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
1000 params->screen_info.ext_mem_k = 0;
1001 params->alt_mem_k = 0;
1002
1003 + /* Always fill in RSDP: it is either 0 or a valid value */
1004 + params->acpi_rsdp_addr = boot_params.acpi_rsdp_addr;
1005 +
1006 /* Default APM info */
1007 memset(&params->apm_bios_info, 0, sizeof(params->apm_bios_info));
1008
1009 @@ -256,7 +259,6 @@ setup_boot_parameters(struct kimage *image, struct boot_params *params,
1010 setup_efi_state(params, params_load_addr, efi_map_offset, efi_map_sz,
1011 efi_setup_data_offset);
1012 #endif
1013 -
1014 /* Setup EDD info */
1015 memcpy(params->eddbuf, boot_params.eddbuf,
1016 EDDMAXNR * sizeof(struct edd_info));
1017 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
1018 index 0d618ee634ac..ee3b5c7d662e 100644
1019 --- a/arch/x86/kernel/vmlinux.lds.S
1020 +++ b/arch/x86/kernel/vmlinux.lds.S
1021 @@ -401,7 +401,7 @@ SECTIONS
1022 * Per-cpu symbols which need to be offset from __per_cpu_load
1023 * for the boot processor.
1024 */
1025 -#define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
1026 +#define INIT_PER_CPU(x) init_per_cpu__##x = ABSOLUTE(x) + __per_cpu_load
1027 INIT_PER_CPU(gdt_page);
1028 INIT_PER_CPU(irq_stack_union);
1029
1030 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
1031 index bfd94e7812fc..7d290777246d 100644
1032 --- a/arch/x86/lib/usercopy_32.c
1033 +++ b/arch/x86/lib/usercopy_32.c
1034 @@ -54,13 +54,13 @@ do { \
1035 } while (0)
1036
1037 /**
1038 - * clear_user: - Zero a block of memory in user space.
1039 + * clear_user - Zero a block of memory in user space.
1040 * @to: Destination address, in user space.
1041 * @n: Number of bytes to zero.
1042 *
1043 * Zero a block of memory in user space.
1044 *
1045 - * Returns number of bytes that could not be cleared.
1046 + * Return: number of bytes that could not be cleared.
1047 * On success, this will be zero.
1048 */
1049 unsigned long
1050 @@ -74,14 +74,14 @@ clear_user(void __user *to, unsigned long n)
1051 EXPORT_SYMBOL(clear_user);
1052
1053 /**
1054 - * __clear_user: - Zero a block of memory in user space, with less checking.
1055 + * __clear_user - Zero a block of memory in user space, with less checking.
1056 * @to: Destination address, in user space.
1057 * @n: Number of bytes to zero.
1058 *
1059 * Zero a block of memory in user space. Caller must check
1060 * the specified block with access_ok() before calling this function.
1061 *
1062 - * Returns number of bytes that could not be cleared.
1063 + * Return: number of bytes that could not be cleared.
1064 * On success, this will be zero.
1065 */
1066 unsigned long
1067 diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
1068 index 17456a1d3f04..6c571ae86947 100644
1069 --- a/arch/x86/platform/efi/quirks.c
1070 +++ b/arch/x86/platform/efi/quirks.c
1071 @@ -717,7 +717,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
1072 * "efi_mm" cannot be used to check if the page fault had occurred
1073 * in the firmware context because efi=old_map doesn't use efi_pgd.
1074 */
1075 - if (efi_rts_work.efi_rts_id == NONE)
1076 + if (efi_rts_work.efi_rts_id == EFI_NONE)
1077 return;
1078
1079 /*
1080 @@ -742,7 +742,7 @@ void efi_recover_from_page_fault(unsigned long phys_addr)
1081 * because this case occurs *very* rarely and hence could be improved
1082 * on a need by basis.
1083 */
1084 - if (efi_rts_work.efi_rts_id == RESET_SYSTEM) {
1085 + if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) {
1086 pr_info("efi_reset_system() buggy! Reboot through BIOS\n");
1087 machine_real_restart(MRR_BIOS);
1088 return;
1089 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
1090 index 4463fa72db94..96cb20de08af 100644
1091 --- a/arch/x86/realmode/rm/Makefile
1092 +++ b/arch/x86/realmode/rm/Makefile
1093 @@ -47,7 +47,7 @@ $(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
1094 targets += realmode.lds
1095 $(obj)/realmode.lds: $(obj)/pasyms.h
1096
1097 -LDFLAGS_realmode.elf := --emit-relocs -T
1098 +LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
1099 CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
1100
1101 targets += realmode.elf
1102 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
1103 index cd307767a134..e5ed28629271 100644
1104 --- a/block/bfq-iosched.c
1105 +++ b/block/bfq-iosched.c
1106 @@ -747,6 +747,7 @@ void bfq_weights_tree_add(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1107
1108 inc_counter:
1109 bfqq->weight_counter->num_active++;
1110 + bfqq->ref++;
1111 }
1112
1113 /*
1114 @@ -771,6 +772,7 @@ void __bfq_weights_tree_remove(struct bfq_data *bfqd,
1115
1116 reset_entity_pointer:
1117 bfqq->weight_counter = NULL;
1118 + bfq_put_queue(bfqq);
1119 }
1120
1121 /*
1122 @@ -782,9 +784,6 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
1123 {
1124 struct bfq_entity *entity = bfqq->entity.parent;
1125
1126 - __bfq_weights_tree_remove(bfqd, bfqq,
1127 - &bfqd->queue_weights_tree);
1128 -
1129 for_each_entity(entity) {
1130 struct bfq_sched_data *sd = entity->my_sched_data;
1131
1132 @@ -818,6 +817,15 @@ void bfq_weights_tree_remove(struct bfq_data *bfqd,
1133 bfqd->num_groups_with_pending_reqs--;
1134 }
1135 }
1136 +
1137 + /*
1138 + * Next function is invoked last, because it causes bfqq to be
1139 + * freed if the following holds: bfqq is not in service and
1140 + * has no dispatched request. DO NOT use bfqq after the next
1141 + * function invocation.
1142 + */
1143 + __bfq_weights_tree_remove(bfqd, bfqq,
1144 + &bfqd->queue_weights_tree);
1145 }
1146
1147 /*
1148 @@ -1011,7 +1019,8 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd,
1149
1150 static int bfqq_process_refs(struct bfq_queue *bfqq)
1151 {
1152 - return bfqq->ref - bfqq->allocated - bfqq->entity.on_st;
1153 + return bfqq->ref - bfqq->allocated - bfqq->entity.on_st -
1154 + (bfqq->weight_counter != NULL);
1155 }
1156
1157 /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */
1158 @@ -2224,7 +2233,8 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1159
1160 if (in_service_bfqq && in_service_bfqq != bfqq &&
1161 likely(in_service_bfqq != &bfqd->oom_bfqq) &&
1162 - bfq_rq_close_to_sector(io_struct, request, bfqd->last_position) &&
1163 + bfq_rq_close_to_sector(io_struct, request,
1164 + bfqd->in_serv_last_pos) &&
1165 bfqq->entity.parent == in_service_bfqq->entity.parent &&
1166 bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
1167 new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
1168 @@ -2764,6 +2774,8 @@ update_rate_and_reset:
1169 bfq_update_rate_reset(bfqd, rq);
1170 update_last_values:
1171 bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
1172 + if (RQ_BFQQ(rq) == bfqd->in_service_queue)
1173 + bfqd->in_serv_last_pos = bfqd->last_position;
1174 bfqd->last_dispatch = now_ns;
1175 }
1176
1177 diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
1178 index 0b02bf302de0..746bd570b85a 100644
1179 --- a/block/bfq-iosched.h
1180 +++ b/block/bfq-iosched.h
1181 @@ -537,6 +537,9 @@ struct bfq_data {
1182 /* on-disk position of the last served request */
1183 sector_t last_position;
1184
1185 + /* position of the last served request for the in-service queue */
1186 + sector_t in_serv_last_pos;
1187 +
1188 /* time of last request completion (ns) */
1189 u64 last_completion;
1190
1191 diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
1192 index 72adbbe975d5..4aab1a8191f0 100644
1193 --- a/block/bfq-wf2q.c
1194 +++ b/block/bfq-wf2q.c
1195 @@ -1667,15 +1667,15 @@ void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
1196
1197 bfqd->busy_queues--;
1198
1199 - if (!bfqq->dispatched)
1200 - bfq_weights_tree_remove(bfqd, bfqq);
1201 -
1202 if (bfqq->wr_coeff > 1)
1203 bfqd->wr_busy_queues--;
1204
1205 bfqg_stats_update_dequeue(bfqq_group(bfqq));
1206
1207 bfq_deactivate_bfqq(bfqd, bfqq, true, expiration);
1208 +
1209 + if (!bfqq->dispatched)
1210 + bfq_weights_tree_remove(bfqd, bfqq);
1211 }
1212
1213 /*
1214 diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
1215 index f0b52266b3ac..d73afb562ad9 100644
1216 --- a/drivers/acpi/acpi_video.c
1217 +++ b/drivers/acpi/acpi_video.c
1218 @@ -2124,21 +2124,29 @@ static int __init intel_opregion_present(void)
1219 return opregion;
1220 }
1221
1222 +/* Check if the chassis-type indicates there is no builtin LCD panel */
1223 static bool dmi_is_desktop(void)
1224 {
1225 const char *chassis_type;
1226 + unsigned long type;
1227
1228 chassis_type = dmi_get_system_info(DMI_CHASSIS_TYPE);
1229 if (!chassis_type)
1230 return false;
1231
1232 - if (!strcmp(chassis_type, "3") || /* 3: Desktop */
1233 - !strcmp(chassis_type, "4") || /* 4: Low Profile Desktop */
1234 - !strcmp(chassis_type, "5") || /* 5: Pizza Box */
1235 - !strcmp(chassis_type, "6") || /* 6: Mini Tower */
1236 - !strcmp(chassis_type, "7") || /* 7: Tower */
1237 - !strcmp(chassis_type, "11")) /* 11: Main Server Chassis */
1238 + if (kstrtoul(chassis_type, 10, &type) != 0)
1239 + return false;
1240 +
1241 + switch (type) {
1242 + case 0x03: /* Desktop */
1243 + case 0x04: /* Low Profile Desktop */
1244 + case 0x05: /* Pizza Box */
1245 + case 0x06: /* Mini Tower */
1246 + case 0x07: /* Tower */
1247 + case 0x10: /* Lunch Box */
1248 + case 0x11: /* Main Server Chassis */
1249 return true;
1250 + }
1251
1252 return false;
1253 }
1254 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
1255 index 2faefdd6f420..9a8d83bc1e75 100644
1256 --- a/drivers/block/loop.c
1257 +++ b/drivers/block/loop.c
1258 @@ -1089,16 +1089,12 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
1259 kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
1260 }
1261 mapping_set_gfp_mask(filp->f_mapping, gfp);
1262 - lo->lo_state = Lo_unbound;
1263 /* This is safe: open() is still holding a reference. */
1264 module_put(THIS_MODULE);
1265 blk_mq_unfreeze_queue(lo->lo_queue);
1266
1267 partscan = lo->lo_flags & LO_FLAGS_PARTSCAN && bdev;
1268 lo_number = lo->lo_number;
1269 - lo->lo_flags = 0;
1270 - if (!part_shift)
1271 - lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1272 loop_unprepare_queue(lo);
1273 out_unlock:
1274 mutex_unlock(&loop_ctl_mutex);
1275 @@ -1120,6 +1116,23 @@ out_unlock:
1276 /* Device is gone, no point in returning error */
1277 err = 0;
1278 }
1279 +
1280 + /*
1281 + * lo->lo_state is set to Lo_unbound here after above partscan has
1282 + * finished.
1283 + *
1284 + * There cannot be anybody else entering __loop_clr_fd() as
1285 + * lo->lo_backing_file is already cleared and Lo_rundown state
1286 + * protects us from all the other places trying to change the 'lo'
1287 + * device.
1288 + */
1289 + mutex_lock(&loop_ctl_mutex);
1290 + lo->lo_flags = 0;
1291 + if (!part_shift)
1292 + lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
1293 + lo->lo_state = Lo_unbound;
1294 + mutex_unlock(&loop_ctl_mutex);
1295 +
1296 /*
1297 * Need not hold loop_ctl_mutex to fput backing file.
1298 * Calling fput holding loop_ctl_mutex triggers a circular
1299 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
1300 index 614ecdbb4ab7..933268b8d6a5 100644
1301 --- a/drivers/cdrom/cdrom.c
1302 +++ b/drivers/cdrom/cdrom.c
1303 @@ -265,6 +265,7 @@
1304 /* #define ERRLOGMASK (CD_WARNING|CD_OPEN|CD_COUNT_TRACKS|CD_CLOSE) */
1305 /* #define ERRLOGMASK (CD_WARNING|CD_REG_UNREG|CD_DO_IOCTL|CD_OPEN|CD_CLOSE|CD_COUNT_TRACKS) */
1306
1307 +#include <linux/atomic.h>
1308 #include <linux/module.h>
1309 #include <linux/fs.h>
1310 #include <linux/major.h>
1311 @@ -3692,9 +3693,9 @@ static struct ctl_table_header *cdrom_sysctl_header;
1312
1313 static void cdrom_sysctl_register(void)
1314 {
1315 - static int initialized;
1316 + static atomic_t initialized = ATOMIC_INIT(0);
1317
1318 - if (initialized == 1)
1319 + if (!atomic_add_unless(&initialized, 1, 1))
1320 return;
1321
1322 cdrom_sysctl_header = register_sysctl_table(cdrom_root_table);
1323 @@ -3705,8 +3706,6 @@ static void cdrom_sysctl_register(void)
1324 cdrom_sysctl_settings.debug = debug;
1325 cdrom_sysctl_settings.lock = lockdoor;
1326 cdrom_sysctl_settings.check = check_media_type;
1327 -
1328 - initialized = 1;
1329 }
1330
1331 static void cdrom_sysctl_unregister(void)
1332 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
1333 index 4a22b4b41aef..9bffcd37cc7b 100644
1334 --- a/drivers/char/hpet.c
1335 +++ b/drivers/char/hpet.c
1336 @@ -377,7 +377,7 @@ static __init int hpet_mmap_enable(char *str)
1337 pr_info("HPET mmap %s\n", hpet_mmap_enabled ? "enabled" : "disabled");
1338 return 1;
1339 }
1340 -__setup("hpet_mmap", hpet_mmap_enable);
1341 +__setup("hpet_mmap=", hpet_mmap_enable);
1342
1343 static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
1344 {
1345 diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
1346 index b89df66ea1ae..7abd604e938c 100644
1347 --- a/drivers/char/hw_random/virtio-rng.c
1348 +++ b/drivers/char/hw_random/virtio-rng.c
1349 @@ -73,7 +73,7 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
1350
1351 if (!vi->busy) {
1352 vi->busy = true;
1353 - init_completion(&vi->have_data);
1354 + reinit_completion(&vi->have_data);
1355 register_buffer(vi, buf, size);
1356 }
1357
1358 diff --git a/drivers/clk/clk-fractional-divider.c b/drivers/clk/clk-fractional-divider.c
1359 index 545dceec0bbf..fdfe2e423d15 100644
1360 --- a/drivers/clk/clk-fractional-divider.c
1361 +++ b/drivers/clk/clk-fractional-divider.c
1362 @@ -79,7 +79,7 @@ static long clk_fd_round_rate(struct clk_hw *hw, unsigned long rate,
1363 unsigned long m, n;
1364 u64 ret;
1365
1366 - if (!rate || rate >= *parent_rate)
1367 + if (!rate || (!clk_hw_can_set_rate_parent(hw) && rate >= *parent_rate))
1368 return *parent_rate;
1369
1370 if (fd->approximation)
1371 diff --git a/drivers/clk/meson/meson-aoclk.c b/drivers/clk/meson/meson-aoclk.c
1372 index f965845917e3..258c8d259ea1 100644
1373 --- a/drivers/clk/meson/meson-aoclk.c
1374 +++ b/drivers/clk/meson/meson-aoclk.c
1375 @@ -65,15 +65,20 @@ int meson_aoclkc_probe(struct platform_device *pdev)
1376 return ret;
1377 }
1378
1379 - /*
1380 - * Populate regmap and register all clks
1381 - */
1382 - for (clkid = 0; clkid < data->num_clks; clkid++) {
1383 + /* Populate regmap */
1384 + for (clkid = 0; clkid < data->num_clks; clkid++)
1385 data->clks[clkid]->map = regmap;
1386
1387 + /* Register all clks */
1388 + for (clkid = 0; clkid < data->hw_data->num; clkid++) {
1389 + if (!data->hw_data->hws[clkid])
1390 + continue;
1391 +
1392 ret = devm_clk_hw_register(dev, data->hw_data->hws[clkid]);
1393 - if (ret)
1394 + if (ret) {
1395 + dev_err(dev, "Clock registration failed\n");
1396 return ret;
1397 + }
1398 }
1399
1400 return devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get,
1401 diff --git a/drivers/clk/rockchip/clk-rk3328.c b/drivers/clk/rockchip/clk-rk3328.c
1402 index faa94adb2a37..65ab5c2f48b0 100644
1403 --- a/drivers/clk/rockchip/clk-rk3328.c
1404 +++ b/drivers/clk/rockchip/clk-rk3328.c
1405 @@ -78,17 +78,17 @@ static struct rockchip_pll_rate_table rk3328_pll_rates[] = {
1406
1407 static struct rockchip_pll_rate_table rk3328_pll_frac_rates[] = {
1408 /* _mhz, _refdiv, _fbdiv, _postdiv1, _postdiv2, _dsmpd, _frac */
1409 - RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134217),
1410 + RK3036_PLL_RATE(1016064000, 3, 127, 1, 1, 0, 134218),
1411 /* vco = 1016064000 */
1412 - RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671088),
1413 + RK3036_PLL_RATE(983040000, 24, 983, 1, 1, 0, 671089),
1414 /* vco = 983040000 */
1415 - RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671088),
1416 + RK3036_PLL_RATE(491520000, 24, 983, 2, 1, 0, 671089),
1417 /* vco = 983040000 */
1418 - RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671088),
1419 + RK3036_PLL_RATE(61440000, 6, 215, 7, 2, 0, 671089),
1420 /* vco = 860156000 */
1421 - RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797894),
1422 + RK3036_PLL_RATE(56448000, 12, 451, 4, 4, 0, 9797895),
1423 /* vco = 903168000 */
1424 - RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066329),
1425 + RK3036_PLL_RATE(40960000, 12, 409, 4, 5, 0, 10066330),
1426 /* vco = 819200000 */
1427 { /* sentinel */ },
1428 };
1429 diff --git a/drivers/clk/ti/clkctrl.c b/drivers/clk/ti/clkctrl.c
1430 index 40630eb950fc..85d7f301149b 100644
1431 --- a/drivers/clk/ti/clkctrl.c
1432 +++ b/drivers/clk/ti/clkctrl.c
1433 @@ -530,7 +530,7 @@ static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
1434 * Create default clkdm name, replace _cm from end of parent
1435 * node name with _clkdm
1436 */
1437 - provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
1438 + provider->clkdm_name[strlen(provider->clkdm_name) - 2] = 0;
1439 } else {
1440 provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
1441 if (!provider->clkdm_name) {
1442 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
1443 index d62fd374d5c7..c72258a44ba4 100644
1444 --- a/drivers/cpufreq/acpi-cpufreq.c
1445 +++ b/drivers/cpufreq/acpi-cpufreq.c
1446 @@ -916,8 +916,10 @@ static void __init acpi_cpufreq_boost_init(void)
1447 {
1448 int ret;
1449
1450 - if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)))
1451 + if (!(boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA))) {
1452 + pr_debug("Boost capabilities not present in the processor\n");
1453 return;
1454 + }
1455
1456 acpi_cpufreq_driver.set_boost = set_boost;
1457 acpi_cpufreq_driver.boost_enabled = boost_state(0);
1458 diff --git a/drivers/crypto/amcc/crypto4xx_trng.c b/drivers/crypto/amcc/crypto4xx_trng.c
1459 index 5e63742b0d22..53ab1f140a26 100644
1460 --- a/drivers/crypto/amcc/crypto4xx_trng.c
1461 +++ b/drivers/crypto/amcc/crypto4xx_trng.c
1462 @@ -80,8 +80,10 @@ void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev)
1463
1464 /* Find the TRNG device node and map it */
1465 trng = of_find_matching_node(NULL, ppc4xx_trng_match);
1466 - if (!trng || !of_device_is_available(trng))
1467 + if (!trng || !of_device_is_available(trng)) {
1468 + of_node_put(trng);
1469 return;
1470 + }
1471
1472 dev->trng_base = of_iomap(trng, 0);
1473 of_node_put(trng);
1474 diff --git a/drivers/crypto/cavium/zip/zip_main.c b/drivers/crypto/cavium/zip/zip_main.c
1475 index be055b9547f6..6183f9128a8a 100644
1476 --- a/drivers/crypto/cavium/zip/zip_main.c
1477 +++ b/drivers/crypto/cavium/zip/zip_main.c
1478 @@ -351,6 +351,7 @@ static struct pci_driver zip_driver = {
1479
1480 static struct crypto_alg zip_comp_deflate = {
1481 .cra_name = "deflate",
1482 + .cra_driver_name = "deflate-cavium",
1483 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
1484 .cra_ctxsize = sizeof(struct zip_kernel_ctx),
1485 .cra_priority = 300,
1486 @@ -365,6 +366,7 @@ static struct crypto_alg zip_comp_deflate = {
1487
1488 static struct crypto_alg zip_comp_lzs = {
1489 .cra_name = "lzs",
1490 + .cra_driver_name = "lzs-cavium",
1491 .cra_flags = CRYPTO_ALG_TYPE_COMPRESS,
1492 .cra_ctxsize = sizeof(struct zip_kernel_ctx),
1493 .cra_priority = 300,
1494 @@ -384,7 +386,7 @@ static struct scomp_alg zip_scomp_deflate = {
1495 .decompress = zip_scomp_decompress,
1496 .base = {
1497 .cra_name = "deflate",
1498 - .cra_driver_name = "deflate-scomp",
1499 + .cra_driver_name = "deflate-scomp-cavium",
1500 .cra_module = THIS_MODULE,
1501 .cra_priority = 300,
1502 }
1503 @@ -397,7 +399,7 @@ static struct scomp_alg zip_scomp_lzs = {
1504 .decompress = zip_scomp_decompress,
1505 .base = {
1506 .cra_name = "lzs",
1507 - .cra_driver_name = "lzs-scomp",
1508 + .cra_driver_name = "lzs-scomp-cavium",
1509 .cra_module = THIS_MODULE,
1510 .cra_priority = 300,
1511 }
1512 diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
1513 index 4a09af3cd546..7b9a7fb28bb9 100644
1514 --- a/drivers/dma/imx-dma.c
1515 +++ b/drivers/dma/imx-dma.c
1516 @@ -285,7 +285,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
1517 struct scatterlist *sg = d->sg;
1518 unsigned long now;
1519
1520 - now = min(d->len, sg_dma_len(sg));
1521 + now = min_t(size_t, d->len, sg_dma_len(sg));
1522 if (d->len != IMX_DMA_LENGTH_LOOP)
1523 d->len -= now;
1524
1525 diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
1526 index 43d4b00b8138..411f91fde734 100644
1527 --- a/drivers/dma/qcom/hidma.c
1528 +++ b/drivers/dma/qcom/hidma.c
1529 @@ -138,24 +138,25 @@ static void hidma_process_completed(struct hidma_chan *mchan)
1530 desc = &mdesc->desc;
1531 last_cookie = desc->cookie;
1532
1533 + llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
1534 +
1535 spin_lock_irqsave(&mchan->lock, irqflags);
1536 + if (llstat == DMA_COMPLETE) {
1537 + mchan->last_success = last_cookie;
1538 + result.result = DMA_TRANS_NOERROR;
1539 + } else {
1540 + result.result = DMA_TRANS_ABORTED;
1541 + }
1542 +
1543 dma_cookie_complete(desc);
1544 spin_unlock_irqrestore(&mchan->lock, irqflags);
1545
1546 - llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
1547 dmaengine_desc_get_callback(desc, &cb);
1548
1549 dma_run_dependencies(desc);
1550
1551 spin_lock_irqsave(&mchan->lock, irqflags);
1552 list_move(&mdesc->node, &mchan->free);
1553 -
1554 - if (llstat == DMA_COMPLETE) {
1555 - mchan->last_success = last_cookie;
1556 - result.result = DMA_TRANS_NOERROR;
1557 - } else
1558 - result.result = DMA_TRANS_ABORTED;
1559 -
1560 spin_unlock_irqrestore(&mchan->lock, irqflags);
1561
1562 dmaengine_desc_callback_invoke(&cb, &result);
1563 @@ -415,6 +416,7 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
1564 if (!mdesc)
1565 return NULL;
1566
1567 + mdesc->desc.flags = flags;
1568 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
1569 src, dest, len, flags,
1570 HIDMA_TRE_MEMCPY);
1571 @@ -447,6 +449,7 @@ hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
1572 if (!mdesc)
1573 return NULL;
1574
1575 + mdesc->desc.flags = flags;
1576 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
1577 value, dest, len, flags,
1578 HIDMA_TRE_MEMSET);
1579 diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
1580 index 9a558e30c461..8219ab88a507 100644
1581 --- a/drivers/dma/tegra20-apb-dma.c
1582 +++ b/drivers/dma/tegra20-apb-dma.c
1583 @@ -636,7 +636,10 @@ static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
1584
1585 sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
1586 dma_desc = sgreq->dma_desc;
1587 - dma_desc->bytes_transferred += sgreq->req_len;
1588 + /* if we dma for long enough the transfer count will wrap */
1589 + dma_desc->bytes_transferred =
1590 + (dma_desc->bytes_transferred + sgreq->req_len) %
1591 + dma_desc->bytes_requested;
1592
1593 /* Callback need to be call */
1594 if (!dma_desc->cb_count)
1595 diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
1596 index a7902fccdcfa..6090d25dce85 100644
1597 --- a/drivers/firmware/efi/cper.c
1598 +++ b/drivers/firmware/efi/cper.c
1599 @@ -546,19 +546,24 @@ EXPORT_SYMBOL_GPL(cper_estatus_check_header);
1600 int cper_estatus_check(const struct acpi_hest_generic_status *estatus)
1601 {
1602 struct acpi_hest_generic_data *gdata;
1603 - unsigned int data_len, gedata_len;
1604 + unsigned int data_len, record_size;
1605 int rc;
1606
1607 rc = cper_estatus_check_header(estatus);
1608 if (rc)
1609 return rc;
1610 +
1611 data_len = estatus->data_length;
1612
1613 apei_estatus_for_each_section(estatus, gdata) {
1614 - gedata_len = acpi_hest_get_error_length(gdata);
1615 - if (gedata_len > data_len - acpi_hest_get_size(gdata))
1616 + if (sizeof(struct acpi_hest_generic_data) > data_len)
1617 + return -EINVAL;
1618 +
1619 + record_size = acpi_hest_get_record_size(gdata);
1620 + if (record_size > data_len)
1621 return -EINVAL;
1622 - data_len -= acpi_hest_get_record_size(gdata);
1623 +
1624 + data_len -= record_size;
1625 }
1626 if (data_len)
1627 return -EINVAL;
1628 diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
1629 index c037c6c5d0b7..04e6ecd72cd9 100644
1630 --- a/drivers/firmware/efi/libstub/arm-stub.c
1631 +++ b/drivers/firmware/efi/libstub/arm-stub.c
1632 @@ -367,6 +367,11 @@ void efi_get_virtmap(efi_memory_desc_t *memory_map, unsigned long map_size,
1633 paddr = in->phys_addr;
1634 size = in->num_pages * EFI_PAGE_SIZE;
1635
1636 + if (novamap()) {
1637 + in->virt_addr = in->phys_addr;
1638 + continue;
1639 + }
1640 +
1641 /*
1642 * Make the mapping compatible with 64k pages: this allows
1643 * a 4k page size kernel to kexec a 64k page size kernel and
1644 diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
1645 index e94975f4655b..442f51c2a53d 100644
1646 --- a/drivers/firmware/efi/libstub/efi-stub-helper.c
1647 +++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
1648 @@ -34,6 +34,7 @@ static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
1649
1650 static int __section(.data) __nokaslr;
1651 static int __section(.data) __quiet;
1652 +static int __section(.data) __novamap;
1653
1654 int __pure nokaslr(void)
1655 {
1656 @@ -43,6 +44,10 @@ int __pure is_quiet(void)
1657 {
1658 return __quiet;
1659 }
1660 +int __pure novamap(void)
1661 +{
1662 + return __novamap;
1663 +}
1664
1665 #define EFI_MMAP_NR_SLACK_SLOTS 8
1666
1667 @@ -482,6 +487,11 @@ efi_status_t efi_parse_options(char const *cmdline)
1668 __chunk_size = -1UL;
1669 }
1670
1671 + if (!strncmp(str, "novamap", 7)) {
1672 + str += strlen("novamap");
1673 + __novamap = 1;
1674 + }
1675 +
1676 /* Group words together, delimited by "," */
1677 while (*str && *str != ' ' && *str != ',')
1678 str++;
1679 diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
1680 index 32799cf039ef..337b52c4702c 100644
1681 --- a/drivers/firmware/efi/libstub/efistub.h
1682 +++ b/drivers/firmware/efi/libstub/efistub.h
1683 @@ -27,6 +27,7 @@
1684
1685 extern int __pure nokaslr(void);
1686 extern int __pure is_quiet(void);
1687 +extern int __pure novamap(void);
1688
1689 #define pr_efi(sys_table, msg) do { \
1690 if (!is_quiet()) efi_printk(sys_table, "EFI stub: "msg); \
1691 diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
1692 index 0dc7b4987cc2..f8f89f995e9d 100644
1693 --- a/drivers/firmware/efi/libstub/fdt.c
1694 +++ b/drivers/firmware/efi/libstub/fdt.c
1695 @@ -327,6 +327,9 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
1696 if (status == EFI_SUCCESS) {
1697 efi_set_virtual_address_map_t *svam;
1698
1699 + if (novamap())
1700 + return EFI_SUCCESS;
1701 +
1702 /* Install the new virtual address map */
1703 svam = sys_table->runtime->set_virtual_address_map;
1704 status = svam(runtime_entry_count * desc_size, desc_size,
1705 diff --git a/drivers/firmware/efi/memattr.c b/drivers/firmware/efi/memattr.c
1706 index 8986757eafaf..aac972b056d9 100644
1707 --- a/drivers/firmware/efi/memattr.c
1708 +++ b/drivers/firmware/efi/memattr.c
1709 @@ -94,7 +94,7 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
1710
1711 if (!(md->attribute & EFI_MEMORY_RUNTIME))
1712 continue;
1713 - if (md->virt_addr == 0) {
1714 + if (md->virt_addr == 0 && md->phys_addr != 0) {
1715 /* no virtual mapping has been installed by the stub */
1716 break;
1717 }
1718 diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
1719 index e2abfdb5cee6..698745c249e8 100644
1720 --- a/drivers/firmware/efi/runtime-wrappers.c
1721 +++ b/drivers/firmware/efi/runtime-wrappers.c
1722 @@ -85,7 +85,7 @@ struct efi_runtime_work efi_rts_work;
1723 pr_err("Failed to queue work to efi_rts_wq.\n"); \
1724 \
1725 exit: \
1726 - efi_rts_work.efi_rts_id = NONE; \
1727 + efi_rts_work.efi_rts_id = EFI_NONE; \
1728 efi_rts_work.status; \
1729 })
1730
1731 @@ -175,50 +175,50 @@ static void efi_call_rts(struct work_struct *work)
1732 arg5 = efi_rts_work.arg5;
1733
1734 switch (efi_rts_work.efi_rts_id) {
1735 - case GET_TIME:
1736 + case EFI_GET_TIME:
1737 status = efi_call_virt(get_time, (efi_time_t *)arg1,
1738 (efi_time_cap_t *)arg2);
1739 break;
1740 - case SET_TIME:
1741 + case EFI_SET_TIME:
1742 status = efi_call_virt(set_time, (efi_time_t *)arg1);
1743 break;
1744 - case GET_WAKEUP_TIME:
1745 + case EFI_GET_WAKEUP_TIME:
1746 status = efi_call_virt(get_wakeup_time, (efi_bool_t *)arg1,
1747 (efi_bool_t *)arg2, (efi_time_t *)arg3);
1748 break;
1749 - case SET_WAKEUP_TIME:
1750 + case EFI_SET_WAKEUP_TIME:
1751 status = efi_call_virt(set_wakeup_time, *(efi_bool_t *)arg1,
1752 (efi_time_t *)arg2);
1753 break;
1754 - case GET_VARIABLE:
1755 + case EFI_GET_VARIABLE:
1756 status = efi_call_virt(get_variable, (efi_char16_t *)arg1,
1757 (efi_guid_t *)arg2, (u32 *)arg3,
1758 (unsigned long *)arg4, (void *)arg5);
1759 break;
1760 - case GET_NEXT_VARIABLE:
1761 + case EFI_GET_NEXT_VARIABLE:
1762 status = efi_call_virt(get_next_variable, (unsigned long *)arg1,
1763 (efi_char16_t *)arg2,
1764 (efi_guid_t *)arg3);
1765 break;
1766 - case SET_VARIABLE:
1767 + case EFI_SET_VARIABLE:
1768 status = efi_call_virt(set_variable, (efi_char16_t *)arg1,
1769 (efi_guid_t *)arg2, *(u32 *)arg3,
1770 *(unsigned long *)arg4, (void *)arg5);
1771 break;
1772 - case QUERY_VARIABLE_INFO:
1773 + case EFI_QUERY_VARIABLE_INFO:
1774 status = efi_call_virt(query_variable_info, *(u32 *)arg1,
1775 (u64 *)arg2, (u64 *)arg3, (u64 *)arg4);
1776 break;
1777 - case GET_NEXT_HIGH_MONO_COUNT:
1778 + case EFI_GET_NEXT_HIGH_MONO_COUNT:
1779 status = efi_call_virt(get_next_high_mono_count, (u32 *)arg1);
1780 break;
1781 - case UPDATE_CAPSULE:
1782 + case EFI_UPDATE_CAPSULE:
1783 status = efi_call_virt(update_capsule,
1784 (efi_capsule_header_t **)arg1,
1785 *(unsigned long *)arg2,
1786 *(unsigned long *)arg3);
1787 break;
1788 - case QUERY_CAPSULE_CAPS:
1789 + case EFI_QUERY_CAPSULE_CAPS:
1790 status = efi_call_virt(query_capsule_caps,
1791 (efi_capsule_header_t **)arg1,
1792 *(unsigned long *)arg2, (u64 *)arg3,
1793 @@ -242,7 +242,7 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
1794
1795 if (down_interruptible(&efi_runtime_lock))
1796 return EFI_ABORTED;
1797 - status = efi_queue_work(GET_TIME, tm, tc, NULL, NULL, NULL);
1798 + status = efi_queue_work(EFI_GET_TIME, tm, tc, NULL, NULL, NULL);
1799 up(&efi_runtime_lock);
1800 return status;
1801 }
1802 @@ -253,7 +253,7 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
1803
1804 if (down_interruptible(&efi_runtime_lock))
1805 return EFI_ABORTED;
1806 - status = efi_queue_work(SET_TIME, tm, NULL, NULL, NULL, NULL);
1807 + status = efi_queue_work(EFI_SET_TIME, tm, NULL, NULL, NULL, NULL);
1808 up(&efi_runtime_lock);
1809 return status;
1810 }
1811 @@ -266,7 +266,7 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
1812
1813 if (down_interruptible(&efi_runtime_lock))
1814 return EFI_ABORTED;
1815 - status = efi_queue_work(GET_WAKEUP_TIME, enabled, pending, tm, NULL,
1816 + status = efi_queue_work(EFI_GET_WAKEUP_TIME, enabled, pending, tm, NULL,
1817 NULL);
1818 up(&efi_runtime_lock);
1819 return status;
1820 @@ -278,7 +278,7 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
1821
1822 if (down_interruptible(&efi_runtime_lock))
1823 return EFI_ABORTED;
1824 - status = efi_queue_work(SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
1825 + status = efi_queue_work(EFI_SET_WAKEUP_TIME, &enabled, tm, NULL, NULL,
1826 NULL);
1827 up(&efi_runtime_lock);
1828 return status;
1829 @@ -294,7 +294,7 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
1830
1831 if (down_interruptible(&efi_runtime_lock))
1832 return EFI_ABORTED;
1833 - status = efi_queue_work(GET_VARIABLE, name, vendor, attr, data_size,
1834 + status = efi_queue_work(EFI_GET_VARIABLE, name, vendor, attr, data_size,
1835 data);
1836 up(&efi_runtime_lock);
1837 return status;
1838 @@ -308,7 +308,7 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
1839
1840 if (down_interruptible(&efi_runtime_lock))
1841 return EFI_ABORTED;
1842 - status = efi_queue_work(GET_NEXT_VARIABLE, name_size, name, vendor,
1843 + status = efi_queue_work(EFI_GET_NEXT_VARIABLE, name_size, name, vendor,
1844 NULL, NULL);
1845 up(&efi_runtime_lock);
1846 return status;
1847 @@ -324,7 +324,7 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
1848
1849 if (down_interruptible(&efi_runtime_lock))
1850 return EFI_ABORTED;
1851 - status = efi_queue_work(SET_VARIABLE, name, vendor, &attr, &data_size,
1852 + status = efi_queue_work(EFI_SET_VARIABLE, name, vendor, &attr, &data_size,
1853 data);
1854 up(&efi_runtime_lock);
1855 return status;
1856 @@ -359,7 +359,7 @@ static efi_status_t virt_efi_query_variable_info(u32 attr,
1857
1858 if (down_interruptible(&efi_runtime_lock))
1859 return EFI_ABORTED;
1860 - status = efi_queue_work(QUERY_VARIABLE_INFO, &attr, storage_space,
1861 + status = efi_queue_work(EFI_QUERY_VARIABLE_INFO, &attr, storage_space,
1862 remaining_space, max_variable_size, NULL);
1863 up(&efi_runtime_lock);
1864 return status;
1865 @@ -391,7 +391,7 @@ static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
1866
1867 if (down_interruptible(&efi_runtime_lock))
1868 return EFI_ABORTED;
1869 - status = efi_queue_work(GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
1870 + status = efi_queue_work(EFI_GET_NEXT_HIGH_MONO_COUNT, count, NULL, NULL,
1871 NULL, NULL);
1872 up(&efi_runtime_lock);
1873 return status;
1874 @@ -407,7 +407,7 @@ static void virt_efi_reset_system(int reset_type,
1875 "could not get exclusive access to the firmware\n");
1876 return;
1877 }
1878 - efi_rts_work.efi_rts_id = RESET_SYSTEM;
1879 + efi_rts_work.efi_rts_id = EFI_RESET_SYSTEM;
1880 __efi_call_virt(reset_system, reset_type, status, data_size, data);
1881 up(&efi_runtime_lock);
1882 }
1883 @@ -423,7 +423,7 @@ static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
1884
1885 if (down_interruptible(&efi_runtime_lock))
1886 return EFI_ABORTED;
1887 - status = efi_queue_work(UPDATE_CAPSULE, capsules, &count, &sg_list,
1888 + status = efi_queue_work(EFI_UPDATE_CAPSULE, capsules, &count, &sg_list,
1889 NULL, NULL);
1890 up(&efi_runtime_lock);
1891 return status;
1892 @@ -441,7 +441,7 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
1893
1894 if (down_interruptible(&efi_runtime_lock))
1895 return EFI_ABORTED;
1896 - status = efi_queue_work(QUERY_CAPSULE_CAPS, capsules, &count,
1897 + status = efi_queue_work(EFI_QUERY_CAPSULE_CAPS, capsules, &count,
1898 max_size, reset_type, NULL);
1899 up(&efi_runtime_lock);
1900 return status;
1901 diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
1902 index f4e9921fa966..7f33024b6d83 100644
1903 --- a/drivers/gpio/gpio-omap.c
1904 +++ b/drivers/gpio/gpio-omap.c
1905 @@ -883,14 +883,16 @@ static void omap_gpio_unmask_irq(struct irq_data *d)
1906 if (trigger)
1907 omap_set_gpio_triggering(bank, offset, trigger);
1908
1909 - /* For level-triggered GPIOs, the clearing must be done after
1910 - * the HW source is cleared, thus after the handler has run */
1911 - if (bank->level_mask & BIT(offset)) {
1912 - omap_set_gpio_irqenable(bank, offset, 0);
1913 + omap_set_gpio_irqenable(bank, offset, 1);
1914 +
1915 + /*
1916 + * For level-triggered GPIOs, clearing must be done after the source
1917 + * is cleared, thus after the handler has run. OMAP4 needs this done
1918 + * after enabling the interrupt to clear the wakeup status.
1919 + */
1920 + if (bank->level_mask & BIT(offset))
1921 omap_clear_gpio_irqstatus(bank, offset);
1922 - }
1923
1924 - omap_set_gpio_irqenable(bank, offset, 1);
1925 raw_spin_unlock_irqrestore(&bank->lock, flags);
1926 }
1927
1928 diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
1929 index a6e1891217e2..a1dd2f1c0d02 100644
1930 --- a/drivers/gpio/gpiolib-of.c
1931 +++ b/drivers/gpio/gpiolib-of.c
1932 @@ -86,7 +86,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
1933 if (IS_ENABLED(CONFIG_REGULATOR) &&
1934 (of_device_is_compatible(np, "regulator-fixed") ||
1935 of_device_is_compatible(np, "reg-fixed-voltage") ||
1936 - of_device_is_compatible(np, "regulator-gpio"))) {
1937 + (of_device_is_compatible(np, "regulator-gpio") &&
1938 + strcmp(propname, "enable-gpio") == 0))) {
1939 /*
1940 * The regulator GPIO handles are specified such that the
1941 * presence or absence of "enable-active-high" solely controls
1942 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1943 index 636d14a60952..83c8a0407537 100644
1944 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1945 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1946 @@ -886,6 +886,7 @@ static void emulated_link_detect(struct dc_link *link)
1947 return;
1948 }
1949
1950 + /* dc_sink_create returns a new reference */
1951 link->local_sink = sink;
1952
1953 edid_status = dm_helpers_read_local_edid(
1954 @@ -952,6 +953,8 @@ static int dm_resume(void *handle)
1955 if (aconnector->fake_enable && aconnector->dc_link->local_sink)
1956 aconnector->fake_enable = false;
1957
1958 + if (aconnector->dc_sink)
1959 + dc_sink_release(aconnector->dc_sink);
1960 aconnector->dc_sink = NULL;
1961 amdgpu_dm_update_connector_after_detect(aconnector);
1962 mutex_unlock(&aconnector->hpd_lock);
1963 @@ -1061,6 +1064,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1964
1965
1966 sink = aconnector->dc_link->local_sink;
1967 + if (sink)
1968 + dc_sink_retain(sink);
1969
1970 /*
1971 * Edid mgmt connector gets first update only in mode_valid hook and then
1972 @@ -1085,21 +1090,24 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
1973 * to it anymore after disconnect, so on next crtc to connector
1974 * reshuffle by UMD we will get into unwanted dc_sink release
1975 */
1976 - if (aconnector->dc_sink != aconnector->dc_em_sink)
1977 - dc_sink_release(aconnector->dc_sink);
1978 + dc_sink_release(aconnector->dc_sink);
1979 }
1980 aconnector->dc_sink = sink;
1981 + dc_sink_retain(aconnector->dc_sink);
1982 amdgpu_dm_update_freesync_caps(connector,
1983 aconnector->edid);
1984 } else {
1985 amdgpu_dm_update_freesync_caps(connector, NULL);
1986 - if (!aconnector->dc_sink)
1987 + if (!aconnector->dc_sink) {
1988 aconnector->dc_sink = aconnector->dc_em_sink;
1989 - else if (aconnector->dc_sink != aconnector->dc_em_sink)
1990 dc_sink_retain(aconnector->dc_sink);
1991 + }
1992 }
1993
1994 mutex_unlock(&dev->mode_config.mutex);
1995 +
1996 + if (sink)
1997 + dc_sink_release(sink);
1998 return;
1999 }
2000
2001 @@ -1107,8 +1115,10 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
2002 * TODO: temporary guard to look for proper fix
2003 * if this sink is MST sink, we should not do anything
2004 */
2005 - if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
2006 + if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
2007 + dc_sink_release(sink);
2008 return;
2009 + }
2010
2011 if (aconnector->dc_sink == sink) {
2012 /*
2013 @@ -1117,6 +1127,8 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
2014 */
2015 DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
2016 aconnector->connector_id);
2017 + if (sink)
2018 + dc_sink_release(sink);
2019 return;
2020 }
2021
2022 @@ -1138,6 +1150,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
2023 amdgpu_dm_update_freesync_caps(connector, NULL);
2024
2025 aconnector->dc_sink = sink;
2026 + dc_sink_retain(aconnector->dc_sink);
2027 if (sink->dc_edid.length == 0) {
2028 aconnector->edid = NULL;
2029 drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
2030 @@ -1158,11 +1171,15 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
2031 amdgpu_dm_update_freesync_caps(connector, NULL);
2032 drm_connector_update_edid_property(connector, NULL);
2033 aconnector->num_modes = 0;
2034 + dc_sink_release(aconnector->dc_sink);
2035 aconnector->dc_sink = NULL;
2036 aconnector->edid = NULL;
2037 }
2038
2039 mutex_unlock(&dev->mode_config.mutex);
2040 +
2041 + if (sink)
2042 + dc_sink_release(sink);
2043 }
2044
2045 static void handle_hpd_irq(void *param)
2046 @@ -2908,6 +2925,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2047 }
2048 } else {
2049 sink = aconnector->dc_sink;
2050 + dc_sink_retain(sink);
2051 }
2052
2053 stream = dc_create_stream_for_sink(sink);
2054 @@ -2974,8 +2992,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
2055 stream->ignore_msa_timing_param = true;
2056
2057 finish:
2058 - if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
2059 - dc_sink_release(sink);
2060 + dc_sink_release(sink);
2061
2062 return stream;
2063 }
2064 @@ -3233,6 +3250,14 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
2065 dm->backlight_dev = NULL;
2066 }
2067 #endif
2068 +
2069 + if (aconnector->dc_em_sink)
2070 + dc_sink_release(aconnector->dc_em_sink);
2071 + aconnector->dc_em_sink = NULL;
2072 + if (aconnector->dc_sink)
2073 + dc_sink_release(aconnector->dc_sink);
2074 + aconnector->dc_sink = NULL;
2075 +
2076 drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
2077 drm_connector_unregister(connector);
2078 drm_connector_cleanup(connector);
2079 @@ -3330,10 +3355,12 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
2080 (edid->extensions + 1) * EDID_LENGTH,
2081 &init_params);
2082
2083 - if (aconnector->base.force == DRM_FORCE_ON)
2084 + if (aconnector->base.force == DRM_FORCE_ON) {
2085 aconnector->dc_sink = aconnector->dc_link->local_sink ?
2086 aconnector->dc_link->local_sink :
2087 aconnector->dc_em_sink;
2088 + dc_sink_retain(aconnector->dc_sink);
2089 + }
2090 }
2091
2092 static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector)
2093 @@ -4948,7 +4975,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
2094 static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state,
2095 struct dc_stream_state *stream_state)
2096 {
2097 - stream_state->mode_changed = crtc_state->mode_changed;
2098 + stream_state->mode_changed =
2099 + crtc_state->mode_changed || crtc_state->active_changed;
2100 }
2101
2102 static int amdgpu_dm_atomic_commit(struct drm_device *dev,
2103 @@ -4969,10 +4997,22 @@ static int amdgpu_dm_atomic_commit(struct drm_device *dev,
2104 */
2105 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
2106 struct dm_crtc_state *dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
2107 + struct dm_crtc_state *dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
2108 struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
2109
2110 - if (drm_atomic_crtc_needs_modeset(new_crtc_state) && dm_old_crtc_state->stream)
2111 + if (drm_atomic_crtc_needs_modeset(new_crtc_state)
2112 + && dm_old_crtc_state->stream) {
2113 + /*
2114 + * CRC capture was enabled but not disabled.
2115 + * Release the vblank reference.
2116 + */
2117 + if (dm_new_crtc_state->crc_enabled) {
2118 + drm_crtc_vblank_put(crtc);
2119 + dm_new_crtc_state->crc_enabled = false;
2120 + }
2121 +
2122 manage_dm_interrupts(adev, acrtc, false);
2123 + }
2124 }
2125 /*
2126 * Add check here for SoC's that support hardware cursor plane, to
2127 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
2128 index f088ac585978..26b651148c67 100644
2129 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
2130 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
2131 @@ -66,6 +66,7 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
2132 {
2133 struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
2134 struct dc_stream_state *stream_state = crtc_state->stream;
2135 + bool enable;
2136
2137 enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
2138
2139 @@ -80,28 +81,27 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
2140 return -EINVAL;
2141 }
2142
2143 + enable = (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO);
2144 +
2145 + if (!dc_stream_configure_crc(stream_state->ctx->dc, stream_state,
2146 + enable, enable))
2147 + return -EINVAL;
2148 +
2149 /* When enabling CRC, we should also disable dithering. */
2150 - if (source == AMDGPU_DM_PIPE_CRC_SOURCE_AUTO) {
2151 - if (dc_stream_configure_crc(stream_state->ctx->dc,
2152 - stream_state,
2153 - true, true)) {
2154 - crtc_state->crc_enabled = true;
2155 - dc_stream_set_dither_option(stream_state,
2156 - DITHER_OPTION_TRUN8);
2157 - }
2158 - else
2159 - return -EINVAL;
2160 - } else {
2161 - if (dc_stream_configure_crc(stream_state->ctx->dc,
2162 - stream_state,
2163 - false, false)) {
2164 - crtc_state->crc_enabled = false;
2165 - dc_stream_set_dither_option(stream_state,
2166 - DITHER_OPTION_DEFAULT);
2167 - }
2168 - else
2169 - return -EINVAL;
2170 - }
2171 + dc_stream_set_dither_option(stream_state,
2172 + enable ? DITHER_OPTION_TRUN8
2173 + : DITHER_OPTION_DEFAULT);
2174 +
2175 + /*
2176 + * Reading the CRC requires the vblank interrupt handler to be
2177 + * enabled. Keep a reference until CRC capture stops.
2178 + */
2179 + if (!crtc_state->crc_enabled && enable)
2180 + drm_crtc_vblank_get(crtc);
2181 + else if (crtc_state->crc_enabled && !enable)
2182 + drm_crtc_vblank_put(crtc);
2183 +
2184 + crtc_state->crc_enabled = enable;
2185
2186 /* Reset crc_skipped on dm state */
2187 crtc_state->crc_skip_count = 0;
2188 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2189 index 1b0d209d8367..3b95a637b508 100644
2190 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2191 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
2192 @@ -239,6 +239,7 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
2193 &init_params);
2194
2195 dc_sink->priv = aconnector;
2196 + /* dc_link_add_remote_sink returns a new reference */
2197 aconnector->dc_sink = dc_sink;
2198
2199 if (aconnector->dc_sink)
2200 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
2201 index 5fd52094d459..1f92e7e8e3d3 100644
2202 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c
2203 +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
2204 @@ -1078,6 +1078,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
2205 /* pplib is notified if disp_num changed */
2206 dc->hwss.optimize_bandwidth(dc, context);
2207
2208 + for (i = 0; i < context->stream_count; i++)
2209 + context->streams[i]->mode_changed = false;
2210 +
2211 dc_release_state(dc->current_state);
2212
2213 dc->current_state = context;
2214 diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2215 index b0265dbebd4c..583eb367850f 100644
2216 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2217 +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
2218 @@ -792,6 +792,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
2219 sink->dongle_max_pix_clk = sink_caps.max_hdmi_pixel_clock;
2220 sink->converter_disable_audio = converter_disable_audio;
2221
2222 + /* dc_sink_create returns a new reference */
2223 link->local_sink = sink;
2224
2225 edid_status = dm_helpers_read_local_edid(
2226 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
2227 index 41883c981789..a684b38332ac 100644
2228 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
2229 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
2230 @@ -2334,9 +2334,10 @@ static void dcn10_apply_ctx_for_surface(
2231 }
2232 }
2233
2234 - if (!pipe_ctx->plane_state &&
2235 - old_pipe_ctx->plane_state &&
2236 - old_pipe_ctx->stream_res.tg == tg) {
2237 + if ((!pipe_ctx->plane_state ||
2238 + pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
2239 + old_pipe_ctx->plane_state &&
2240 + old_pipe_ctx->stream_res.tg == tg) {
2241
2242 dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx);
2243 removed_pipe[i] = true;
2244 diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
2245 index 529414556962..1a244c53252c 100644
2246 --- a/drivers/gpu/drm/drm_dp_mst_topology.c
2247 +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
2248 @@ -3286,6 +3286,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
2249 msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
2250 msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
2251 msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
2252 + msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
2253 }
2254 msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
2255 msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
2256 diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
2257 index 70fc8e356b18..edd8cb497f3b 100644
2258 --- a/drivers/gpu/drm/drm_fb_helper.c
2259 +++ b/drivers/gpu/drm/drm_fb_helper.c
2260 @@ -2891,7 +2891,7 @@ int drm_fb_helper_fbdev_setup(struct drm_device *dev,
2261 return 0;
2262
2263 err_drm_fb_helper_fini:
2264 - drm_fb_helper_fini(fb_helper);
2265 + drm_fb_helper_fbdev_teardown(dev);
2266
2267 return ret;
2268 }
2269 diff --git a/drivers/gpu/drm/drm_mode_object.c b/drivers/gpu/drm/drm_mode_object.c
2270 index 004191d01772..15b919f90c5a 100644
2271 --- a/drivers/gpu/drm/drm_mode_object.c
2272 +++ b/drivers/gpu/drm/drm_mode_object.c
2273 @@ -465,6 +465,7 @@ static int set_property_atomic(struct drm_mode_object *obj,
2274
2275 drm_modeset_acquire_init(&ctx, 0);
2276 state->acquire_ctx = &ctx;
2277 +
2278 retry:
2279 if (prop == state->dev->mode_config.dpms_property) {
2280 if (obj->type != DRM_MODE_OBJECT_CONNECTOR) {
2281 diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
2282 index 5f650d8fc66b..4cfb56893b7f 100644
2283 --- a/drivers/gpu/drm/drm_plane.c
2284 +++ b/drivers/gpu/drm/drm_plane.c
2285 @@ -220,6 +220,9 @@ int drm_universal_plane_init(struct drm_device *dev, struct drm_plane *plane,
2286 format_modifier_count++;
2287 }
2288
2289 + if (format_modifier_count)
2290 + config->allow_fb_modifiers = true;
2291 +
2292 plane->modifier_count = format_modifier_count;
2293 plane->modifiers = kmalloc_array(format_modifier_count,
2294 sizeof(format_modifiers[0]),
2295 diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
2296 index cb307a2abf06..7316b4ab1b85 100644
2297 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
2298 +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
2299 @@ -23,11 +23,14 @@ struct dpu_mdss {
2300 struct dpu_irq_controller irq_controller;
2301 };
2302
2303 -static irqreturn_t dpu_mdss_irq(int irq, void *arg)
2304 +static void dpu_mdss_irq(struct irq_desc *desc)
2305 {
2306 - struct dpu_mdss *dpu_mdss = arg;
2307 + struct dpu_mdss *dpu_mdss = irq_desc_get_handler_data(desc);
2308 + struct irq_chip *chip = irq_desc_get_chip(desc);
2309 u32 interrupts;
2310
2311 + chained_irq_enter(chip, desc);
2312 +
2313 interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
2314
2315 while (interrupts) {
2316 @@ -39,20 +42,20 @@ static irqreturn_t dpu_mdss_irq(int irq, void *arg)
2317 hwirq);
2318 if (mapping == 0) {
2319 DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
2320 - return IRQ_NONE;
2321 + break;
2322 }
2323
2324 rc = generic_handle_irq(mapping);
2325 if (rc < 0) {
2326 DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
2327 hwirq, mapping, rc);
2328 - return IRQ_NONE;
2329 + break;
2330 }
2331
2332 interrupts &= ~(1 << hwirq);
2333 }
2334
2335 - return IRQ_HANDLED;
2336 + chained_irq_exit(chip, desc);
2337 }
2338
2339 static void dpu_mdss_irq_mask(struct irq_data *irqd)
2340 @@ -83,16 +86,16 @@ static struct irq_chip dpu_mdss_irq_chip = {
2341 .irq_unmask = dpu_mdss_irq_unmask,
2342 };
2343
2344 +static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
2345 +
2346 static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
2347 unsigned int irq, irq_hw_number_t hwirq)
2348 {
2349 struct dpu_mdss *dpu_mdss = domain->host_data;
2350 - int ret;
2351
2352 + irq_set_lockdep_class(irq, &dpu_mdss_lock_key, &dpu_mdss_request_key);
2353 irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
2354 - ret = irq_set_chip_data(irq, dpu_mdss);
2355 -
2356 - return ret;
2357 + return irq_set_chip_data(irq, dpu_mdss);
2358 }
2359
2360 static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
2361 @@ -159,11 +162,13 @@ static void dpu_mdss_destroy(struct drm_device *dev)
2362 struct msm_drm_private *priv = dev->dev_private;
2363 struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
2364 struct dss_module_power *mp = &dpu_mdss->mp;
2365 + int irq;
2366
2367 pm_runtime_suspend(dev->dev);
2368 pm_runtime_disable(dev->dev);
2369 _dpu_mdss_irq_domain_fini(dpu_mdss);
2370 - free_irq(platform_get_irq(pdev, 0), dpu_mdss);
2371 + irq = platform_get_irq(pdev, 0);
2372 + irq_set_chained_handler_and_data(irq, NULL, NULL);
2373 msm_dss_put_clk(mp->clk_config, mp->num_clk);
2374 devm_kfree(&pdev->dev, mp->clk_config);
2375
2376 @@ -187,6 +192,7 @@ int dpu_mdss_init(struct drm_device *dev)
2377 struct dpu_mdss *dpu_mdss;
2378 struct dss_module_power *mp;
2379 int ret = 0;
2380 + int irq;
2381
2382 dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
2383 if (!dpu_mdss)
2384 @@ -219,12 +225,12 @@ int dpu_mdss_init(struct drm_device *dev)
2385 if (ret)
2386 goto irq_domain_error;
2387
2388 - ret = request_irq(platform_get_irq(pdev, 0),
2389 - dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
2390 - if (ret) {
2391 - DPU_ERROR("failed to init irq: %d\n", ret);
2392 + irq = platform_get_irq(pdev, 0);
2393 + if (irq < 0)
2394 goto irq_error;
2395 - }
2396 +
2397 + irq_set_chained_handler_and_data(irq, dpu_mdss_irq,
2398 + dpu_mdss);
2399
2400 pm_runtime_enable(dev->dev);
2401
2402 diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
2403 index 6a4ca139cf5d..8fd8124d72ba 100644
2404 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
2405 +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c
2406 @@ -750,7 +750,9 @@ static int nv17_tv_set_property(struct drm_encoder *encoder,
2407 /* Disable the crtc to ensure a full modeset is
2408 * performed whenever it's turned on again. */
2409 if (crtc)
2410 - drm_crtc_force_disable(crtc);
2411 + drm_crtc_helper_set_mode(crtc, &crtc->mode,
2412 + crtc->x, crtc->y,
2413 + crtc->primary->fb);
2414 }
2415
2416 return 0;
2417 diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
2418 index 9c7007d45408..f9a90ff24e6d 100644
2419 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c
2420 +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c
2421 @@ -331,6 +331,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
2422 dev_dbg(rcdu->dev,
2423 "connected entity %pOF is disabled, skipping\n",
2424 entity);
2425 + of_node_put(entity);
2426 return -ENODEV;
2427 }
2428
2429 @@ -366,6 +367,7 @@ static int rcar_du_encoders_init_one(struct rcar_du_device *rcdu,
2430 dev_warn(rcdu->dev,
2431 "no encoder found for endpoint %pOF, skipping\n",
2432 ep->local_node);
2433 + of_node_put(entity);
2434 return -ENODEV;
2435 }
2436
2437 diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
2438 index e2942c9a11a7..35ddbec1375a 100644
2439 --- a/drivers/gpu/drm/scheduler/sched_entity.c
2440 +++ b/drivers/gpu/drm/scheduler/sched_entity.c
2441 @@ -52,12 +52,12 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
2442 {
2443 int i;
2444
2445 - if (!(entity && rq_list && num_rq_list > 0 && rq_list[0]))
2446 + if (!(entity && rq_list && (num_rq_list == 0 || rq_list[0])))
2447 return -EINVAL;
2448
2449 memset(entity, 0, sizeof(struct drm_sched_entity));
2450 INIT_LIST_HEAD(&entity->list);
2451 - entity->rq = rq_list[0];
2452 + entity->rq = NULL;
2453 entity->guilty = guilty;
2454 entity->num_rq_list = num_rq_list;
2455 entity->rq_list = kcalloc(num_rq_list, sizeof(struct drm_sched_rq *),
2456 @@ -67,6 +67,10 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
2457
2458 for (i = 0; i < num_rq_list; ++i)
2459 entity->rq_list[i] = rq_list[i];
2460 +
2461 + if (num_rq_list)
2462 + entity->rq = rq_list[0];
2463 +
2464 entity->last_scheduled = NULL;
2465
2466 spin_lock_init(&entity->rq_lock);
2467 @@ -165,6 +169,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
2468 struct task_struct *last_user;
2469 long ret = timeout;
2470
2471 + if (!entity->rq)
2472 + return 0;
2473 +
2474 sched = entity->rq->sched;
2475 /**
2476 * The client will not queue more IBs during this fini, consume existing
2477 @@ -264,20 +271,24 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
2478 */
2479 void drm_sched_entity_fini(struct drm_sched_entity *entity)
2480 {
2481 - struct drm_gpu_scheduler *sched;
2482 + struct drm_gpu_scheduler *sched = NULL;
2483
2484 - sched = entity->rq->sched;
2485 - drm_sched_rq_remove_entity(entity->rq, entity);
2486 + if (entity->rq) {
2487 + sched = entity->rq->sched;
2488 + drm_sched_rq_remove_entity(entity->rq, entity);
2489 + }
2490
2491 /* Consumption of existing IBs wasn't completed. Forcefully
2492 * remove them here.
2493 */
2494 if (spsc_queue_peek(&entity->job_queue)) {
2495 - /* Park the kernel for a moment to make sure it isn't processing
2496 - * our enity.
2497 - */
2498 - kthread_park(sched->thread);
2499 - kthread_unpark(sched->thread);
2500 + if (sched) {
2501 + /* Park the kernel for a moment to make sure it isn't processing
2502 + * our entity.
2503 + */
2504 + kthread_park(sched->thread);
2505 + kthread_unpark(sched->thread);
2506 + }
2507 if (entity->dependency) {
2508 dma_fence_remove_callback(entity->dependency,
2509 &entity->cb);
2510 @@ -362,9 +373,11 @@ void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
2511 for (i = 0; i < entity->num_rq_list; ++i)
2512 drm_sched_entity_set_rq_priority(&entity->rq_list[i], priority);
2513
2514 - drm_sched_rq_remove_entity(entity->rq, entity);
2515 - drm_sched_entity_set_rq_priority(&entity->rq, priority);
2516 - drm_sched_rq_add_entity(entity->rq, entity);
2517 + if (entity->rq) {
2518 + drm_sched_rq_remove_entity(entity->rq, entity);
2519 + drm_sched_entity_set_rq_priority(&entity->rq, priority);
2520 + drm_sched_rq_add_entity(entity->rq, entity);
2521 + }
2522
2523 spin_unlock(&entity->rq_lock);
2524 }
2525 diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
2526 index e747a7d16739..1054f535178a 100644
2527 --- a/drivers/gpu/drm/vkms/vkms_crtc.c
2528 +++ b/drivers/gpu/drm/vkms/vkms_crtc.c
2529 @@ -4,13 +4,17 @@
2530 #include <drm/drm_atomic_helper.h>
2531 #include <drm/drm_crtc_helper.h>
2532
2533 -static void _vblank_handle(struct vkms_output *output)
2534 +static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
2535 {
2536 + struct vkms_output *output = container_of(timer, struct vkms_output,
2537 + vblank_hrtimer);
2538 struct drm_crtc *crtc = &output->crtc;
2539 struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
2540 + int ret_overrun;
2541 bool ret;
2542
2543 spin_lock(&output->lock);
2544 +
2545 ret = drm_crtc_handle_vblank(crtc);
2546 if (!ret)
2547 DRM_ERROR("vkms failure on handling vblank");
2548 @@ -31,19 +35,9 @@ static void _vblank_handle(struct vkms_output *output)
2549 DRM_WARN("failed to queue vkms_crc_work_handle");
2550 }
2551
2552 - spin_unlock(&output->lock);
2553 -}
2554 -
2555 -static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
2556 -{
2557 - struct vkms_output *output = container_of(timer, struct vkms_output,
2558 - vblank_hrtimer);
2559 - int ret_overrun;
2560 -
2561 - _vblank_handle(output);
2562 -
2563 ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
2564 output->period_ns);
2565 + spin_unlock(&output->lock);
2566
2567 return HRTIMER_RESTART;
2568 }
2569 @@ -81,6 +75,9 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
2570
2571 *vblank_time = output->vblank_hrtimer.node.expires;
2572
2573 + if (!in_vblank_irq)
2574 + *vblank_time -= output->period_ns;
2575 +
2576 return true;
2577 }
2578
2579 diff --git a/drivers/hid/intel-ish-hid/ipc/ipc.c b/drivers/hid/intel-ish-hid/ipc/ipc.c
2580 index 742191bb24c6..45e33c7ba9a6 100644
2581 --- a/drivers/hid/intel-ish-hid/ipc/ipc.c
2582 +++ b/drivers/hid/intel-ish-hid/ipc/ipc.c
2583 @@ -91,7 +91,10 @@ static bool check_generated_interrupt(struct ishtp_device *dev)
2584 IPC_INT_FROM_ISH_TO_HOST_CHV_AB(pisr_val);
2585 } else {
2586 pisr_val = ish_reg_read(dev, IPC_REG_PISR_BXT);
2587 - interrupt_generated = IPC_INT_FROM_ISH_TO_HOST_BXT(pisr_val);
2588 + interrupt_generated = !!pisr_val;
2589 + /* only busy-clear bit is RW, others are RO */
2590 + if (pisr_val)
2591 + ish_reg_write(dev, IPC_REG_PISR_BXT, pisr_val);
2592 }
2593
2594 return interrupt_generated;
2595 @@ -839,11 +842,11 @@ int ish_hw_start(struct ishtp_device *dev)
2596 {
2597 ish_set_host_rdy(dev);
2598
2599 + set_host_ready(dev);
2600 +
2601 /* After that we can enable ISH DMA operation and wakeup ISHFW */
2602 ish_wakeup(dev);
2603
2604 - set_host_ready(dev);
2605 -
2606 /* wait for FW-initiated reset flow */
2607 if (!dev->recvd_hw_ready)
2608 wait_event_interruptible_timeout(dev->wait_hw_ready,
2609 diff --git a/drivers/hid/intel-ish-hid/ishtp/bus.c b/drivers/hid/intel-ish-hid/ishtp/bus.c
2610 index 728dc6d4561a..a271d6d169b1 100644
2611 --- a/drivers/hid/intel-ish-hid/ishtp/bus.c
2612 +++ b/drivers/hid/intel-ish-hid/ishtp/bus.c
2613 @@ -675,7 +675,8 @@ int ishtp_cl_device_bind(struct ishtp_cl *cl)
2614 spin_lock_irqsave(&cl->dev->device_list_lock, flags);
2615 list_for_each_entry(cl_device, &cl->dev->device_list,
2616 device_link) {
2617 - if (cl_device->fw_client->client_id == cl->fw_client_id) {
2618 + if (cl_device->fw_client &&
2619 + cl_device->fw_client->client_id == cl->fw_client_id) {
2620 cl->device = cl_device;
2621 rv = 0;
2622 break;
2623 @@ -735,6 +736,7 @@ void ishtp_bus_remove_all_clients(struct ishtp_device *ishtp_dev,
2624 spin_lock_irqsave(&ishtp_dev->device_list_lock, flags);
2625 list_for_each_entry_safe(cl_device, n, &ishtp_dev->device_list,
2626 device_link) {
2627 + cl_device->fw_client = NULL;
2628 if (warm_reset && cl_device->reference_count)
2629 continue;
2630
2631 diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
2632 index abe8249b893b..f21eb28b6782 100644
2633 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c
2634 +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
2635 @@ -177,15 +177,15 @@ static void etm_free_aux(void *data)
2636 schedule_work(&event_data->work);
2637 }
2638
2639 -static void *etm_setup_aux(int event_cpu, void **pages,
2640 +static void *etm_setup_aux(struct perf_event *event, void **pages,
2641 int nr_pages, bool overwrite)
2642 {
2643 - int cpu;
2644 + int cpu = event->cpu;
2645 cpumask_t *mask;
2646 struct coresight_device *sink;
2647 struct etm_event_data *event_data = NULL;
2648
2649 - event_data = alloc_event_data(event_cpu);
2650 + event_data = alloc_event_data(cpu);
2651 if (!event_data)
2652 return NULL;
2653 INIT_WORK(&event_data->work, free_event_data);
2654 diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
2655 index 53e2fb6e86f6..fe76b176974a 100644
2656 --- a/drivers/hwtracing/coresight/coresight-etm4x.c
2657 +++ b/drivers/hwtracing/coresight/coresight-etm4x.c
2658 @@ -55,7 +55,8 @@ static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
2659
2660 static bool etm4_arch_supported(u8 arch)
2661 {
2662 - switch (arch) {
2663 + /* Mask out the minor version number */
2664 + switch (arch & 0xf0) {
2665 case ETM_ARCH_V4:
2666 break;
2667 default:
2668 diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
2669 index b4a0b2b99a78..6b4ef1d38fb2 100644
2670 --- a/drivers/i2c/busses/i2c-designware-core.h
2671 +++ b/drivers/i2c/busses/i2c-designware-core.h
2672 @@ -215,6 +215,7 @@
2673 * @disable_int: function to disable all interrupts
2674 * @init: function to initialize the I2C hardware
2675 * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE
2676 + * @suspended: set to true if the controller is suspended
2677 *
2678 * HCNT and LCNT parameters can be used if the platform knows more accurate
2679 * values than the one computed based only on the input clock frequency.
2680 @@ -270,6 +271,7 @@ struct dw_i2c_dev {
2681 int (*set_sda_hold_time)(struct dw_i2c_dev *dev);
2682 int mode;
2683 struct i2c_bus_recovery_info rinfo;
2684 + bool suspended;
2685 };
2686
2687 #define ACCESS_SWAP 0x00000001
2688 diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
2689 index 8d1bc44d2530..bb8e3f149979 100644
2690 --- a/drivers/i2c/busses/i2c-designware-master.c
2691 +++ b/drivers/i2c/busses/i2c-designware-master.c
2692 @@ -426,6 +426,12 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
2693
2694 pm_runtime_get_sync(dev->dev);
2695
2696 + if (dev->suspended) {
2697 + dev_err(dev->dev, "Error %s call while suspended\n", __func__);
2698 + ret = -ESHUTDOWN;
2699 + goto done_nolock;
2700 + }
2701 +
2702 reinit_completion(&dev->cmd_complete);
2703 dev->msgs = msgs;
2704 dev->msgs_num = num;
2705 diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
2706 index d50f80487214..76810deb2de6 100644
2707 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c
2708 +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
2709 @@ -176,6 +176,7 @@ static int i2c_dw_pci_suspend(struct device *dev)
2710 struct pci_dev *pdev = to_pci_dev(dev);
2711 struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev);
2712
2713 + i_dev->suspended = true;
2714 i_dev->disable(i_dev);
2715
2716 return 0;
2717 @@ -185,8 +186,12 @@ static int i2c_dw_pci_resume(struct device *dev)
2718 {
2719 struct pci_dev *pdev = to_pci_dev(dev);
2720 struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev);
2721 + int ret;
2722
2723 - return i_dev->init(i_dev);
2724 + ret = i_dev->init(i_dev);
2725 + i_dev->suspended = false;
2726 +
2727 + return ret;
2728 }
2729 #endif
2730
2731 diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
2732 index 9eaac3be1f63..ead5e7de3e4d 100644
2733 --- a/drivers/i2c/busses/i2c-designware-platdrv.c
2734 +++ b/drivers/i2c/busses/i2c-designware-platdrv.c
2735 @@ -454,6 +454,8 @@ static int dw_i2c_plat_suspend(struct device *dev)
2736 {
2737 struct dw_i2c_dev *i_dev = dev_get_drvdata(dev);
2738
2739 + i_dev->suspended = true;
2740 +
2741 if (i_dev->shared_with_punit)
2742 return 0;
2743
2744 @@ -471,6 +473,7 @@ static int dw_i2c_plat_resume(struct device *dev)
2745 i2c_dw_prepare_clk(i_dev, true);
2746
2747 i_dev->init(i_dev);
2748 + i_dev->suspended = false;
2749
2750 return 0;
2751 }
2752 diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
2753 index 28460f6a60cc..af87a16ac3a5 100644
2754 --- a/drivers/i2c/i2c-core-base.c
2755 +++ b/drivers/i2c/i2c-core-base.c
2756 @@ -430,7 +430,7 @@ static int i2c_device_remove(struct device *dev)
2757 dev_pm_clear_wake_irq(&client->dev);
2758 device_init_wakeup(&client->dev, false);
2759
2760 - client->irq = 0;
2761 + client->irq = client->init_irq;
2762
2763 return status;
2764 }
2765 @@ -741,10 +741,11 @@ i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info)
2766 client->flags = info->flags;
2767 client->addr = info->addr;
2768
2769 - client->irq = info->irq;
2770 - if (!client->irq)
2771 - client->irq = i2c_dev_irq_from_resources(info->resources,
2772 + client->init_irq = info->irq;
2773 + if (!client->init_irq)
2774 + client->init_irq = i2c_dev_irq_from_resources(info->resources,
2775 info->num_resources);
2776 + client->irq = client->init_irq;
2777
2778 strlcpy(client->name, info->type, sizeof(client->name));
2779
2780 diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
2781 index 6cb7ad608bcd..0f01cdba9d2c 100644
2782 --- a/drivers/i2c/i2c-core-of.c
2783 +++ b/drivers/i2c/i2c-core-of.c
2784 @@ -121,6 +121,17 @@ static int of_dev_node_match(struct device *dev, void *data)
2785 return dev->of_node == data;
2786 }
2787
2788 +static int of_dev_or_parent_node_match(struct device *dev, void *data)
2789 +{
2790 + if (dev->of_node == data)
2791 + return 1;
2792 +
2793 + if (dev->parent)
2794 + return dev->parent->of_node == data;
2795 +
2796 + return 0;
2797 +}
2798 +
2799 /* must call put_device() when done with returned i2c_client device */
2800 struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
2801 {
2802 @@ -145,7 +156,8 @@ struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
2803 struct device *dev;
2804 struct i2c_adapter *adapter;
2805
2806 - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
2807 + dev = bus_find_device(&i2c_bus_type, NULL, node,
2808 + of_dev_or_parent_node_match);
2809 if (!dev)
2810 return NULL;
2811
2812 diff --git a/drivers/iio/adc/qcom-pm8xxx-xoadc.c b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
2813 index c30c002f1fef..4735f8a1ca9d 100644
2814 --- a/drivers/iio/adc/qcom-pm8xxx-xoadc.c
2815 +++ b/drivers/iio/adc/qcom-pm8xxx-xoadc.c
2816 @@ -423,18 +423,14 @@ static irqreturn_t pm8xxx_eoc_irq(int irq, void *d)
2817 static struct pm8xxx_chan_info *
2818 pm8xxx_get_channel(struct pm8xxx_xoadc *adc, u8 chan)
2819 {
2820 - struct pm8xxx_chan_info *ch;
2821 int i;
2822
2823 for (i = 0; i < adc->nchans; i++) {
2824 - ch = &adc->chans[i];
2825 + struct pm8xxx_chan_info *ch = &adc->chans[i];
2826 if (ch->hwchan->amux_channel == chan)
2827 - break;
2828 + return ch;
2829 }
2830 - if (i == adc->nchans)
2831 - return NULL;
2832 -
2833 - return ch;
2834 + return NULL;
2835 }
2836
2837 static int pm8xxx_read_channel_rsv(struct pm8xxx_xoadc *adc,
2838 diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
2839 index 8221813219e5..25a81fbb0d4d 100644
2840 --- a/drivers/infiniband/hw/cxgb4/cm.c
2841 +++ b/drivers/infiniband/hw/cxgb4/cm.c
2842 @@ -1903,8 +1903,10 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2843 }
2844 mutex_unlock(&ep->com.mutex);
2845
2846 - if (release)
2847 + if (release) {
2848 + close_complete_upcall(ep, -ECONNRESET);
2849 release_ep_resources(ep);
2850 + }
2851 c4iw_put_ep(&ep->com);
2852 return 0;
2853 }
2854 @@ -3606,7 +3608,6 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2855 if (close) {
2856 if (abrupt) {
2857 set_bit(EP_DISC_ABORT, &ep->com.history);
2858 - close_complete_upcall(ep, -ECONNRESET);
2859 ret = send_abort(ep);
2860 } else {
2861 set_bit(EP_DISC_CLOSE, &ep->com.history);
2862 diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c
2863 index fedaf8260105..8c79a480f2b7 100644
2864 --- a/drivers/infiniband/hw/mlx4/cm.c
2865 +++ b/drivers/infiniband/hw/mlx4/cm.c
2866 @@ -39,7 +39,7 @@
2867
2868 #include "mlx4_ib.h"
2869
2870 -#define CM_CLEANUP_CACHE_TIMEOUT (5 * HZ)
2871 +#define CM_CLEANUP_CACHE_TIMEOUT (30 * HZ)
2872
2873 struct id_map_entry {
2874 struct rb_node node;
2875 diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
2876 index 23520df7650f..55cd6e0b409c 100644
2877 --- a/drivers/input/misc/soc_button_array.c
2878 +++ b/drivers/input/misc/soc_button_array.c
2879 @@ -373,7 +373,7 @@ static struct soc_button_info soc_button_PNP0C40[] = {
2880 { "home", 1, EV_KEY, KEY_LEFTMETA, false, true },
2881 { "volume_up", 2, EV_KEY, KEY_VOLUMEUP, true, false },
2882 { "volume_down", 3, EV_KEY, KEY_VOLUMEDOWN, true, false },
2883 - { "rotation_lock", 4, EV_SW, SW_ROTATE_LOCK, false, false },
2884 + { "rotation_lock", 4, EV_KEY, KEY_ROTATE_LOCK_TOGGLE, false, false },
2885 { }
2886 };
2887
2888 diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
2889 index 78188bf7e90d..dbd6824dfffa 100644
2890 --- a/drivers/iommu/intel-iommu.c
2891 +++ b/drivers/iommu/intel-iommu.c
2892 @@ -2485,7 +2485,8 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
2893 if (dev && dev_is_pci(dev)) {
2894 struct pci_dev *pdev = to_pci_dev(info->dev);
2895
2896 - if (!pci_ats_disabled() &&
2897 + if (!pdev->untrusted &&
2898 + !pci_ats_disabled() &&
2899 ecap_dev_iotlb_support(iommu->ecap) &&
2900 pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ATS) &&
2901 dmar_find_matched_atsr_unit(pdev))
2902 diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c
2903 index 1b9e40a203e0..18a8330e1882 100644
2904 --- a/drivers/iommu/io-pgtable-arm-v7s.c
2905 +++ b/drivers/iommu/io-pgtable-arm-v7s.c
2906 @@ -228,7 +228,8 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
2907 if (dma != phys)
2908 goto out_unmap;
2909 }
2910 - kmemleak_ignore(table);
2911 + if (lvl == 2)
2912 + kmemleak_ignore(table);
2913 return table;
2914
2915 out_unmap:
2916 diff --git a/drivers/leds/leds-lp55xx-common.c b/drivers/leds/leds-lp55xx-common.c
2917 index 3d79a6380761..723f2f17497a 100644
2918 --- a/drivers/leds/leds-lp55xx-common.c
2919 +++ b/drivers/leds/leds-lp55xx-common.c
2920 @@ -201,7 +201,7 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
2921
2922 if (!fw) {
2923 dev_err(dev, "firmware request failed\n");
2924 - goto out;
2925 + return;
2926 }
2927
2928 /* handling firmware data is chip dependent */
2929 @@ -214,9 +214,9 @@ static void lp55xx_firmware_loaded(const struct firmware *fw, void *context)
2930
2931 mutex_unlock(&chip->lock);
2932
2933 -out:
2934 /* firmware should be released for other channel use */
2935 release_firmware(chip->fw);
2936 + chip->fw = NULL;
2937 }
2938
2939 static int lp55xx_request_firmware(struct lp55xx_chip *chip)
2940 diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
2941 index 557a8a3270a1..e5daf91310f6 100644
2942 --- a/drivers/md/bcache/sysfs.c
2943 +++ b/drivers/md/bcache/sysfs.c
2944 @@ -287,8 +287,12 @@ STORE(__cached_dev)
2945 sysfs_strtoul_clamp(writeback_rate_update_seconds,
2946 dc->writeback_rate_update_seconds,
2947 1, WRITEBACK_RATE_UPDATE_SECS_MAX);
2948 - d_strtoul(writeback_rate_i_term_inverse);
2949 - d_strtoul_nonzero(writeback_rate_p_term_inverse);
2950 + sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
2951 + dc->writeback_rate_i_term_inverse,
2952 + 1, UINT_MAX);
2953 + sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
2954 + dc->writeback_rate_p_term_inverse,
2955 + 1, UINT_MAX);
2956 d_strtoul_nonzero(writeback_rate_minimum);
2957
2958 sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);
2959 @@ -299,7 +303,9 @@ STORE(__cached_dev)
2960 dc->io_disable = v ? 1 : 0;
2961 }
2962
2963 - d_strtoi_h(sequential_cutoff);
2964 + sysfs_strtoul_clamp(sequential_cutoff,
2965 + dc->sequential_cutoff,
2966 + 0, UINT_MAX);
2967 d_strtoi_h(readahead);
2968
2969 if (attr == &sysfs_clear_stats)
2970 @@ -778,8 +784,17 @@ STORE(__bch_cache_set)
2971 c->error_limit = strtoul_or_return(buf);
2972
2973 /* See count_io_errors() for why 88 */
2974 - if (attr == &sysfs_io_error_halflife)
2975 - c->error_decay = strtoul_or_return(buf) / 88;
2976 + if (attr == &sysfs_io_error_halflife) {
2977 + unsigned long v = 0;
2978 + ssize_t ret;
2979 +
2980 + ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
2981 + if (!ret) {
2982 + c->error_decay = v / 88;
2983 + return size;
2984 + }
2985 + return ret;
2986 + }
2987
2988 if (attr == &sysfs_io_disable) {
2989 v = strtoul_or_return(buf);
2990 diff --git a/drivers/md/bcache/sysfs.h b/drivers/md/bcache/sysfs.h
2991 index 3fe82425859c..0ad2715a884e 100644
2992 --- a/drivers/md/bcache/sysfs.h
2993 +++ b/drivers/md/bcache/sysfs.h
2994 @@ -81,9 +81,16 @@ do { \
2995
2996 #define sysfs_strtoul_clamp(file, var, min, max) \
2997 do { \
2998 - if (attr == &sysfs_ ## file) \
2999 - return strtoul_safe_clamp(buf, var, min, max) \
3000 - ?: (ssize_t) size; \
3001 + if (attr == &sysfs_ ## file) { \
3002 + unsigned long v = 0; \
3003 + ssize_t ret; \
3004 + ret = strtoul_safe_clamp(buf, v, min, max); \
3005 + if (!ret) { \
3006 + var = v; \
3007 + return size; \
3008 + } \
3009 + return ret; \
3010 + } \
3011 } while (0)
3012
3013 #define strtoul_or_return(cp) \
3014 diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
3015 index e83b63608262..254c26eb963a 100644
3016 --- a/drivers/md/dm-thin.c
3017 +++ b/drivers/md/dm-thin.c
3018 @@ -3283,6 +3283,13 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
3019 as.argc = argc;
3020 as.argv = argv;
3021
3022 + /* make sure metadata and data are different devices */
3023 + if (!strcmp(argv[0], argv[1])) {
3024 + ti->error = "Error setting metadata or data device";
3025 + r = -EINVAL;
3026 + goto out_unlock;
3027 + }
3028 +
3029 /*
3030 * Set default pool features.
3031 */
3032 @@ -4167,6 +4174,12 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
3033 tc->sort_bio_list = RB_ROOT;
3034
3035 if (argc == 3) {
3036 + if (!strcmp(argv[0], argv[2])) {
3037 + ti->error = "Error setting origin device";
3038 + r = -EINVAL;
3039 + goto bad_origin_dev;
3040 + }
3041 +
3042 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
3043 if (r) {
3044 ti->error = "Error opening origin device";
3045 diff --git a/drivers/media/i2c/mt9m111.c b/drivers/media/i2c/mt9m111.c
3046 index d639b9bcf64a..7a759b4b88cf 100644
3047 --- a/drivers/media/i2c/mt9m111.c
3048 +++ b/drivers/media/i2c/mt9m111.c
3049 @@ -1273,6 +1273,8 @@ static int mt9m111_probe(struct i2c_client *client,
3050 mt9m111->rect.top = MT9M111_MIN_DARK_ROWS;
3051 mt9m111->rect.width = MT9M111_MAX_WIDTH;
3052 mt9m111->rect.height = MT9M111_MAX_HEIGHT;
3053 + mt9m111->width = mt9m111->rect.width;
3054 + mt9m111->height = mt9m111->rect.height;
3055 mt9m111->fmt = &mt9m111_colour_fmts[0];
3056 mt9m111->lastpage = -1;
3057 mutex_init(&mt9m111->power_lock);
3058 diff --git a/drivers/media/i2c/ov7740.c b/drivers/media/i2c/ov7740.c
3059 index 177688afd9a6..8835b831cdc0 100644
3060 --- a/drivers/media/i2c/ov7740.c
3061 +++ b/drivers/media/i2c/ov7740.c
3062 @@ -1101,6 +1101,9 @@ static int ov7740_probe(struct i2c_client *client,
3063 if (ret)
3064 return ret;
3065
3066 + pm_runtime_set_active(&client->dev);
3067 + pm_runtime_enable(&client->dev);
3068 +
3069 ret = ov7740_detect(ov7740);
3070 if (ret)
3071 goto error_detect;
3072 @@ -1123,8 +1126,6 @@ static int ov7740_probe(struct i2c_client *client,
3073 if (ret)
3074 goto error_async_register;
3075
3076 - pm_runtime_set_active(&client->dev);
3077 - pm_runtime_enable(&client->dev);
3078 pm_runtime_idle(&client->dev);
3079
3080 return 0;
3081 @@ -1134,6 +1135,8 @@ error_async_register:
3082 error_init_controls:
3083 ov7740_free_controls(ov7740);
3084 error_detect:
3085 + pm_runtime_disable(&client->dev);
3086 + pm_runtime_set_suspended(&client->dev);
3087 ov7740_set_power(ov7740, 0);
3088 media_entity_cleanup(&ov7740->subdev.entity);
3089
3090 diff --git a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
3091 index 2a5d5002c27e..f761e4d8bf2a 100644
3092 --- a/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
3093 +++ b/drivers/media/platform/mtk-jpeg/mtk_jpeg_core.c
3094 @@ -702,7 +702,7 @@ end:
3095 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, to_vb2_v4l2_buffer(vb));
3096 }
3097
3098 -static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
3099 +static struct vb2_v4l2_buffer *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
3100 enum v4l2_buf_type type)
3101 {
3102 if (V4L2_TYPE_IS_OUTPUT(type))
3103 @@ -714,7 +714,7 @@ static void *mtk_jpeg_buf_remove(struct mtk_jpeg_ctx *ctx,
3104 static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
3105 {
3106 struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
3107 - struct vb2_buffer *vb;
3108 + struct vb2_v4l2_buffer *vb;
3109 int ret = 0;
3110
3111 ret = pm_runtime_get_sync(ctx->jpeg->dev);
3112 @@ -724,14 +724,14 @@ static int mtk_jpeg_start_streaming(struct vb2_queue *q, unsigned int count)
3113 return 0;
3114 err:
3115 while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
3116 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_QUEUED);
3117 + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_QUEUED);
3118 return ret;
3119 }
3120
3121 static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
3122 {
3123 struct mtk_jpeg_ctx *ctx = vb2_get_drv_priv(q);
3124 - struct vb2_buffer *vb;
3125 + struct vb2_v4l2_buffer *vb;
3126
3127 /*
3128 * STREAMOFF is an acknowledgment for source change event.
3129 @@ -743,7 +743,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
3130 struct mtk_jpeg_src_buf *src_buf;
3131
3132 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3133 - src_buf = mtk_jpeg_vb2_to_srcbuf(vb);
3134 + src_buf = mtk_jpeg_vb2_to_srcbuf(&vb->vb2_buf);
3135 mtk_jpeg_set_queue_data(ctx, &src_buf->dec_param);
3136 ctx->state = MTK_JPEG_RUNNING;
3137 } else if (V4L2_TYPE_IS_OUTPUT(q->type)) {
3138 @@ -751,7 +751,7 @@ static void mtk_jpeg_stop_streaming(struct vb2_queue *q)
3139 }
3140
3141 while ((vb = mtk_jpeg_buf_remove(ctx, q->type)))
3142 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(vb), VB2_BUF_STATE_ERROR);
3143 + v4l2_m2m_buf_done(vb, VB2_BUF_STATE_ERROR);
3144
3145 pm_runtime_put_sync(ctx->jpeg->dev);
3146 }
3147 @@ -807,7 +807,7 @@ static void mtk_jpeg_device_run(void *priv)
3148 {
3149 struct mtk_jpeg_ctx *ctx = priv;
3150 struct mtk_jpeg_dev *jpeg = ctx->jpeg;
3151 - struct vb2_buffer *src_buf, *dst_buf;
3152 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
3153 enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
3154 unsigned long flags;
3155 struct mtk_jpeg_src_buf *jpeg_src_buf;
3156 @@ -817,11 +817,11 @@ static void mtk_jpeg_device_run(void *priv)
3157
3158 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3159 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
3160 - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
3161 + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
3162
3163 if (jpeg_src_buf->flags & MTK_JPEG_BUF_FLAGS_LAST_FRAME) {
3164 - for (i = 0; i < dst_buf->num_planes; i++)
3165 - vb2_set_plane_payload(dst_buf, i, 0);
3166 + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
3167 + vb2_set_plane_payload(&dst_buf->vb2_buf, i, 0);
3168 buf_state = VB2_BUF_STATE_DONE;
3169 goto dec_end;
3170 }
3171 @@ -833,8 +833,8 @@ static void mtk_jpeg_device_run(void *priv)
3172 return;
3173 }
3174
3175 - mtk_jpeg_set_dec_src(ctx, src_buf, &bs);
3176 - if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, dst_buf, &fb))
3177 + mtk_jpeg_set_dec_src(ctx, &src_buf->vb2_buf, &bs);
3178 + if (mtk_jpeg_set_dec_dst(ctx, &jpeg_src_buf->dec_param, &dst_buf->vb2_buf, &fb))
3179 goto dec_end;
3180
3181 spin_lock_irqsave(&jpeg->hw_lock, flags);
3182 @@ -849,8 +849,8 @@ static void mtk_jpeg_device_run(void *priv)
3183 dec_end:
3184 v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
3185 v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
3186 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
3187 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
3188 + v4l2_m2m_buf_done(src_buf, buf_state);
3189 + v4l2_m2m_buf_done(dst_buf, buf_state);
3190 v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
3191 }
3192
3193 @@ -921,7 +921,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
3194 {
3195 struct mtk_jpeg_dev *jpeg = priv;
3196 struct mtk_jpeg_ctx *ctx;
3197 - struct vb2_buffer *src_buf, *dst_buf;
3198 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
3199 struct mtk_jpeg_src_buf *jpeg_src_buf;
3200 enum vb2_buffer_state buf_state = VB2_BUF_STATE_ERROR;
3201 u32 dec_irq_ret;
3202 @@ -938,7 +938,7 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
3203
3204 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
3205 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
3206 - jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(src_buf);
3207 + jpeg_src_buf = mtk_jpeg_vb2_to_srcbuf(&src_buf->vb2_buf);
3208
3209 if (dec_irq_ret >= MTK_JPEG_DEC_RESULT_UNDERFLOW)
3210 mtk_jpeg_dec_reset(jpeg->dec_reg_base);
3211 @@ -948,15 +948,15 @@ static irqreturn_t mtk_jpeg_dec_irq(int irq, void *priv)
3212 goto dec_end;
3213 }
3214
3215 - for (i = 0; i < dst_buf->num_planes; i++)
3216 - vb2_set_plane_payload(dst_buf, i,
3217 + for (i = 0; i < dst_buf->vb2_buf.num_planes; i++)
3218 + vb2_set_plane_payload(&dst_buf->vb2_buf, i,
3219 jpeg_src_buf->dec_param.comp_size[i]);
3220
3221 buf_state = VB2_BUF_STATE_DONE;
3222
3223 dec_end:
3224 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf), buf_state);
3225 - v4l2_m2m_buf_done(to_vb2_v4l2_buffer(dst_buf), buf_state);
3226 + v4l2_m2m_buf_done(src_buf, buf_state);
3227 + v4l2_m2m_buf_done(dst_buf, buf_state);
3228 v4l2_m2m_job_finish(jpeg->m2m_dev, ctx->fh.m2m_ctx);
3229 return IRQ_HANDLED;
3230 }
3231 diff --git a/drivers/media/platform/mx2_emmaprp.c b/drivers/media/platform/mx2_emmaprp.c
3232 index 27b078cf98e3..f60f499c596b 100644
3233 --- a/drivers/media/platform/mx2_emmaprp.c
3234 +++ b/drivers/media/platform/mx2_emmaprp.c
3235 @@ -274,7 +274,7 @@ static void emmaprp_device_run(void *priv)
3236 {
3237 struct emmaprp_ctx *ctx = priv;
3238 struct emmaprp_q_data *s_q_data, *d_q_data;
3239 - struct vb2_buffer *src_buf, *dst_buf;
3240 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
3241 struct emmaprp_dev *pcdev = ctx->dev;
3242 unsigned int s_width, s_height;
3243 unsigned int d_width, d_height;
3244 @@ -294,8 +294,8 @@ static void emmaprp_device_run(void *priv)
3245 d_height = d_q_data->height;
3246 d_size = d_width * d_height;
3247
3248 - p_in = vb2_dma_contig_plane_dma_addr(src_buf, 0);
3249 - p_out = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
3250 + p_in = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
3251 + p_out = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
3252 if (!p_in || !p_out) {
3253 v4l2_err(&pcdev->v4l2_dev,
3254 "Acquiring kernel pointers to buffers failed\n");
3255 diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c
3256 index f0719ce24b97..aef8d8dab6ab 100644
3257 --- a/drivers/media/platform/rcar-vin/rcar-core.c
3258 +++ b/drivers/media/platform/rcar-vin/rcar-core.c
3259 @@ -131,9 +131,13 @@ static int rvin_group_link_notify(struct media_link *link, u32 flags,
3260 !is_media_entity_v4l2_video_device(link->sink->entity))
3261 return 0;
3262
3263 - /* If any entity is in use don't allow link changes. */
3264 + /*
3265 + * Don't allow link changes if any entity in the graph is
3266 + * streaming, modifying the CHSEL register fields can disrupt
3267 + * running streams.
3268 + */
3269 media_device_for_each_entity(entity, &group->mdev)
3270 - if (entity->use_count)
3271 + if (entity->stream_count)
3272 return -EBUSY;
3273
3274 mutex_lock(&group->lock);
3275 diff --git a/drivers/media/platform/rockchip/rga/rga.c b/drivers/media/platform/rockchip/rga/rga.c
3276 index 5c653287185f..b096227a9722 100644
3277 --- a/drivers/media/platform/rockchip/rga/rga.c
3278 +++ b/drivers/media/platform/rockchip/rga/rga.c
3279 @@ -43,7 +43,7 @@ static void device_run(void *prv)
3280 {
3281 struct rga_ctx *ctx = prv;
3282 struct rockchip_rga *rga = ctx->rga;
3283 - struct vb2_buffer *src, *dst;
3284 + struct vb2_v4l2_buffer *src, *dst;
3285 unsigned long flags;
3286
3287 spin_lock_irqsave(&rga->ctrl_lock, flags);
3288 @@ -53,8 +53,8 @@ static void device_run(void *prv)
3289 src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3290 dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
3291
3292 - rga_buf_map(src);
3293 - rga_buf_map(dst);
3294 + rga_buf_map(&src->vb2_buf);
3295 + rga_buf_map(&dst->vb2_buf);
3296
3297 rga_hw_start(rga);
3298
3299 diff --git a/drivers/media/platform/s5p-g2d/g2d.c b/drivers/media/platform/s5p-g2d/g2d.c
3300 index 57ab1d1085d1..971c47165010 100644
3301 --- a/drivers/media/platform/s5p-g2d/g2d.c
3302 +++ b/drivers/media/platform/s5p-g2d/g2d.c
3303 @@ -513,7 +513,7 @@ static void device_run(void *prv)
3304 {
3305 struct g2d_ctx *ctx = prv;
3306 struct g2d_dev *dev = ctx->dev;
3307 - struct vb2_buffer *src, *dst;
3308 + struct vb2_v4l2_buffer *src, *dst;
3309 unsigned long flags;
3310 u32 cmd = 0;
3311
3312 @@ -528,10 +528,10 @@ static void device_run(void *prv)
3313 spin_lock_irqsave(&dev->ctrl_lock, flags);
3314
3315 g2d_set_src_size(dev, &ctx->in);
3316 - g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(src, 0));
3317 + g2d_set_src_addr(dev, vb2_dma_contig_plane_dma_addr(&src->vb2_buf, 0));
3318
3319 g2d_set_dst_size(dev, &ctx->out);
3320 - g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(dst, 0));
3321 + g2d_set_dst_addr(dev, vb2_dma_contig_plane_dma_addr(&dst->vb2_buf, 0));
3322
3323 g2d_set_rop4(dev, ctx->rop);
3324 g2d_set_flip(dev, ctx->flip);
3325 diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
3326 index 3f9000b70385..370942b67d86 100644
3327 --- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
3328 +++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
3329 @@ -793,14 +793,14 @@ static void skip(struct s5p_jpeg_buffer *buf, long len);
3330 static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
3331 {
3332 struct s5p_jpeg *jpeg = ctx->jpeg;
3333 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3334 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3335 struct s5p_jpeg_buffer jpeg_buffer;
3336 unsigned int word;
3337 int c, x, components;
3338
3339 jpeg_buffer.size = 2; /* Ls */
3340 jpeg_buffer.data =
3341 - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sos + 2;
3342 + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sos + 2;
3343 jpeg_buffer.curr = 0;
3344
3345 word = 0;
3346 @@ -830,14 +830,14 @@ static void exynos4_jpeg_parse_decode_h_tbl(struct s5p_jpeg_ctx *ctx)
3347 static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
3348 {
3349 struct s5p_jpeg *jpeg = ctx->jpeg;
3350 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3351 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3352 struct s5p_jpeg_buffer jpeg_buffer;
3353 unsigned int word;
3354 int c, i, n, j;
3355
3356 for (j = 0; j < ctx->out_q.dht.n; ++j) {
3357 jpeg_buffer.size = ctx->out_q.dht.len[j];
3358 - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
3359 + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
3360 ctx->out_q.dht.marker[j];
3361 jpeg_buffer.curr = 0;
3362
3363 @@ -889,13 +889,13 @@ static void exynos4_jpeg_parse_huff_tbl(struct s5p_jpeg_ctx *ctx)
3364 static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
3365 {
3366 struct s5p_jpeg *jpeg = ctx->jpeg;
3367 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3368 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3369 struct s5p_jpeg_buffer jpeg_buffer;
3370 int c, x, components;
3371
3372 jpeg_buffer.size = ctx->out_q.sof_len;
3373 jpeg_buffer.data =
3374 - (unsigned long)vb2_plane_vaddr(vb, 0) + ctx->out_q.sof;
3375 + (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) + ctx->out_q.sof;
3376 jpeg_buffer.curr = 0;
3377
3378 skip(&jpeg_buffer, 5); /* P, Y, X */
3379 @@ -920,14 +920,14 @@ static void exynos4_jpeg_parse_decode_q_tbl(struct s5p_jpeg_ctx *ctx)
3380 static void exynos4_jpeg_parse_q_tbl(struct s5p_jpeg_ctx *ctx)
3381 {
3382 struct s5p_jpeg *jpeg = ctx->jpeg;
3383 - struct vb2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3384 + struct vb2_v4l2_buffer *vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3385 struct s5p_jpeg_buffer jpeg_buffer;
3386 unsigned int word;
3387 int c, i, j;
3388
3389 for (j = 0; j < ctx->out_q.dqt.n; ++j) {
3390 jpeg_buffer.size = ctx->out_q.dqt.len[j];
3391 - jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(vb, 0) +
3392 + jpeg_buffer.data = (unsigned long)vb2_plane_vaddr(&vb->vb2_buf, 0) +
3393 ctx->out_q.dqt.marker[j];
3394 jpeg_buffer.curr = 0;
3395
3396 @@ -1293,13 +1293,16 @@ static int s5p_jpeg_querycap(struct file *file, void *priv,
3397 return 0;
3398 }
3399
3400 -static int enum_fmt(struct s5p_jpeg_fmt *sjpeg_formats, int n,
3401 +static int enum_fmt(struct s5p_jpeg_ctx *ctx,
3402 + struct s5p_jpeg_fmt *sjpeg_formats, int n,
3403 struct v4l2_fmtdesc *f, u32 type)
3404 {
3405 int i, num = 0;
3406 + unsigned int fmt_ver_flag = ctx->jpeg->variant->fmt_ver_flag;
3407
3408 for (i = 0; i < n; ++i) {
3409 - if (sjpeg_formats[i].flags & type) {
3410 + if (sjpeg_formats[i].flags & type &&
3411 + sjpeg_formats[i].flags & fmt_ver_flag) {
3412 /* index-th format of type type found ? */
3413 if (num == f->index)
3414 break;
3415 @@ -1326,11 +1329,11 @@ static int s5p_jpeg_enum_fmt_vid_cap(struct file *file, void *priv,
3416 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
3417
3418 if (ctx->mode == S5P_JPEG_ENCODE)
3419 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
3420 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
3421 SJPEG_FMT_FLAG_ENC_CAPTURE);
3422
3423 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
3424 - SJPEG_FMT_FLAG_DEC_CAPTURE);
3425 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
3426 + SJPEG_FMT_FLAG_DEC_CAPTURE);
3427 }
3428
3429 static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
3430 @@ -1339,11 +1342,11 @@ static int s5p_jpeg_enum_fmt_vid_out(struct file *file, void *priv,
3431 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv);
3432
3433 if (ctx->mode == S5P_JPEG_ENCODE)
3434 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
3435 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
3436 SJPEG_FMT_FLAG_ENC_OUTPUT);
3437
3438 - return enum_fmt(sjpeg_formats, SJPEG_NUM_FORMATS, f,
3439 - SJPEG_FMT_FLAG_DEC_OUTPUT);
3440 + return enum_fmt(ctx, sjpeg_formats, SJPEG_NUM_FORMATS, f,
3441 + SJPEG_FMT_FLAG_DEC_OUTPUT);
3442 }
3443
3444 static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx,
3445 @@ -2072,15 +2075,15 @@ static void s5p_jpeg_device_run(void *priv)
3446 {
3447 struct s5p_jpeg_ctx *ctx = priv;
3448 struct s5p_jpeg *jpeg = ctx->jpeg;
3449 - struct vb2_buffer *src_buf, *dst_buf;
3450 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
3451 unsigned long src_addr, dst_addr, flags;
3452
3453 spin_lock_irqsave(&ctx->jpeg->slock, flags);
3454
3455 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3456 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
3457 - src_addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
3458 - dst_addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
3459 + src_addr = vb2_dma_contig_plane_dma_addr(&src_buf->vb2_buf, 0);
3460 + dst_addr = vb2_dma_contig_plane_dma_addr(&dst_buf->vb2_buf, 0);
3461
3462 s5p_jpeg_reset(jpeg->regs);
3463 s5p_jpeg_poweron(jpeg->regs);
3464 @@ -2153,7 +2156,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
3465 {
3466 struct s5p_jpeg *jpeg = ctx->jpeg;
3467 struct s5p_jpeg_fmt *fmt;
3468 - struct vb2_buffer *vb;
3469 + struct vb2_v4l2_buffer *vb;
3470 struct s5p_jpeg_addr jpeg_addr = {};
3471 u32 pix_size, padding_bytes = 0;
3472
3473 @@ -2172,7 +2175,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
3474 vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
3475 }
3476
3477 - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
3478 + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
3479
3480 if (fmt->colplanes == 2) {
3481 jpeg_addr.cb = jpeg_addr.y + pix_size - padding_bytes;
3482 @@ -2190,7 +2193,7 @@ static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
3483 static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
3484 {
3485 struct s5p_jpeg *jpeg = ctx->jpeg;
3486 - struct vb2_buffer *vb;
3487 + struct vb2_v4l2_buffer *vb;
3488 unsigned int jpeg_addr = 0;
3489
3490 if (ctx->mode == S5P_JPEG_ENCODE)
3491 @@ -2198,7 +2201,7 @@ static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
3492 else
3493 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3494
3495 - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
3496 + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
3497 if (jpeg->variant->version == SJPEG_EXYNOS5433 &&
3498 ctx->mode == S5P_JPEG_DECODE)
3499 jpeg_addr += ctx->out_q.sos;
3500 @@ -2314,7 +2317,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
3501 {
3502 struct s5p_jpeg *jpeg = ctx->jpeg;
3503 struct s5p_jpeg_fmt *fmt;
3504 - struct vb2_buffer *vb;
3505 + struct vb2_v4l2_buffer *vb;
3506 struct s5p_jpeg_addr jpeg_addr = {};
3507 u32 pix_size;
3508
3509 @@ -2328,7 +2331,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
3510 fmt = ctx->cap_q.fmt;
3511 }
3512
3513 - jpeg_addr.y = vb2_dma_contig_plane_dma_addr(vb, 0);
3514 + jpeg_addr.y = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
3515
3516 if (fmt->colplanes == 2) {
3517 jpeg_addr.cb = jpeg_addr.y + pix_size;
3518 @@ -2346,7 +2349,7 @@ static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx)
3519 static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
3520 {
3521 struct s5p_jpeg *jpeg = ctx->jpeg;
3522 - struct vb2_buffer *vb;
3523 + struct vb2_v4l2_buffer *vb;
3524 unsigned int jpeg_addr = 0;
3525
3526 if (ctx->mode == S5P_JPEG_ENCODE)
3527 @@ -2354,7 +2357,7 @@ static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx)
3528 else
3529 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx);
3530
3531 - jpeg_addr = vb2_dma_contig_plane_dma_addr(vb, 0);
3532 + jpeg_addr = vb2_dma_contig_plane_dma_addr(&vb->vb2_buf, 0);
3533 exynos3250_jpeg_jpgadr(jpeg->regs, jpeg_addr);
3534 }
3535
3536 diff --git a/drivers/media/platform/sh_veu.c b/drivers/media/platform/sh_veu.c
3537 index 09ae64a0004c..d277cc674349 100644
3538 --- a/drivers/media/platform/sh_veu.c
3539 +++ b/drivers/media/platform/sh_veu.c
3540 @@ -273,13 +273,13 @@ static void sh_veu_process(struct sh_veu_dev *veu,
3541 static void sh_veu_device_run(void *priv)
3542 {
3543 struct sh_veu_dev *veu = priv;
3544 - struct vb2_buffer *src_buf, *dst_buf;
3545 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
3546
3547 src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
3548 dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
3549
3550 if (src_buf && dst_buf)
3551 - sh_veu_process(veu, src_buf, dst_buf);
3552 + sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
3553 }
3554
3555 /* ========== video ioctls ========== */
3556 diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
3557 index c60a7625b1fa..b2873a2432b6 100644
3558 --- a/drivers/mmc/host/omap.c
3559 +++ b/drivers/mmc/host/omap.c
3560 @@ -920,7 +920,7 @@ static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_reques
3561 reg &= ~(1 << 5);
3562 OMAP_MMC_WRITE(host, SDIO, reg);
3563 /* Set maximum timeout */
3564 - OMAP_MMC_WRITE(host, CTO, 0xff);
3565 + OMAP_MMC_WRITE(host, CTO, 0xfd);
3566 }
3567
3568 static inline void set_data_timeout(struct mmc_omap_host *host, struct mmc_request *req)
3569 diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
3570 index 4a0ec8e87c7a..6cba05a80892 100644
3571 --- a/drivers/net/dsa/mv88e6xxx/chip.c
3572 +++ b/drivers/net/dsa/mv88e6xxx/chip.c
3573 @@ -442,12 +442,20 @@ out_mapping:
3574
3575 static int mv88e6xxx_g1_irq_setup(struct mv88e6xxx_chip *chip)
3576 {
3577 + static struct lock_class_key lock_key;
3578 + static struct lock_class_key request_key;
3579 int err;
3580
3581 err = mv88e6xxx_g1_irq_setup_common(chip);
3582 if (err)
3583 return err;
3584
3585 + /* These lock classes tells lockdep that global 1 irqs are in
3586 + * a different category than their parent GPIO, so it won't
3587 + * report false recursion.
3588 + */
3589 + irq_set_lockdep_class(chip->irq, &lock_key, &request_key);
3590 +
3591 err = request_threaded_irq(chip->irq, NULL,
3592 mv88e6xxx_g1_irq_thread_fn,
3593 IRQF_ONESHOT | IRQF_SHARED,
3594 diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
3595 index 41eee62fed25..c44b2822e4dd 100644
3596 --- a/drivers/net/dsa/mv88e6xxx/port.c
3597 +++ b/drivers/net/dsa/mv88e6xxx/port.c
3598 @@ -480,6 +480,8 @@ int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port,
3599 phy_interface_t mode)
3600 {
3601 switch (mode) {
3602 + case PHY_INTERFACE_MODE_NA:
3603 + return 0;
3604 case PHY_INTERFACE_MODE_XGMII:
3605 case PHY_INTERFACE_MODE_XAUI:
3606 case PHY_INTERFACE_MODE_RXAUI:
3607 diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
3608 index 9a7f70db20c7..733d9172425b 100644
3609 --- a/drivers/net/ethernet/cisco/enic/enic_main.c
3610 +++ b/drivers/net/ethernet/cisco/enic/enic_main.c
3611 @@ -119,7 +119,7 @@ static void enic_init_affinity_hint(struct enic *enic)
3612
3613 for (i = 0; i < enic->intr_count; i++) {
3614 if (enic_is_err_intr(enic, i) || enic_is_notify_intr(enic, i) ||
3615 - (enic->msix[i].affinity_mask &&
3616 + (cpumask_available(enic->msix[i].affinity_mask) &&
3617 !cpumask_empty(enic->msix[i].affinity_mask)))
3618 continue;
3619 if (zalloc_cpumask_var(&enic->msix[i].affinity_mask,
3620 @@ -148,7 +148,7 @@ static void enic_set_affinity_hint(struct enic *enic)
3621 for (i = 0; i < enic->intr_count; i++) {
3622 if (enic_is_err_intr(enic, i) ||
3623 enic_is_notify_intr(enic, i) ||
3624 - !enic->msix[i].affinity_mask ||
3625 + !cpumask_available(enic->msix[i].affinity_mask) ||
3626 cpumask_empty(enic->msix[i].affinity_mask))
3627 continue;
3628 err = irq_set_affinity_hint(enic->msix_entry[i].vector,
3629 @@ -161,7 +161,7 @@ static void enic_set_affinity_hint(struct enic *enic)
3630 for (i = 0; i < enic->wq_count; i++) {
3631 int wq_intr = enic_msix_wq_intr(enic, i);
3632
3633 - if (enic->msix[wq_intr].affinity_mask &&
3634 + if (cpumask_available(enic->msix[wq_intr].affinity_mask) &&
3635 !cpumask_empty(enic->msix[wq_intr].affinity_mask))
3636 netif_set_xps_queue(enic->netdev,
3637 enic->msix[wq_intr].affinity_mask,
3638 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
3639 index efb6c1a25171..3ea72e4d9dc4 100644
3640 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
3641 +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
3642 @@ -1094,10 +1094,10 @@ static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
3643 return 0;
3644 }
3645
3646 -static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
3647 +static enum hnae3_reset_type
3648 +hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
3649 {
3650 - enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET;
3651 - struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
3652 + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
3653 struct device *dev = &hdev->pdev->dev;
3654 struct hclge_desc desc[2];
3655 unsigned int status;
3656 @@ -1110,17 +1110,20 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
3657 if (ret) {
3658 dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
3659 /* reset everything for now */
3660 - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
3661 - return ret;
3662 + return HNAE3_GLOBAL_RESET;
3663 }
3664
3665 status = le32_to_cpu(desc[0].data[0]);
3666
3667 - if (status & HCLGE_ROCEE_RERR_INT_MASK)
3668 + if (status & HCLGE_ROCEE_RERR_INT_MASK) {
3669 dev_warn(dev, "ROCEE RAS AXI rresp error\n");
3670 + reset_type = HNAE3_FUNC_RESET;
3671 + }
3672
3673 - if (status & HCLGE_ROCEE_BERR_INT_MASK)
3674 + if (status & HCLGE_ROCEE_BERR_INT_MASK) {
3675 dev_warn(dev, "ROCEE RAS AXI bresp error\n");
3676 + reset_type = HNAE3_FUNC_RESET;
3677 + }
3678
3679 if (status & HCLGE_ROCEE_ECC_INT_MASK) {
3680 dev_warn(dev, "ROCEE RAS 2bit ECC error\n");
3681 @@ -1132,9 +1135,9 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
3682 if (ret) {
3683 dev_err(dev, "failed(%d) to process ovf error\n", ret);
3684 /* reset everything for now */
3685 - HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET);
3686 - return ret;
3687 + return HNAE3_GLOBAL_RESET;
3688 }
3689 + reset_type = HNAE3_FUNC_RESET;
3690 }
3691
3692 /* clear error status */
3693 @@ -1143,12 +1146,10 @@ static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
3694 if (ret) {
3695 dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
3696 /* reset everything for now */
3697 - reset_type = HNAE3_GLOBAL_RESET;
3698 + return HNAE3_GLOBAL_RESET;
3699 }
3700
3701 - HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
3702 -
3703 - return ret;
3704 + return reset_type;
3705 }
3706
3707 static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
3708 @@ -1178,15 +1179,18 @@ static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
3709 return ret;
3710 }
3711
3712 -static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
3713 +static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
3714 {
3715 + enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
3716 struct hclge_dev *hdev = ae_dev->priv;
3717
3718 if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
3719 hdev->pdev->revision < 0x21)
3720 - return HNAE3_NONE_RESET;
3721 + return;
3722
3723 - return hclge_log_and_clear_rocee_ras_error(hdev);
3724 + reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
3725 + if (reset_type != HNAE3_NONE_RESET)
3726 + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type);
3727 }
3728
3729 static const struct hclge_hw_blk hw_blk[] = {
3730 diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
3731 index 189f231075c2..7acc61e4f645 100644
3732 --- a/drivers/net/ethernet/intel/e1000e/netdev.c
3733 +++ b/drivers/net/ethernet/intel/e1000e/netdev.c
3734 @@ -2106,7 +2106,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
3735 if (strlen(netdev->name) < (IFNAMSIZ - 5))
3736 snprintf(adapter->rx_ring->name,
3737 sizeof(adapter->rx_ring->name) - 1,
3738 - "%s-rx-0", netdev->name);
3739 + "%.14s-rx-0", netdev->name);
3740 else
3741 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
3742 err = request_irq(adapter->msix_entries[vector].vector,
3743 @@ -2122,7 +2122,7 @@ static int e1000_request_msix(struct e1000_adapter *adapter)
3744 if (strlen(netdev->name) < (IFNAMSIZ - 5))
3745 snprintf(adapter->tx_ring->name,
3746 sizeof(adapter->tx_ring->name) - 1,
3747 - "%s-tx-0", netdev->name);
3748 + "%.14s-tx-0", netdev->name);
3749 else
3750 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
3751 err = request_irq(adapter->msix_entries[vector].vector,
3752 @@ -5309,8 +5309,13 @@ static void e1000_watchdog_task(struct work_struct *work)
3753 /* 8000ES2LAN requires a Rx packet buffer work-around
3754 * on link down event; reset the controller to flush
3755 * the Rx packet buffer.
3756 + *
3757 + * If the link is lost the controller stops DMA, but
3758 + * if there is queued Tx work it cannot be done. So
3759 + * reset the controller to flush the Tx packet buffers.
3760 */
3761 - if (adapter->flags & FLAG_RX_NEEDS_RESTART)
3762 + if ((adapter->flags & FLAG_RX_NEEDS_RESTART) ||
3763 + e1000_desc_unused(tx_ring) + 1 < tx_ring->count)
3764 adapter->flags |= FLAG_RESTART_NOW;
3765 else
3766 pm_schedule_suspend(netdev->dev.parent,
3767 @@ -5333,14 +5338,6 @@ link_up:
3768 adapter->gotc_old = adapter->stats.gotc;
3769 spin_unlock(&adapter->stats64_lock);
3770
3771 - /* If the link is lost the controller stops DMA, but
3772 - * if there is queued Tx work it cannot be done. So
3773 - * reset the controller to flush the Tx packet buffers.
3774 - */
3775 - if (!netif_carrier_ok(netdev) &&
3776 - (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
3777 - adapter->flags |= FLAG_RESTART_NOW;
3778 -
3779 /* If reset is necessary, do it outside of interrupt context. */
3780 if (adapter->flags & FLAG_RESTART_NOW) {
3781 schedule_work(&adapter->reset_task);
3782 @@ -7351,6 +7348,8 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3783
3784 e1000_print_device_info(adapter);
3785
3786 + dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_NEVER_SKIP);
3787 +
3788 if (pci_dev_run_wake(pdev))
3789 pm_runtime_put_noidle(&pdev->dev);
3790
3791 diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c
3792 index 2e5693107fa4..8d602247eb44 100644
3793 --- a/drivers/net/ethernet/intel/ice/ice_switch.c
3794 +++ b/drivers/net/ethernet/intel/ice/ice_switch.c
3795 @@ -1538,9 +1538,20 @@ ice_remove_rule_internal(struct ice_hw *hw, u8 recp_id,
3796 } else if (!list_elem->vsi_list_info) {
3797 status = ICE_ERR_DOES_NOT_EXIST;
3798 goto exit;
3799 + } else if (list_elem->vsi_list_info->ref_cnt > 1) {
3800 + /* a ref_cnt > 1 indicates that the vsi_list is being
3801 + * shared by multiple rules. Decrement the ref_cnt and
3802 + * remove this rule, but do not modify the list, as it
3803 + * is in-use by other rules.
3804 + */
3805 + list_elem->vsi_list_info->ref_cnt--;
3806 + remove_rule = true;
3807 } else {
3808 - if (list_elem->vsi_list_info->ref_cnt > 1)
3809 - list_elem->vsi_list_info->ref_cnt--;
3810 + /* a ref_cnt of 1 indicates the vsi_list is only used
3811 + * by one rule. However, the original removal request is only
3812 + * for a single VSI. Update the vsi_list first, and only
3813 + * remove the rule if there are no further VSIs in this list.
3814 + */
3815 vsi_handle = f_entry->fltr_info.vsi_handle;
3816 status = ice_rem_update_vsi_list(hw, vsi_handle, list_elem);
3817 if (status)
3818 diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3819 index 16066c2d5b3a..931beac3359d 100644
3820 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3821 +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
3822 @@ -1380,13 +1380,9 @@ static void mvpp2_port_reset(struct mvpp2_port *port)
3823 for (i = 0; i < ARRAY_SIZE(mvpp2_ethtool_regs); i++)
3824 mvpp2_read_count(port, &mvpp2_ethtool_regs[i]);
3825
3826 - val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3827 - ~MVPP2_GMAC_PORT_RESET_MASK;
3828 + val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) |
3829 + MVPP2_GMAC_PORT_RESET_MASK;
3830 writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
3831 -
3832 - while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3833 - MVPP2_GMAC_PORT_RESET_MASK)
3834 - continue;
3835 }
3836
3837 /* Change maximum receive size of the port */
3838 @@ -4543,12 +4539,15 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
3839 const struct phylink_link_state *state)
3840 {
3841 u32 an, ctrl0, ctrl2, ctrl4;
3842 + u32 old_ctrl2;
3843
3844 an = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3845 ctrl0 = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
3846 ctrl2 = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
3847 ctrl4 = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
3848
3849 + old_ctrl2 = ctrl2;
3850 +
3851 /* Force link down */
3852 an &= ~MVPP2_GMAC_FORCE_LINK_PASS;
3853 an |= MVPP2_GMAC_FORCE_LINK_DOWN;
3854 @@ -4621,6 +4620,12 @@ static void mvpp2_gmac_config(struct mvpp2_port *port, unsigned int mode,
3855 writel(ctrl2, port->base + MVPP2_GMAC_CTRL_2_REG);
3856 writel(ctrl4, port->base + MVPP22_GMAC_CTRL_4_REG);
3857 writel(an, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
3858 +
3859 + if (old_ctrl2 & MVPP2_GMAC_PORT_RESET_MASK) {
3860 + while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
3861 + MVPP2_GMAC_PORT_RESET_MASK)
3862 + continue;
3863 + }
3864 }
3865
3866 static void mvpp2_mac_config(struct net_device *dev, unsigned int mode,
3867 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3868 index 47233b9a4f81..e6099f51d25f 100644
3869 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3870 +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
3871 @@ -357,6 +357,9 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv,
3872
3873 if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
3874 priv->channels.params = new_channels.params;
3875 + if (!netif_is_rxfh_configured(priv->netdev))
3876 + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt,
3877 + MLX5E_INDIR_RQT_SIZE, count);
3878 goto out;
3879 }
3880
3881 diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3882 index 5b492b67f4e1..13c48883ed61 100644
3883 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3884 +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
3885 @@ -1812,7 +1812,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
3886 u64 node_guid;
3887 int err = 0;
3888
3889 - if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
3890 + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
3891 return -EPERM;
3892 if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
3893 return -EINVAL;
3894 @@ -1886,7 +1886,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
3895 {
3896 struct mlx5_vport *evport;
3897
3898 - if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
3899 + if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
3900 return -EPERM;
3901 if (!LEGAL_VPORT(esw, vport))
3902 return -EINVAL;
3903 @@ -2059,19 +2059,24 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
3904 int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
3905 u32 max_rate, u32 min_rate)
3906 {
3907 - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
3908 - bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
3909 - fw_max_bw_share >= MLX5_MIN_BW_SHARE;
3910 - bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
3911 struct mlx5_vport *evport;
3912 + u32 fw_max_bw_share;
3913 u32 previous_min_rate;
3914 u32 divider;
3915 + bool min_rate_supported;
3916 + bool max_rate_supported;
3917 int err = 0;
3918
3919 if (!ESW_ALLOWED(esw))
3920 return -EPERM;
3921 if (!LEGAL_VPORT(esw, vport))
3922 return -EINVAL;
3923 +
3924 + fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
3925 + min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
3926 + fw_max_bw_share >= MLX5_MIN_BW_SHARE;
3927 + max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
3928 +
3929 if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
3930 return -EOPNOTSUPP;
3931
3932 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3933 index b65e274b02e9..cbdee5164be7 100644
3934 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3935 +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3936 @@ -2105,7 +2105,7 @@ static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
3937 int i;
3938
3939 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
3940 - snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
3941 + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
3942 mlxsw_sp_port_hw_prio_stats[i].str, prio);
3943 *p += ETH_GSTRING_LEN;
3944 }
3945 @@ -2116,7 +2116,7 @@ static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
3946 int i;
3947
3948 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
3949 - snprintf(*p, ETH_GSTRING_LEN, "%s_%d",
3950 + snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
3951 mlxsw_sp_port_hw_tc_stats[i].str, tc);
3952 *p += ETH_GSTRING_LEN;
3953 }
3954 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3955 index 685d20472358..019ab99e65bb 100644
3956 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3957 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
3958 @@ -474,7 +474,7 @@ static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
3959 struct dma_desc *p, struct sk_buff *skb)
3960 {
3961 struct skb_shared_hwtstamps shhwtstamp;
3962 - u64 ns;
3963 + u64 ns = 0;
3964
3965 if (!priv->hwts_tx_en)
3966 return;
3967 @@ -513,7 +513,7 @@ static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
3968 {
3969 struct skb_shared_hwtstamps *shhwtstamp = NULL;
3970 struct dma_desc *desc = p;
3971 - u64 ns;
3972 + u64 ns = 0;
3973
3974 if (!priv->hwts_rx_en)
3975 return;
3976 @@ -558,8 +558,8 @@ static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
3977 u32 snap_type_sel = 0;
3978 u32 ts_master_en = 0;
3979 u32 ts_event_en = 0;
3980 + u32 sec_inc = 0;
3981 u32 value = 0;
3982 - u32 sec_inc;
3983 bool xmac;
3984
3985 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
3986 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
3987 index 2293e21f789f..cc60b3fb0892 100644
3988 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
3989 +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
3990 @@ -105,7 +105,7 @@ static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
3991 struct stmmac_priv *priv =
3992 container_of(ptp, struct stmmac_priv, ptp_clock_ops);
3993 unsigned long flags;
3994 - u64 ns;
3995 + u64 ns = 0;
3996
3997 spin_lock_irqsave(&priv->ptp_lock, flags);
3998 stmmac_get_systime(priv, priv->ptpaddr, &ns);
3999 diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
4000 index 03af927fa5ad..e39bf0428dd9 100644
4001 --- a/drivers/net/phy/phy-c45.c
4002 +++ b/drivers/net/phy/phy-c45.c
4003 @@ -147,9 +147,15 @@ int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask)
4004 mmd_mask &= ~BIT(devad);
4005
4006 /* The link state is latched low so that momentary link
4007 - * drops can be detected. Do not double-read the status
4008 - * register if the link is down.
4009 + * drops can be detected. Do not double-read the status
4010 + * in polling mode to detect such short link drops.
4011 */
4012 + if (!phy_polling_mode(phydev)) {
4013 + val = phy_read_mmd(phydev, devad, MDIO_STAT1);
4014 + if (val < 0)
4015 + return val;
4016 + }
4017 +
4018 val = phy_read_mmd(phydev, devad, MDIO_STAT1);
4019 if (val < 0)
4020 return val;
4021 diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
4022 index 739434fe04fa..adf79614c2db 100644
4023 --- a/drivers/net/phy/phy_device.c
4024 +++ b/drivers/net/phy/phy_device.c
4025 @@ -1683,10 +1683,15 @@ int genphy_update_link(struct phy_device *phydev)
4026 {
4027 int status;
4028
4029 - /* Do a fake read */
4030 - status = phy_read(phydev, MII_BMSR);
4031 - if (status < 0)
4032 - return status;
4033 + /* The link state is latched low so that momentary link
4034 + * drops can be detected. Do not double-read the status
4035 + * in polling mode to detect such short link drops.
4036 + */
4037 + if (!phy_polling_mode(phydev)) {
4038 + status = phy_read(phydev, MII_BMSR);
4039 + if (status < 0)
4040 + return status;
4041 + }
4042
4043 /* Read link and autonegotiation status */
4044 status = phy_read(phydev, MII_BMSR);
4045 diff --git a/drivers/net/veth.c b/drivers/net/veth.c
4046 index f412ea1cef18..b203d1867959 100644
4047 --- a/drivers/net/veth.c
4048 +++ b/drivers/net/veth.c
4049 @@ -115,7 +115,8 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
4050 p += sizeof(ethtool_stats_keys);
4051 for (i = 0; i < dev->real_num_rx_queues; i++) {
4052 for (j = 0; j < VETH_RQ_STATS_LEN; j++) {
4053 - snprintf(p, ETH_GSTRING_LEN, "rx_queue_%u_%s",
4054 + snprintf(p, ETH_GSTRING_LEN,
4055 + "rx_queue_%u_%.11s",
4056 i, veth_rq_stats_desc[j].desc);
4057 p += ETH_GSTRING_LEN;
4058 }
4059 diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
4060 index 2a5668b4f6bc..1a1ea4bbf8a0 100644
4061 --- a/drivers/net/wireless/ath/ath10k/ce.c
4062 +++ b/drivers/net/wireless/ath/ath10k/ce.c
4063 @@ -500,14 +500,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
4064 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
4065
4066 /* WORKAROUND */
4067 - if (!(flags & CE_SEND_FLAG_GATHER)) {
4068 - if (ar->hw_params.shadow_reg_support)
4069 - ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
4070 - write_index);
4071 - else
4072 - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
4073 - write_index);
4074 - }
4075 + if (!(flags & CE_SEND_FLAG_GATHER))
4076 + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
4077
4078 src_ring->write_index = write_index;
4079 exit:
4080 @@ -581,8 +575,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
4081 /* Update Source Ring Write Index */
4082 write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
4083
4084 - if (!(flags & CE_SEND_FLAG_GATHER))
4085 - ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
4086 + if (!(flags & CE_SEND_FLAG_GATHER)) {
4087 + if (ar->hw_params.shadow_reg_support)
4088 + ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
4089 + write_index);
4090 + else
4091 + ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
4092 + write_index);
4093 + }
4094
4095 src_ring->write_index = write_index;
4096 exit:
4097 @@ -1404,12 +1404,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
4098 u32 nentries)
4099 {
4100 src_ring->shadow_base_unaligned = kcalloc(nentries,
4101 - sizeof(struct ce_desc),
4102 + sizeof(struct ce_desc_64),
4103 GFP_KERNEL);
4104 if (!src_ring->shadow_base_unaligned)
4105 return -ENOMEM;
4106
4107 - src_ring->shadow_base = (struct ce_desc *)
4108 + src_ring->shadow_base = (struct ce_desc_64 *)
4109 PTR_ALIGN(src_ring->shadow_base_unaligned,
4110 CE_DESC_RING_ALIGN);
4111 return 0;
4112 @@ -1461,7 +1461,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
4113 ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
4114 if (ret) {
4115 dma_free_coherent(ar->dev,
4116 - (nentries * sizeof(struct ce_desc) +
4117 + (nentries * sizeof(struct ce_desc_64) +
4118 CE_DESC_RING_ALIGN),
4119 src_ring->base_addr_owner_space_unaligned,
4120 base_addr);
4121 diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
4122 index ead9987c3259..463e2fc8b501 100644
4123 --- a/drivers/net/wireless/ath/ath10k/ce.h
4124 +++ b/drivers/net/wireless/ath/ath10k/ce.h
4125 @@ -118,7 +118,7 @@ struct ath10k_ce_ring {
4126 u32 base_addr_ce_space;
4127
4128 char *shadow_base_unaligned;
4129 - struct ce_desc *shadow_base;
4130 + struct ce_desc_64 *shadow_base;
4131
4132 /* keep last */
4133 void *per_transfer_context[0];
4134 diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
4135 index 4778a455d81a..068f1a7e07d3 100644
4136 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c
4137 +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c
4138 @@ -696,11 +696,12 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
4139 " %llu ", stats->ht[j][i]);
4140 len += scnprintf(buf + len, size - len, "\n");
4141 len += scnprintf(buf + len, size - len,
4142 - " BW %s (20,40,80,160 MHz)\n", str[j]);
4143 + " BW %s (20,5,10,40,80,160 MHz)\n", str[j]);
4144 len += scnprintf(buf + len, size - len,
4145 - " %llu %llu %llu %llu\n",
4146 + " %llu %llu %llu %llu %llu %llu\n",
4147 stats->bw[j][0], stats->bw[j][1],
4148 - stats->bw[j][2], stats->bw[j][3]);
4149 + stats->bw[j][2], stats->bw[j][3],
4150 + stats->bw[j][4], stats->bw[j][5]);
4151 len += scnprintf(buf + len, size - len,
4152 " NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
4153 len += scnprintf(buf + len, size - len,
4154 diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
4155 index f42bac204ef8..ecf34ce7acf0 100644
4156 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c
4157 +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
4158 @@ -2130,9 +2130,15 @@ static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
4159 hdr = (struct ieee80211_hdr *)skb->data;
4160 rx_status = IEEE80211_SKB_RXCB(skb);
4161 rx_status->chains |= BIT(0);
4162 - rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
4163 - rx->ppdu.combined_rssi;
4164 - rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
4165 + if (rx->ppdu.combined_rssi == 0) {
4166 + /* SDIO firmware does not provide signal */
4167 + rx_status->signal = 0;
4168 + rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
4169 + } else {
4170 + rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
4171 + rx->ppdu.combined_rssi;
4172 + rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
4173 + }
4174
4175 spin_lock_bh(&ar->data_lock);
4176 ch = ar->scan_channel;
4177 diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
4178 index 2034ccc7cc72..1d5d0209ebeb 100644
4179 --- a/drivers/net/wireless/ath/ath10k/wmi.h
4180 +++ b/drivers/net/wireless/ath/ath10k/wmi.h
4181 @@ -5003,7 +5003,7 @@ enum wmi_rate_preamble {
4182 #define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1)
4183
4184 #define ATH10K_VHT_MCS_NUM 10
4185 -#define ATH10K_BW_NUM 4
4186 +#define ATH10K_BW_NUM 6
4187 #define ATH10K_NSS_NUM 4
4188 #define ATH10K_LEGACY_NUM 12
4189 #define ATH10K_GI_NUM 2
4190 diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
4191 index 9b2f9f543952..5a44f9d0ff02 100644
4192 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c
4193 +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
4194 @@ -1580,6 +1580,12 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
4195 u8 *buf, *dpos;
4196 const u8 *spos;
4197
4198 + if (!ies1)
4199 + ies1_len = 0;
4200 +
4201 + if (!ies2)
4202 + ies2_len = 0;
4203 +
4204 if (ies1_len == 0 && ies2_len == 0) {
4205 *merged_ies = NULL;
4206 *merged_len = 0;
4207 @@ -1589,17 +1595,19 @@ static int _wil_cfg80211_merge_extra_ies(const u8 *ies1, u16 ies1_len,
4208 buf = kmalloc(ies1_len + ies2_len, GFP_KERNEL);
4209 if (!buf)
4210 return -ENOMEM;
4211 - memcpy(buf, ies1, ies1_len);
4212 + if (ies1)
4213 + memcpy(buf, ies1, ies1_len);
4214 dpos = buf + ies1_len;
4215 spos = ies2;
4216 - while (spos + 1 < ies2 + ies2_len) {
4217 + while (spos && (spos + 1 < ies2 + ies2_len)) {
4218 /* IE tag at offset 0, length at offset 1 */
4219 u16 ielen = 2 + spos[1];
4220
4221 if (spos + ielen > ies2 + ies2_len)
4222 break;
4223 if (spos[0] == WLAN_EID_VENDOR_SPECIFIC &&
4224 - !_wil_cfg80211_find_ie(ies1, ies1_len, spos, ielen)) {
4225 + (!ies1 || !_wil_cfg80211_find_ie(ies1, ies1_len,
4226 + spos, ielen))) {
4227 memcpy(dpos, spos, ielen);
4228 dpos += ielen;
4229 }
4230 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
4231 index 1f1e95a15a17..0ce1d8174e6d 100644
4232 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
4233 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c
4234 @@ -149,7 +149,7 @@ static int brcmf_c_process_clm_blob(struct brcmf_if *ifp)
4235 return err;
4236 }
4237
4238 - err = request_firmware(&clm, clm_name, bus->dev);
4239 + err = firmware_request_nowarn(&clm, clm_name, bus->dev);
4240 if (err) {
4241 brcmf_info("no clm_blob available (err=%d), device may have limited channels available\n",
4242 err);
4243 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
4244 index 0d6c313b6669..19ec55cef802 100644
4245 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
4246 +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c
4247 @@ -127,13 +127,17 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
4248
4249 static int iwl_configure_rxq(struct iwl_mvm *mvm)
4250 {
4251 - int i, num_queues, size;
4252 + int i, num_queues, size, ret;
4253 struct iwl_rfh_queue_config *cmd;
4254 + struct iwl_host_cmd hcmd = {
4255 + .id = WIDE_ID(DATA_PATH_GROUP, RFH_QUEUE_CONFIG_CMD),
4256 + .dataflags[0] = IWL_HCMD_DFL_NOCOPY,
4257 + };
4258
4259 /* Do not configure default queue, it is configured via context info */
4260 num_queues = mvm->trans->num_rx_queues - 1;
4261
4262 - size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
4263 + size = struct_size(cmd, data, num_queues);
4264
4265 cmd = kzalloc(size, GFP_KERNEL);
4266 if (!cmd)
4267 @@ -154,10 +158,14 @@ static int iwl_configure_rxq(struct iwl_mvm *mvm)
4268 cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
4269 }
4270
4271 - return iwl_mvm_send_cmd_pdu(mvm,
4272 - WIDE_ID(DATA_PATH_GROUP,
4273 - RFH_QUEUE_CONFIG_CMD),
4274 - 0, size, cmd);
4275 + hcmd.data[0] = cmd;
4276 + hcmd.len[0] = size;
4277 +
4278 + ret = iwl_mvm_send_cmd(mvm, &hcmd);
4279 +
4280 + kfree(cmd);
4281 +
4282 + return ret;
4283 }
4284
4285 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
4286 diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
4287 index 9e850c25877b..c596c7b13504 100644
4288 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
4289 +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
4290 @@ -499,7 +499,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
4291 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
4292 struct iwl_rb_allocator *rba = &trans_pcie->rba;
4293 struct list_head local_empty;
4294 - int pending = atomic_xchg(&rba->req_pending, 0);
4295 + int pending = atomic_read(&rba->req_pending);
4296
4297 IWL_DEBUG_RX(trans, "Pending allocation requests = %d\n", pending);
4298
4299 @@ -554,11 +554,13 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
4300 i++;
4301 }
4302
4303 + atomic_dec(&rba->req_pending);
4304 pending--;
4305 +
4306 if (!pending) {
4307 - pending = atomic_xchg(&rba->req_pending, 0);
4308 + pending = atomic_read(&rba->req_pending);
4309 IWL_DEBUG_RX(trans,
4310 - "Pending allocation requests = %d\n",
4311 + "Got more pending allocation requests = %d\n",
4312 pending);
4313 }
4314
4315 @@ -570,12 +572,15 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
4316 spin_unlock(&rba->lock);
4317
4318 atomic_inc(&rba->req_ready);
4319 +
4320 }
4321
4322 spin_lock(&rba->lock);
4323 /* return unused rbds to the allocator empty list */
4324 list_splice_tail(&local_empty, &rba->rbd_empty);
4325 spin_unlock(&rba->lock);
4326 +
4327 + IWL_DEBUG_RX(trans, "%s, exit.\n", __func__);
4328 }
4329
4330 /*
4331 diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
4332 index 1467af22e394..883752f640b4 100644
4333 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
4334 +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
4335 @@ -4310,11 +4310,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
4336 wiphy->mgmt_stypes = mwifiex_mgmt_stypes;
4337 wiphy->max_remain_on_channel_duration = 5000;
4338 wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
4339 - BIT(NL80211_IFTYPE_ADHOC) |
4340 BIT(NL80211_IFTYPE_P2P_CLIENT) |
4341 BIT(NL80211_IFTYPE_P2P_GO) |
4342 BIT(NL80211_IFTYPE_AP);
4343
4344 + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
4345 + wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
4346 +
4347 wiphy->bands[NL80211_BAND_2GHZ] = &mwifiex_band_2ghz;
4348 if (adapter->config_bands & BAND_A)
4349 wiphy->bands[NL80211_BAND_5GHZ] = &mwifiex_band_5ghz;
4350 @@ -4374,11 +4376,13 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
4351 wiphy->available_antennas_tx = BIT(adapter->number_of_antenna) - 1;
4352 wiphy->available_antennas_rx = BIT(adapter->number_of_antenna) - 1;
4353
4354 - wiphy->features |= NL80211_FEATURE_HT_IBSS |
4355 - NL80211_FEATURE_INACTIVITY_TIMER |
4356 + wiphy->features |= NL80211_FEATURE_INACTIVITY_TIMER |
4357 NL80211_FEATURE_LOW_PRIORITY_SCAN |
4358 NL80211_FEATURE_NEED_OBSS_SCAN;
4359
4360 + if (ISSUPP_ADHOC_ENABLED(adapter->fw_cap_info))
4361 + wiphy->features |= NL80211_FEATURE_HT_IBSS;
4362 +
4363 if (ISSUPP_RANDOM_MAC(adapter->fw_cap_info))
4364 wiphy->features |= NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
4365 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
4366 diff --git a/drivers/net/wireless/mediatek/mt76/eeprom.c b/drivers/net/wireless/mediatek/mt76/eeprom.c
4367 index 530e5593765c..a1529920d877 100644
4368 --- a/drivers/net/wireless/mediatek/mt76/eeprom.c
4369 +++ b/drivers/net/wireless/mediatek/mt76/eeprom.c
4370 @@ -54,22 +54,30 @@ mt76_get_of_eeprom(struct mt76_dev *dev, int len)
4371 part = np->name;
4372
4373 mtd = get_mtd_device_nm(part);
4374 - if (IS_ERR(mtd))
4375 - return PTR_ERR(mtd);
4376 + if (IS_ERR(mtd)) {
4377 + ret = PTR_ERR(mtd);
4378 + goto out_put_node;
4379 + }
4380
4381 - if (size <= sizeof(*list))
4382 - return -EINVAL;
4383 + if (size <= sizeof(*list)) {
4384 + ret = -EINVAL;
4385 + goto out_put_node;
4386 + }
4387
4388 offset = be32_to_cpup(list);
4389 ret = mtd_read(mtd, offset, len, &retlen, dev->eeprom.data);
4390 put_mtd_device(mtd);
4391 if (ret)
4392 - return ret;
4393 + goto out_put_node;
4394
4395 - if (retlen < len)
4396 - return -EINVAL;
4397 + if (retlen < len) {
4398 + ret = -EINVAL;
4399 + goto out_put_node;
4400 + }
4401
4402 - return 0;
4403 +out_put_node:
4404 + of_node_put(np);
4405 + return ret;
4406 #else
4407 return -ENOENT;
4408 #endif
4409 diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
4410 index 09923cedd039..61cde0f9f58f 100644
4411 --- a/drivers/net/wireless/mediatek/mt76/usb.c
4412 +++ b/drivers/net/wireless/mediatek/mt76/usb.c
4413 @@ -837,16 +837,9 @@ int mt76u_alloc_queues(struct mt76_dev *dev)
4414
4415 err = mt76u_alloc_rx(dev);
4416 if (err < 0)
4417 - goto err;
4418 -
4419 - err = mt76u_alloc_tx(dev);
4420 - if (err < 0)
4421 - goto err;
4422 + return err;
4423
4424 - return 0;
4425 -err:
4426 - mt76u_queues_deinit(dev);
4427 - return err;
4428 + return mt76u_alloc_tx(dev);
4429 }
4430 EXPORT_SYMBOL_GPL(mt76u_alloc_queues);
4431
4432 diff --git a/drivers/net/wireless/mediatek/mt7601u/eeprom.h b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
4433 index 662d12703b69..57b503ae63f1 100644
4434 --- a/drivers/net/wireless/mediatek/mt7601u/eeprom.h
4435 +++ b/drivers/net/wireless/mediatek/mt7601u/eeprom.h
4436 @@ -17,7 +17,7 @@
4437
4438 struct mt7601u_dev;
4439
4440 -#define MT7601U_EE_MAX_VER 0x0c
4441 +#define MT7601U_EE_MAX_VER 0x0d
4442 #define MT7601U_EEPROM_SIZE 256
4443
4444 #define MT7601U_DEFAULT_TX_POWER 6
4445 diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
4446 index 26b187336875..2e12de813a5b 100644
4447 --- a/drivers/net/wireless/ti/wlcore/main.c
4448 +++ b/drivers/net/wireless/ti/wlcore/main.c
4449 @@ -1085,8 +1085,11 @@ static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
4450 goto out;
4451
4452 ret = wl12xx_fetch_firmware(wl, plt);
4453 - if (ret < 0)
4454 - goto out;
4455 + if (ret < 0) {
4456 + kfree(wl->fw_status);
4457 + kfree(wl->raw_fw_status);
4458 + kfree(wl->tx_res_if);
4459 + }
4460
4461 out:
4462 return ret;
4463 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
4464 index 89accc76d71c..c37d5bbd72ab 100644
4465 --- a/drivers/nvme/host/fc.c
4466 +++ b/drivers/nvme/host/fc.c
4467 @@ -3018,7 +3018,10 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
4468
4469 ctrl->ctrl.opts = opts;
4470 ctrl->ctrl.nr_reconnects = 0;
4471 - ctrl->ctrl.numa_node = dev_to_node(lport->dev);
4472 + if (lport->dev)
4473 + ctrl->ctrl.numa_node = dev_to_node(lport->dev);
4474 + else
4475 + ctrl->ctrl.numa_node = NUMA_NO_NODE;
4476 INIT_LIST_HEAD(&ctrl->ctrl_list);
4477 ctrl->lport = lport;
4478 ctrl->rport = rport;
4479 diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
4480 index 88d260f31835..02c63c463222 100644
4481 --- a/drivers/nvme/target/core.c
4482 +++ b/drivers/nvme/target/core.c
4483 @@ -1171,6 +1171,15 @@ static void nvmet_release_p2p_ns_map(struct nvmet_ctrl *ctrl)
4484 put_device(ctrl->p2p_client);
4485 }
4486
4487 +static void nvmet_fatal_error_handler(struct work_struct *work)
4488 +{
4489 + struct nvmet_ctrl *ctrl =
4490 + container_of(work, struct nvmet_ctrl, fatal_err_work);
4491 +
4492 + pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
4493 + ctrl->ops->delete_ctrl(ctrl);
4494 +}
4495 +
4496 u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
4497 struct nvmet_req *req, u32 kato, struct nvmet_ctrl **ctrlp)
4498 {
4499 @@ -1213,6 +1222,7 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn,
4500 INIT_WORK(&ctrl->async_event_work, nvmet_async_event_work);
4501 INIT_LIST_HEAD(&ctrl->async_events);
4502 INIT_RADIX_TREE(&ctrl->p2p_ns_map, GFP_KERNEL);
4503 + INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
4504
4505 memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE);
4506 memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE);
4507 @@ -1316,21 +1326,11 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl)
4508 kref_put(&ctrl->ref, nvmet_ctrl_free);
4509 }
4510
4511 -static void nvmet_fatal_error_handler(struct work_struct *work)
4512 -{
4513 - struct nvmet_ctrl *ctrl =
4514 - container_of(work, struct nvmet_ctrl, fatal_err_work);
4515 -
4516 - pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
4517 - ctrl->ops->delete_ctrl(ctrl);
4518 -}
4519 -
4520 void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
4521 {
4522 mutex_lock(&ctrl->lock);
4523 if (!(ctrl->csts & NVME_CSTS_CFS)) {
4524 ctrl->csts |= NVME_CSTS_CFS;
4525 - INIT_WORK(&ctrl->fatal_err_work, nvmet_fatal_error_handler);
4526 schedule_work(&ctrl->fatal_err_work);
4527 }
4528 mutex_unlock(&ctrl->lock);
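
The nvmet change above initializes the fatal-error work item once, when the controller is allocated, rather than at the moment the error is raised; calling INIT_WORK() on a work item that may already be queued is racy. A small sketch of the pattern with made-up names (not the nvmet code itself):

    #include <linux/workqueue.h>
    #include <linux/slab.h>

    struct example_ctrl {
        struct work_struct fatal_err_work;
        /* ... other controller state ... */
    };

    static void example_fatal_error_handler(struct work_struct *work)
    {
        struct example_ctrl *ctrl =
            container_of(work, struct example_ctrl, fatal_err_work);

        /* tear the controller down in process context */
        (void)ctrl;
    }

    static struct example_ctrl *example_alloc_ctrl(void)
    {
        struct example_ctrl *ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);

        if (ctrl)    /* initialize exactly once, at allocation time */
            INIT_WORK(&ctrl->fatal_err_work, example_fatal_error_handler);
        return ctrl;
    }

    static void example_fatal_error(struct example_ctrl *ctrl)
    {
        /* safe even if the work is already pending; it is never re-initialized */
        schedule_work(&ctrl->fatal_err_work);
    }
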
4529 diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
4530 index 55e471c18e8d..c42fe5c4319f 100644
4531 --- a/drivers/pci/controller/pcie-mediatek.c
4532 +++ b/drivers/pci/controller/pcie-mediatek.c
4533 @@ -654,7 +654,6 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
4534 struct resource *mem = &pcie->mem;
4535 const struct mtk_pcie_soc *soc = port->pcie->soc;
4536 u32 val;
4537 - size_t size;
4538 int err;
4539
4540 /* MT7622 platforms need to enable LTSSM and ASPM from PCIe subsys */
4541 @@ -706,8 +705,8 @@ static int mtk_pcie_startup_port_v2(struct mtk_pcie_port *port)
4542 mtk_pcie_enable_msi(port);
4543
4544 /* Set AHB to PCIe translation windows */
4545 - size = mem->end - mem->start;
4546 - val = lower_32_bits(mem->start) | AHB2PCIE_SIZE(fls(size));
4547 + val = lower_32_bits(mem->start) |
4548 + AHB2PCIE_SIZE(fls(resource_size(mem)));
4549 writel(val, port->base + PCIE_AHB_TRANS_BASE0_L);
4550
4551 val = upper_32_bits(mem->start);
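
The pcie-mediatek hunk replaces an open-coded "mem->end - mem->start" with resource_size(), which returns end - start + 1. The open-coded form under-reports a power-of-two window by one byte, and since fls(2^n - 1) is n while fls(2^n) is n + 1, that changes the size encoding programmed into the translation window. A tiny sketch (the helper name is invented):

    #include <linux/ioport.h>
    #include <linux/bitops.h>

    /*
     * A resource spanning 0x20000000..0x2fffffff covers 0x10000000 bytes:
     * resource_size() adds the +1 that "end - start" misses, so
     * fls(resource_size(mem)) yields 29 here instead of 28.
     */
    static unsigned int example_window_order(const struct resource *mem)
    {
        return fls(resource_size(mem));
    }
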
4552 diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
4553 index c0fb64ace05a..8bfcb8cd0900 100644
4554 --- a/drivers/pci/hotplug/pciehp_hpc.c
4555 +++ b/drivers/pci/hotplug/pciehp_hpc.c
4556 @@ -156,9 +156,9 @@ static void pcie_do_write_cmd(struct controller *ctrl, u16 cmd,
4557 slot_ctrl |= (cmd & mask);
4558 ctrl->cmd_busy = 1;
4559 smp_mb();
4560 + ctrl->slot_ctrl = slot_ctrl;
4561 pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, slot_ctrl);
4562 ctrl->cmd_started = jiffies;
4563 - ctrl->slot_ctrl = slot_ctrl;
4564
4565 /*
4566 * Controllers with the Intel CF118 and similar errata advertise
4567 diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
4568 index 1a8b85051b1b..efa5b552914b 100644
4569 --- a/drivers/pci/pcie/pme.c
4570 +++ b/drivers/pci/pcie/pme.c
4571 @@ -363,6 +363,16 @@ static bool pcie_pme_check_wakeup(struct pci_bus *bus)
4572 return false;
4573 }
4574
4575 +static void pcie_pme_disable_interrupt(struct pci_dev *port,
4576 + struct pcie_pme_service_data *data)
4577 +{
4578 + spin_lock_irq(&data->lock);
4579 + pcie_pme_interrupt_enable(port, false);
4580 + pcie_clear_root_pme_status(port);
4581 + data->noirq = true;
4582 + spin_unlock_irq(&data->lock);
4583 +}
4584 +
4585 /**
4586 * pcie_pme_suspend - Suspend PCIe PME service device.
4587 * @srv: PCIe service device to suspend.
4588 @@ -387,11 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
4589 return 0;
4590 }
4591
4592 - spin_lock_irq(&data->lock);
4593 - pcie_pme_interrupt_enable(port, false);
4594 - pcie_clear_root_pme_status(port);
4595 - data->noirq = true;
4596 - spin_unlock_irq(&data->lock);
4597 + pcie_pme_disable_interrupt(port, data);
4598
4599 synchronize_irq(srv->irq);
4600
4601 @@ -427,9 +433,11 @@ static int pcie_pme_resume(struct pcie_device *srv)
4602 */
4603 static void pcie_pme_remove(struct pcie_device *srv)
4604 {
4605 - pcie_pme_suspend(srv);
4606 + struct pcie_pme_service_data *data = get_service_data(srv);
4607 +
4608 + pcie_pme_disable_interrupt(srv->port, data);
4609 free_irq(srv->irq, srv);
4610 - kfree(get_service_data(srv));
4611 + kfree(data);
4612 }
4613
4614 static struct pcie_port_service_driver pcie_pme_driver = {
4615 diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c
4616 index 8e46a9dad2fa..7cb766dafe85 100644
4617 --- a/drivers/perf/arm_spe_pmu.c
4618 +++ b/drivers/perf/arm_spe_pmu.c
4619 @@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event)
4620 {
4621 }
4622
4623 -static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages,
4624 - bool snapshot)
4625 +static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages,
4626 + int nr_pages, bool snapshot)
4627 {
4628 - int i;
4629 + int i, cpu = event->cpu;
4630 struct page **pglist;
4631 struct arm_spe_pmu_buf *buf;
4632
4633 diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c
4634 index ea87d739f534..a4ae1ac5369e 100644
4635 --- a/drivers/pinctrl/meson/pinctrl-meson.c
4636 +++ b/drivers/pinctrl/meson/pinctrl-meson.c
4637 @@ -31,6 +31,9 @@
4638 * In some cases the register ranges for pull enable and pull
4639 * direction are the same and thus there are only 3 register ranges.
4640 *
4641 + * Since Meson G12A SoC, the ao register ranges for gpio, pull enable
4642 + * and pull direction are the same, so there are only 2 register ranges.
4643 + *
4644 * For the pull and GPIO configuration every bank uses a contiguous
4645 * set of bits in the register sets described above; the same register
4646 * can be shared by more banks with different offsets.
4647 @@ -488,23 +491,22 @@ static int meson_pinctrl_parse_dt(struct meson_pinctrl *pc,
4648 return PTR_ERR(pc->reg_mux);
4649 }
4650
4651 - pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
4652 - if (IS_ERR(pc->reg_pull)) {
4653 - dev_err(pc->dev, "pull registers not found\n");
4654 - return PTR_ERR(pc->reg_pull);
4655 + pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
4656 + if (IS_ERR(pc->reg_gpio)) {
4657 + dev_err(pc->dev, "gpio registers not found\n");
4658 + return PTR_ERR(pc->reg_gpio);
4659 }
4660
4661 + pc->reg_pull = meson_map_resource(pc, gpio_np, "pull");
4662 + /* Use gpio region if pull one is not present */
4663 + if (IS_ERR(pc->reg_pull))
4664 + pc->reg_pull = pc->reg_gpio;
4665 +
4666 pc->reg_pullen = meson_map_resource(pc, gpio_np, "pull-enable");
4667 /* Use pull region if pull-enable one is not present */
4668 if (IS_ERR(pc->reg_pullen))
4669 pc->reg_pullen = pc->reg_pull;
4670
4671 - pc->reg_gpio = meson_map_resource(pc, gpio_np, "gpio");
4672 - if (IS_ERR(pc->reg_gpio)) {
4673 - dev_err(pc->dev, "gpio registers not found\n");
4674 - return PTR_ERR(pc->reg_gpio);
4675 - }
4676 -
4677 return 0;
4678 }
4679
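The pinctrl-meson change maps the mandatory gpio region first and then treats the pull and pull-enable regions as optional, falling back to the region mapped just before when one is missing; the new comment notes that on G12A-style SoCs all the ao ranges are the same. A rough sketch of that optional-region fallback, with hypothetical names standing in for the driver's mapping helper:

    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/regmap.h>

    struct example_pc {
        struct regmap *reg_gpio;
        struct regmap *reg_pull;
        struct regmap *reg_pullen;
    };

    /* example_map() stands in for the driver's internal resource-mapping helper. */
    static struct regmap *example_map(struct device_node *np, const char *name);

    static int example_map_regions(struct example_pc *pc, struct device_node *np)
    {
        pc->reg_gpio = example_map(np, "gpio");
        if (IS_ERR(pc->reg_gpio))
            return PTR_ERR(pc->reg_gpio);    /* the gpio region is mandatory */

        pc->reg_pull = example_map(np, "pull");
        if (IS_ERR(pc->reg_pull))
            pc->reg_pull = pc->reg_gpio;     /* optional: share the gpio range */

        pc->reg_pullen = example_map(np, "pull-enable");
        if (IS_ERR(pc->reg_pullen))
            pc->reg_pullen = pc->reg_pull;   /* optional: share the pull range */

        return 0;
    }
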
4680 diff --git a/drivers/pinctrl/meson/pinctrl-meson8b.c b/drivers/pinctrl/meson/pinctrl-meson8b.c
4681 index 0f140a802137..7f76000cc12e 100644
4682 --- a/drivers/pinctrl/meson/pinctrl-meson8b.c
4683 +++ b/drivers/pinctrl/meson/pinctrl-meson8b.c
4684 @@ -346,6 +346,8 @@ static const unsigned int eth_rx_dv_pins[] = { DIF_1_P };
4685 static const unsigned int eth_rx_clk_pins[] = { DIF_1_N };
4686 static const unsigned int eth_txd0_1_pins[] = { DIF_2_P };
4687 static const unsigned int eth_txd1_1_pins[] = { DIF_2_N };
4688 +static const unsigned int eth_rxd3_pins[] = { DIF_2_P };
4689 +static const unsigned int eth_rxd2_pins[] = { DIF_2_N };
4690 static const unsigned int eth_tx_en_pins[] = { DIF_3_P };
4691 static const unsigned int eth_ref_clk_pins[] = { DIF_3_N };
4692 static const unsigned int eth_mdc_pins[] = { DIF_4_P };
4693 @@ -599,6 +601,8 @@ static struct meson_pmx_group meson8b_cbus_groups[] = {
4694 GROUP(eth_ref_clk, 6, 8),
4695 GROUP(eth_mdc, 6, 9),
4696 GROUP(eth_mdio_en, 6, 10),
4697 + GROUP(eth_rxd3, 7, 22),
4698 + GROUP(eth_rxd2, 7, 23),
4699 };
4700
4701 static struct meson_pmx_group meson8b_aobus_groups[] = {
4702 @@ -748,7 +752,7 @@ static const char * const ethernet_groups[] = {
4703 "eth_tx_clk", "eth_tx_en", "eth_txd1_0", "eth_txd1_1",
4704 "eth_txd0_0", "eth_txd0_1", "eth_rx_clk", "eth_rx_dv",
4705 "eth_rxd1", "eth_rxd0", "eth_mdio_en", "eth_mdc", "eth_ref_clk",
4706 - "eth_txd2", "eth_txd3"
4707 + "eth_txd2", "eth_txd3", "eth_rxd3", "eth_rxd2"
4708 };
4709
4710 static const char * const i2c_a_groups[] = {
4711 diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
4712 index e40908dc37e0..1ce286f7b286 100644
4713 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
4714 +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77990.c
4715 @@ -391,29 +391,33 @@ FM(IP12_23_20) IP12_23_20 FM(IP13_23_20) IP13_23_20 FM(IP14_23_20) IP14_23_20 FM
4716 FM(IP12_27_24) IP12_27_24 FM(IP13_27_24) IP13_27_24 FM(IP14_27_24) IP14_27_24 FM(IP15_27_24) IP15_27_24 \
4717 FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM(IP15_31_28) IP15_31_28
4718
4719 +/* The bit numbering in MOD_SEL fields is reversed */
4720 +#define REV4(f0, f1, f2, f3) f0 f2 f1 f3
4721 +#define REV8(f0, f1, f2, f3, f4, f5, f6, f7) f0 f4 f2 f6 f1 f5 f3 f7
4722 +
4723 /* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
4724 -#define MOD_SEL0_30_29 FM(SEL_ADGB_0) FM(SEL_ADGB_1) FM(SEL_ADGB_2) F_(0, 0)
4725 +#define MOD_SEL0_30_29 REV4(FM(SEL_ADGB_0), FM(SEL_ADGB_1), FM(SEL_ADGB_2), F_(0, 0))
4726 #define MOD_SEL0_28 FM(SEL_DRIF0_0) FM(SEL_DRIF0_1)
4727 -#define MOD_SEL0_27_26 FM(SEL_FM_0) FM(SEL_FM_1) FM(SEL_FM_2) F_(0, 0)
4728 +#define MOD_SEL0_27_26 REV4(FM(SEL_FM_0), FM(SEL_FM_1), FM(SEL_FM_2), F_(0, 0))
4729 #define MOD_SEL0_25 FM(SEL_FSO_0) FM(SEL_FSO_1)
4730 #define MOD_SEL0_24 FM(SEL_HSCIF0_0) FM(SEL_HSCIF0_1)
4731 #define MOD_SEL0_23 FM(SEL_HSCIF1_0) FM(SEL_HSCIF1_1)
4732 #define MOD_SEL0_22 FM(SEL_HSCIF2_0) FM(SEL_HSCIF2_1)
4733 -#define MOD_SEL0_21_20 FM(SEL_I2C1_0) FM(SEL_I2C1_1) FM(SEL_I2C1_2) FM(SEL_I2C1_3)
4734 -#define MOD_SEL0_19_18_17 FM(SEL_I2C2_0) FM(SEL_I2C2_1) FM(SEL_I2C2_2) FM(SEL_I2C2_3) FM(SEL_I2C2_4) F_(0, 0) F_(0, 0) F_(0, 0)
4735 +#define MOD_SEL0_21_20 REV4(FM(SEL_I2C1_0), FM(SEL_I2C1_1), FM(SEL_I2C1_2), FM(SEL_I2C1_3))
4736 +#define MOD_SEL0_19_18_17 REV8(FM(SEL_I2C2_0), FM(SEL_I2C2_1), FM(SEL_I2C2_2), FM(SEL_I2C2_3), FM(SEL_I2C2_4), F_(0, 0), F_(0, 0), F_(0, 0))
4737 #define MOD_SEL0_16 FM(SEL_NDFC_0) FM(SEL_NDFC_1)
4738 #define MOD_SEL0_15 FM(SEL_PWM0_0) FM(SEL_PWM0_1)
4739 #define MOD_SEL0_14 FM(SEL_PWM1_0) FM(SEL_PWM1_1)
4740 -#define MOD_SEL0_13_12 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
4741 -#define MOD_SEL0_11_10 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
4742 +#define MOD_SEL0_13_12 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
4743 +#define MOD_SEL0_11_10 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0))
4744 #define MOD_SEL0_9 FM(SEL_PWM4_0) FM(SEL_PWM4_1)
4745 #define MOD_SEL0_8 FM(SEL_PWM5_0) FM(SEL_PWM5_1)
4746 #define MOD_SEL0_7 FM(SEL_PWM6_0) FM(SEL_PWM6_1)
4747 -#define MOD_SEL0_6_5 FM(SEL_REMOCON_0) FM(SEL_REMOCON_1) FM(SEL_REMOCON_2) F_(0, 0)
4748 +#define MOD_SEL0_6_5 REV4(FM(SEL_REMOCON_0), FM(SEL_REMOCON_1), FM(SEL_REMOCON_2), F_(0, 0))
4749 #define MOD_SEL0_4 FM(SEL_SCIF_0) FM(SEL_SCIF_1)
4750 #define MOD_SEL0_3 FM(SEL_SCIF0_0) FM(SEL_SCIF0_1)
4751 #define MOD_SEL0_2 FM(SEL_SCIF2_0) FM(SEL_SCIF2_1)
4752 -#define MOD_SEL0_1_0 FM(SEL_SPEED_PULSE_IF_0) FM(SEL_SPEED_PULSE_IF_1) FM(SEL_SPEED_PULSE_IF_2) F_(0, 0)
4753 +#define MOD_SEL0_1_0 REV4(FM(SEL_SPEED_PULSE_IF_0), FM(SEL_SPEED_PULSE_IF_1), FM(SEL_SPEED_PULSE_IF_2), F_(0, 0))
4754
4755 /* MOD_SEL1 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */ /* 4 */ /* 5 */ /* 6 */ /* 7 */
4756 #define MOD_SEL1_31 FM(SEL_SIMCARD_0) FM(SEL_SIMCARD_1)
4757 @@ -422,18 +426,18 @@ FM(IP12_31_28) IP12_31_28 FM(IP13_31_28) IP13_31_28 FM(IP14_31_28) IP14_31_28 FM
4758 #define MOD_SEL1_28 FM(SEL_USB_20_CH0_0) FM(SEL_USB_20_CH0_1)
4759 #define MOD_SEL1_26 FM(SEL_DRIF2_0) FM(SEL_DRIF2_1)
4760 #define MOD_SEL1_25 FM(SEL_DRIF3_0) FM(SEL_DRIF3_1)
4761 -#define MOD_SEL1_24_23_22 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1) FM(SEL_HSCIF3_2) FM(SEL_HSCIF3_3) FM(SEL_HSCIF3_4) F_(0, 0) F_(0, 0) F_(0, 0)
4762 -#define MOD_SEL1_21_20_19 FM(SEL_HSCIF4_0) FM(SEL_HSCIF4_1) FM(SEL_HSCIF4_2) FM(SEL_HSCIF4_3) FM(SEL_HSCIF4_4) F_(0, 0) F_(0, 0) F_(0, 0)
4763 +#define MOD_SEL1_24_23_22 REV8(FM(SEL_HSCIF3_0), FM(SEL_HSCIF3_1), FM(SEL_HSCIF3_2), FM(SEL_HSCIF3_3), FM(SEL_HSCIF3_4), F_(0, 0), F_(0, 0), F_(0, 0))
4764 +#define MOD_SEL1_21_20_19 REV8(FM(SEL_HSCIF4_0), FM(SEL_HSCIF4_1), FM(SEL_HSCIF4_2), FM(SEL_HSCIF4_3), FM(SEL_HSCIF4_4), F_(0, 0), F_(0, 0), F_(0, 0))
4765 #define MOD_SEL1_18 FM(SEL_I2C6_0) FM(SEL_I2C6_1)
4766 #define MOD_SEL1_17 FM(SEL_I2C7_0) FM(SEL_I2C7_1)
4767 #define MOD_SEL1_16 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
4768 #define MOD_SEL1_15 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
4769 -#define MOD_SEL1_14_13 FM(SEL_SCIF3_0) FM(SEL_SCIF3_1) FM(SEL_SCIF3_2) F_(0, 0)
4770 -#define MOD_SEL1_12_11 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1) FM(SEL_SCIF4_2) F_(0, 0)
4771 -#define MOD_SEL1_10_9 FM(SEL_SCIF5_0) FM(SEL_SCIF5_1) FM(SEL_SCIF5_2) F_(0, 0)
4772 +#define MOD_SEL1_14_13 REV4(FM(SEL_SCIF3_0), FM(SEL_SCIF3_1), FM(SEL_SCIF3_2), F_(0, 0))
4773 +#define MOD_SEL1_12_11 REV4(FM(SEL_SCIF4_0), FM(SEL_SCIF4_1), FM(SEL_SCIF4_2), F_(0, 0))
4774 +#define MOD_SEL1_10_9 REV4(FM(SEL_SCIF5_0), FM(SEL_SCIF5_1), FM(SEL_SCIF5_2), F_(0, 0))
4775 #define MOD_SEL1_8 FM(SEL_VIN4_0) FM(SEL_VIN4_1)
4776 #define MOD_SEL1_7 FM(SEL_VIN5_0) FM(SEL_VIN5_1)
4777 -#define MOD_SEL1_6_5 FM(SEL_ADGC_0) FM(SEL_ADGC_1) FM(SEL_ADGC_2) F_(0, 0)
4778 +#define MOD_SEL1_6_5 REV4(FM(SEL_ADGC_0), FM(SEL_ADGC_1), FM(SEL_ADGC_2), F_(0, 0))
4779 #define MOD_SEL1_4 FM(SEL_SSI9_0) FM(SEL_SSI9_1)
4780
4781 #define PINMUX_MOD_SELS \
4782 diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
4783 index 84d78db381e3..9e377e3b9cb3 100644
4784 --- a/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
4785 +++ b/drivers/pinctrl/sh-pfc/pfc-r8a77995.c
4786 @@ -381,6 +381,9 @@ FM(IP12_23_20) IP12_23_20 \
4787 FM(IP12_27_24) IP12_27_24 \
4788 FM(IP12_31_28) IP12_31_28 \
4789
4790 +/* The bit numbering in MOD_SEL fields is reversed */
4791 +#define REV4(f0, f1, f2, f3) f0 f2 f1 f3
4792 +
4793 /* MOD_SEL0 */ /* 0 */ /* 1 */ /* 2 */ /* 3 */
4794 #define MOD_SEL0_30 FM(SEL_MSIOF2_0) FM(SEL_MSIOF2_1)
4795 #define MOD_SEL0_29 FM(SEL_I2C3_0) FM(SEL_I2C3_1)
4796 @@ -388,10 +391,10 @@ FM(IP12_31_28) IP12_31_28 \
4797 #define MOD_SEL0_27 FM(SEL_MSIOF3_0) FM(SEL_MSIOF3_1)
4798 #define MOD_SEL0_26 FM(SEL_HSCIF3_0) FM(SEL_HSCIF3_1)
4799 #define MOD_SEL0_25 FM(SEL_SCIF4_0) FM(SEL_SCIF4_1)
4800 -#define MOD_SEL0_24_23 FM(SEL_PWM0_0) FM(SEL_PWM0_1) FM(SEL_PWM0_2) F_(0, 0)
4801 -#define MOD_SEL0_22_21 FM(SEL_PWM1_0) FM(SEL_PWM1_1) FM(SEL_PWM1_2) F_(0, 0)
4802 -#define MOD_SEL0_20_19 FM(SEL_PWM2_0) FM(SEL_PWM2_1) FM(SEL_PWM2_2) F_(0, 0)
4803 -#define MOD_SEL0_18_17 FM(SEL_PWM3_0) FM(SEL_PWM3_1) FM(SEL_PWM3_2) F_(0, 0)
4804 +#define MOD_SEL0_24_23 REV4(FM(SEL_PWM0_0), FM(SEL_PWM0_1), FM(SEL_PWM0_2), F_(0, 0))
4805 +#define MOD_SEL0_22_21 REV4(FM(SEL_PWM1_0), FM(SEL_PWM1_1), FM(SEL_PWM1_2), F_(0, 0))
4806 +#define MOD_SEL0_20_19 REV4(FM(SEL_PWM2_0), FM(SEL_PWM2_1), FM(SEL_PWM2_2), F_(0, 0))
4807 +#define MOD_SEL0_18_17 REV4(FM(SEL_PWM3_0), FM(SEL_PWM3_1), FM(SEL_PWM3_2), F_(0, 0))
4808 #define MOD_SEL0_15 FM(SEL_IRQ_0_0) FM(SEL_IRQ_0_1)
4809 #define MOD_SEL0_14 FM(SEL_IRQ_1_0) FM(SEL_IRQ_1_1)
4810 #define MOD_SEL0_13 FM(SEL_IRQ_2_0) FM(SEL_IRQ_2_1)
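
The REV4()/REV8() macros introduced in the two sh-pfc patches above reorder the function entries because the hardware numbers the bits of these 2- and 3-bit MOD_SEL fields in the opposite order from the table layout: REV4 lists the entries in the order 0, 2, 1, 3 and REV8 in the order 0, 4, 2, 6, 1, 5, 3, 7, which is the bit-reversed index of each position. A standalone illustration (not driver code) that prints the same permutations:

    #include <stdio.h>

    /* Reverse the low 'bits' bits of 'v': 01b <-> 10b for 2-bit fields,
     * 001b <-> 100b and 011b <-> 110b for 3-bit fields. */
    static unsigned int revbits(unsigned int v, unsigned int bits)
    {
        unsigned int r = 0, i;

        for (i = 0; i < bits; i++)
            r |= ((v >> i) & 1) << (bits - 1 - i);
        return r;
    }

    int main(void)
    {
        unsigned int i;

        for (i = 0; i < 4; i++)    /* prints 0 2 1 3, the REV4 order */
            printf("%u ", revbits(i, 2));
        printf("\n");
        for (i = 0; i < 8; i++)    /* prints 0 4 2 6 1 5 3 7, the REV8 order */
            printf("%u ", revbits(i, 3));
        printf("\n");
        return 0;
    }
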
4811 diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c
4812 index b6d44550d98c..eca16d00e310 100644
4813 --- a/drivers/platform/mellanox/mlxreg-hotplug.c
4814 +++ b/drivers/platform/mellanox/mlxreg-hotplug.c
4815 @@ -248,7 +248,8 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
4816 struct mlxreg_core_item *item)
4817 {
4818 struct mlxreg_core_data *data;
4819 - u32 asserted, regval, bit;
4820 + unsigned long asserted;
4821 + u32 regval, bit;
4822 int ret;
4823
4824 /*
4825 @@ -281,7 +282,7 @@ mlxreg_hotplug_work_helper(struct mlxreg_hotplug_priv_data *priv,
4826 asserted = item->cache ^ regval;
4827 item->cache = regval;
4828
4829 - for_each_set_bit(bit, (unsigned long *)&asserted, 8) {
4830 + for_each_set_bit(bit, &asserted, 8) {
4831 data = item->data + bit;
4832 if (regval & BIT(bit)) {
4833 if (item->inversed)
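
The mlxreg-hotplug fix makes "asserted" a real unsigned long instead of casting a u32's address, because for_each_set_bit() walks a bitmap made of unsigned longs and the cast can misbehave on 64-bit big-endian machines. A short sketch of the safe form (the names are invented):

    #include <linux/bitops.h>
    #include <linux/types.h>

    static void example_scan_changes(u32 prev, u32 curr)
    {
        unsigned long asserted = prev ^ curr;   /* native word, as the bitmap API expects */
        unsigned int bit;

        for_each_set_bit(bit, &asserted, 8) {
            /* react to each signal whose state flipped */
        }
    }
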
4834 diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
4835 index 1589dffab9fa..8b53a9ceb897 100644
4836 --- a/drivers/platform/x86/ideapad-laptop.c
4837 +++ b/drivers/platform/x86/ideapad-laptop.c
4838 @@ -989,7 +989,7 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
4839 .ident = "Lenovo RESCUER R720-15IKBN",
4840 .matches = {
4841 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
4842 - DMI_MATCH(DMI_BOARD_NAME, "80WW"),
4843 + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo R720-15IKBN"),
4844 },
4845 },
4846 {
4847 diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
4848 index e28bcf61b126..bc0d55a59015 100644
4849 --- a/drivers/platform/x86/intel-hid.c
4850 +++ b/drivers/platform/x86/intel-hid.c
4851 @@ -363,7 +363,7 @@ wakeup:
4852 * the 5-button array, but still send notifies with power button
4853 * event code to this device object on power button actions.
4854 *
4855 - * Report the power button press; catch and ignore the button release.
4856 + * Report the power button press and release.
4857 */
4858 if (!priv->array) {
4859 if (event == 0xce) {
4860 @@ -372,8 +372,11 @@ wakeup:
4861 return;
4862 }
4863
4864 - if (event == 0xcf)
4865 + if (event == 0xcf) {
4866 + input_report_key(priv->input_dev, KEY_POWER, 0);
4867 + input_sync(priv->input_dev);
4868 return;
4869 + }
4870 }
4871
4872 /* 0xC0 is for HID events, other values are for 5 button array */
4873 diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c
4874 index 22dbf115782e..c37e74ee609d 100644
4875 --- a/drivers/platform/x86/intel_pmc_core.c
4876 +++ b/drivers/platform/x86/intel_pmc_core.c
4877 @@ -380,7 +380,8 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
4878 index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++)
4879 pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter);
4880
4881 - for (index = 0; map[index].name; index++)
4882 + for (index = 0; map[index].name &&
4883 + index < pmcdev->map->ppfear_buckets * 8; index++)
4884 pmc_core_display_map(s, index, pf_regs[index / 8], map);
4885
4886 return 0;
4887 diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h
4888 index 89554cba5758..1a0104d2cbf0 100644
4889 --- a/drivers/platform/x86/intel_pmc_core.h
4890 +++ b/drivers/platform/x86/intel_pmc_core.h
4891 @@ -32,7 +32,7 @@
4892 #define SPT_PMC_SLP_S0_RES_COUNTER_STEP 0x64
4893 #define PMC_BASE_ADDR_MASK ~(SPT_PMC_MMIO_REG_LEN - 1)
4894 #define MTPMC_MASK 0xffff0000
4895 -#define PPFEAR_MAX_NUM_ENTRIES 5
4896 +#define PPFEAR_MAX_NUM_ENTRIES 12
4897 #define SPT_PPFEAR_NUM_ENTRIES 5
4898 #define SPT_PMC_READ_DISABLE_BIT 0x16
4899 #define SPT_PMC_MSG_FULL_STS_BIT 0x18
4900 diff --git a/drivers/regulator/act8865-regulator.c b/drivers/regulator/act8865-regulator.c
4901 index 21e20483bd91..e0239cf3f56d 100644
4902 --- a/drivers/regulator/act8865-regulator.c
4903 +++ b/drivers/regulator/act8865-regulator.c
4904 @@ -131,7 +131,7 @@
4905 * ACT8865 voltage number
4906 */
4907 #define ACT8865_VOLTAGE_NUM 64
4908 -#define ACT8600_SUDCDC_VOLTAGE_NUM 255
4909 +#define ACT8600_SUDCDC_VOLTAGE_NUM 256
4910
4911 struct act8865 {
4912 struct regmap *regmap;
4913 @@ -222,7 +222,8 @@ static const struct regulator_linear_range act8600_sudcdc_voltage_ranges[] = {
4914 REGULATOR_LINEAR_RANGE(3000000, 0, 63, 0),
4915 REGULATOR_LINEAR_RANGE(3000000, 64, 159, 100000),
4916 REGULATOR_LINEAR_RANGE(12600000, 160, 191, 200000),
4917 - REGULATOR_LINEAR_RANGE(19000000, 191, 255, 400000),
4918 + REGULATOR_LINEAR_RANGE(19000000, 192, 247, 400000),
4919 + REGULATOR_LINEAR_RANGE(41400000, 248, 255, 0),
4920 };
4921
4922 static struct regulator_ops act8865_ops = {
4923 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
4924 index b9d7b45c7295..e2caf11598c7 100644
4925 --- a/drivers/regulator/core.c
4926 +++ b/drivers/regulator/core.c
4927 @@ -1349,7 +1349,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
4928 * We'll only apply the initial system load if an
4929 * initial mode wasn't specified.
4930 */
4931 + regulator_lock(rdev);
4932 drms_uA_update(rdev);
4933 + regulator_unlock(rdev);
4934 }
4935
4936 if ((rdev->constraints->ramp_delay || rdev->constraints->ramp_disable)
4937 diff --git a/drivers/regulator/mcp16502.c b/drivers/regulator/mcp16502.c
4938 index 3479ae009b0b..0fc4963bd5b0 100644
4939 --- a/drivers/regulator/mcp16502.c
4940 +++ b/drivers/regulator/mcp16502.c
4941 @@ -17,6 +17,7 @@
4942 #include <linux/regmap.h>
4943 #include <linux/regulator/driver.h>
4944 #include <linux/suspend.h>
4945 +#include <linux/gpio/consumer.h>
4946
4947 #define VDD_LOW_SEL 0x0D
4948 #define VDD_HIGH_SEL 0x3F
4949 diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
4950 index ed8e58f09054..3e132592c1fe 100644
4951 --- a/drivers/s390/net/ism_drv.c
4952 +++ b/drivers/s390/net/ism_drv.c
4953 @@ -141,10 +141,13 @@ static int register_ieq(struct ism_dev *ism)
4954
4955 static int unregister_sba(struct ism_dev *ism)
4956 {
4957 + int ret;
4958 +
4959 if (!ism->sba)
4960 return 0;
4961
4962 - if (ism_cmd_simple(ism, ISM_UNREG_SBA))
4963 + ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
4964 + if (ret && ret != ISM_ERROR)
4965 return -EIO;
4966
4967 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
4968 @@ -158,10 +161,13 @@ static int unregister_sba(struct ism_dev *ism)
4969
4970 static int unregister_ieq(struct ism_dev *ism)
4971 {
4972 + int ret;
4973 +
4974 if (!ism->ieq)
4975 return 0;
4976
4977 - if (ism_cmd_simple(ism, ISM_UNREG_IEQ))
4978 + ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
4979 + if (ret && ret != ISM_ERROR)
4980 return -EIO;
4981
4982 dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
4983 @@ -287,7 +293,7 @@ static int ism_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
4984 cmd.request.dmb_tok = dmb->dmb_tok;
4985
4986 ret = ism_cmd(ism, &cmd);
4987 - if (ret)
4988 + if (ret && ret != ISM_ERROR)
4989 goto out;
4990
4991 ism_free_dmb(ism, dmb);
4992 diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
4993 index 2e4e7159ebf9..a75e74ad1698 100644
4994 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
4995 +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
4996 @@ -1438,7 +1438,7 @@ bind_err:
4997 static struct bnx2fc_interface *
4998 bnx2fc_interface_create(struct bnx2fc_hba *hba,
4999 struct net_device *netdev,
5000 - enum fip_state fip_mode)
5001 + enum fip_mode fip_mode)
5002 {
5003 struct fcoe_ctlr_device *ctlr_dev;
5004 struct bnx2fc_interface *interface;
5005 diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
5006 index cd19be3f3405..8ba8862d3292 100644
5007 --- a/drivers/scsi/fcoe/fcoe.c
5008 +++ b/drivers/scsi/fcoe/fcoe.c
5009 @@ -389,7 +389,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
5010 * Returns: pointer to a struct fcoe_interface or NULL on error
5011 */
5012 static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
5013 - enum fip_state fip_mode)
5014 + enum fip_mode fip_mode)
5015 {
5016 struct fcoe_ctlr_device *ctlr_dev;
5017 struct fcoe_ctlr *ctlr;
5018 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
5019 index 54da3166da8d..7dc4ffa24430 100644
5020 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
5021 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
5022 @@ -147,7 +147,7 @@ static void fcoe_ctlr_map_dest(struct fcoe_ctlr *fip)
5023 * fcoe_ctlr_init() - Initialize the FCoE Controller instance
5024 * @fip: The FCoE controller to initialize
5025 */
5026 -void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
5027 +void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_mode mode)
5028 {
5029 fcoe_ctlr_set_state(fip, FIP_ST_LINK_WAIT);
5030 fip->mode = mode;
5031 @@ -454,7 +454,10 @@ void fcoe_ctlr_link_up(struct fcoe_ctlr *fip)
5032 mutex_unlock(&fip->ctlr_mutex);
5033 fc_linkup(fip->lp);
5034 } else if (fip->state == FIP_ST_LINK_WAIT) {
5035 - fcoe_ctlr_set_state(fip, fip->mode);
5036 + if (fip->mode == FIP_MODE_NON_FIP)
5037 + fcoe_ctlr_set_state(fip, FIP_ST_NON_FIP);
5038 + else
5039 + fcoe_ctlr_set_state(fip, FIP_ST_AUTO);
5040 switch (fip->mode) {
5041 default:
5042 LIBFCOE_FIP_DBG(fip, "invalid mode %d\n", fip->mode);
5043 diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
5044 index f4909cd206d3..f15d5e1d56b1 100644
5045 --- a/drivers/scsi/fcoe/fcoe_transport.c
5046 +++ b/drivers/scsi/fcoe/fcoe_transport.c
5047 @@ -873,7 +873,7 @@ static int fcoe_transport_create(const char *buffer,
5048 int rc = -ENODEV;
5049 struct net_device *netdev = NULL;
5050 struct fcoe_transport *ft = NULL;
5051 - enum fip_state fip_mode = (enum fip_state)(long)kp->arg;
5052 + enum fip_mode fip_mode = (enum fip_mode)kp->arg;
5053
5054 mutex_lock(&ft_mutex);
5055
5056 diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
5057 index bc17fa0d8375..62d158574281 100644
5058 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c
5059 +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
5060 @@ -10,6 +10,7 @@
5061 */
5062
5063 #include "hisi_sas.h"
5064 +#include "../libsas/sas_internal.h"
5065 #define DRV_NAME "hisi_sas"
5066
5067 #define DEV_IS_GONE(dev) \
5068 @@ -872,7 +873,8 @@ static void hisi_sas_do_release_task(struct hisi_hba *hisi_hba, struct sas_task
5069 spin_lock_irqsave(&task->task_state_lock, flags);
5070 task->task_state_flags &=
5071 ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
5072 - task->task_state_flags |= SAS_TASK_STATE_DONE;
5073 + if (!slot->is_internal && task->task_proto != SAS_PROTOCOL_SMP)
5074 + task->task_state_flags |= SAS_TASK_STATE_DONE;
5075 spin_unlock_irqrestore(&task->task_state_lock, flags);
5076 }
5077
5078 @@ -1972,9 +1974,18 @@ static int hisi_sas_write_gpio(struct sas_ha_struct *sha, u8 reg_type,
5079
5080 static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
5081 {
5082 + struct asd_sas_phy *sas_phy = &phy->sas_phy;
5083 + struct sas_phy *sphy = sas_phy->phy;
5084 + struct sas_phy_data *d = sphy->hostdata;
5085 +
5086 phy->phy_attached = 0;
5087 phy->phy_type = 0;
5088 phy->port = NULL;
5089 +
5090 + if (d->enable)
5091 + sphy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
5092 + else
5093 + sphy->negotiated_linkrate = SAS_PHY_DISABLED;
5094 }
5095
5096 void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
5097 diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
5098 index fcbff83c0097..c9811d1aa007 100644
5099 --- a/drivers/scsi/megaraid/megaraid_sas_base.c
5100 +++ b/drivers/scsi/megaraid/megaraid_sas_base.c
5101 @@ -4188,6 +4188,7 @@ int megasas_alloc_cmds(struct megasas_instance *instance)
5102 if (megasas_create_frame_pool(instance)) {
5103 dev_printk(KERN_DEBUG, &instance->pdev->dev, "Error creating frame DMA pool\n");
5104 megasas_free_cmds(instance);
5105 + return -ENOMEM;
5106 }
5107
5108 return 0;
5109 diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
5110 index 9bbc19fc190b..9f9431a4cc0e 100644
5111 --- a/drivers/scsi/qedf/qedf_main.c
5112 +++ b/drivers/scsi/qedf/qedf_main.c
5113 @@ -1418,7 +1418,7 @@ static struct libfc_function_template qedf_lport_template = {
5114
5115 static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
5116 {
5117 - fcoe_ctlr_init(&qedf->ctlr, FIP_ST_AUTO);
5118 + fcoe_ctlr_init(&qedf->ctlr, FIP_MODE_AUTO);
5119
5120 qedf->ctlr.send = qedf_fip_send;
5121 qedf->ctlr.get_src_addr = qedf_get_src_mac;
5122 diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
5123 index dd0d516f65e2..53380e07b40e 100644
5124 --- a/drivers/scsi/scsi_scan.c
5125 +++ b/drivers/scsi/scsi_scan.c
5126 @@ -220,7 +220,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
5127 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
5128
5129 sdev = kzalloc(sizeof(*sdev) + shost->transportt->device_size,
5130 - GFP_ATOMIC);
5131 + GFP_KERNEL);
5132 if (!sdev)
5133 goto out;
5134
5135 @@ -788,7 +788,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
5136 */
5137 sdev->inquiry = kmemdup(inq_result,
5138 max_t(size_t, sdev->inquiry_len, 36),
5139 - GFP_ATOMIC);
5140 + GFP_KERNEL);
5141 if (sdev->inquiry == NULL)
5142 return SCSI_SCAN_NO_RESPONSE;
5143
5144 @@ -1079,7 +1079,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
5145 if (!sdev)
5146 goto out;
5147
5148 - result = kmalloc(result_len, GFP_ATOMIC |
5149 + result = kmalloc(result_len, GFP_KERNEL |
5150 ((shost->unchecked_isa_dma) ? __GFP_DMA : 0));
5151 if (!result)
5152 goto out_free_sdev;
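
The scsi_scan changes drop GFP_ATOMIC in favour of GFP_KERNEL because these allocations run in process context during scanning, where sleeping is allowed; GFP_ATOMIC is meant for code that cannot block and it fails much more readily under memory pressure. A minimal illustration of picking the flag by context (assumed names, not the SCSI code):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    /* Process context (e.g. a scan thread): the allocator may sleep and reclaim. */
    static void *example_alloc_scan_buffer(size_t len)
    {
        return kzalloc(len, GFP_KERNEL);
    }

    /* Interrupt context or under a spinlock: must not sleep, may fail sooner. */
    static void *example_alloc_atomic_buffer(size_t len)
    {
        return kzalloc(len, GFP_ATOMIC);
    }
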
5153 diff --git a/drivers/soc/qcom/qcom_gsbi.c b/drivers/soc/qcom/qcom_gsbi.c
5154 index 09c669e70d63..038abc377fdb 100644
5155 --- a/drivers/soc/qcom/qcom_gsbi.c
5156 +++ b/drivers/soc/qcom/qcom_gsbi.c
5157 @@ -138,7 +138,7 @@ static int gsbi_probe(struct platform_device *pdev)
5158 struct resource *res;
5159 void __iomem *base;
5160 struct gsbi_info *gsbi;
5161 - int i;
5162 + int i, ret;
5163 u32 mask, gsbi_num;
5164 const struct crci_config *config = NULL;
5165
5166 @@ -221,7 +221,10 @@ static int gsbi_probe(struct platform_device *pdev)
5167
5168 platform_set_drvdata(pdev, gsbi);
5169
5170 - return of_platform_populate(node, NULL, NULL, &pdev->dev);
5171 + ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
5172 + if (ret)
5173 + clk_disable_unprepare(gsbi->hclk);
5174 + return ret;
5175 }
5176
5177 static int gsbi_remove(struct platform_device *pdev)
5178 diff --git a/drivers/soc/tegra/fuse/fuse-tegra.c b/drivers/soc/tegra/fuse/fuse-tegra.c
5179 index a33ee8ef8b6b..51625703399e 100644
5180 --- a/drivers/soc/tegra/fuse/fuse-tegra.c
5181 +++ b/drivers/soc/tegra/fuse/fuse-tegra.c
5182 @@ -137,13 +137,17 @@ static int tegra_fuse_probe(struct platform_device *pdev)
5183 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
5184 fuse->phys = res->start;
5185 fuse->base = devm_ioremap_resource(&pdev->dev, res);
5186 - if (IS_ERR(fuse->base))
5187 - return PTR_ERR(fuse->base);
5188 + if (IS_ERR(fuse->base)) {
5189 + err = PTR_ERR(fuse->base);
5190 + fuse->base = base;
5191 + return err;
5192 + }
5193
5194 fuse->clk = devm_clk_get(&pdev->dev, "fuse");
5195 if (IS_ERR(fuse->clk)) {
5196 dev_err(&pdev->dev, "failed to get FUSE clock: %ld",
5197 PTR_ERR(fuse->clk));
5198 + fuse->base = base;
5199 return PTR_ERR(fuse->clk);
5200 }
5201
5202 @@ -152,8 +156,10 @@ static int tegra_fuse_probe(struct platform_device *pdev)
5203
5204 if (fuse->soc->probe) {
5205 err = fuse->soc->probe(fuse);
5206 - if (err < 0)
5207 + if (err < 0) {
5208 + fuse->base = base;
5209 return err;
5210 + }
5211 }
5212
5213 if (tegra_fuse_create_sysfs(&pdev->dev, fuse->soc->info->size,
5214 diff --git a/drivers/staging/iio/addac/adt7316.c b/drivers/staging/iio/addac/adt7316.c
5215 index dc93e85808e0..7839d869d25d 100644
5216 --- a/drivers/staging/iio/addac/adt7316.c
5217 +++ b/drivers/staging/iio/addac/adt7316.c
5218 @@ -651,17 +651,10 @@ static ssize_t adt7316_store_da_high_resolution(struct device *dev,
5219 u8 config3;
5220 int ret;
5221
5222 - chip->dac_bits = 8;
5223 -
5224 - if (buf[0] == '1') {
5225 + if (buf[0] == '1')
5226 config3 = chip->config3 | ADT7316_DA_HIGH_RESOLUTION;
5227 - if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
5228 - chip->dac_bits = 12;
5229 - else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
5230 - chip->dac_bits = 10;
5231 - } else {
5232 + else
5233 config3 = chip->config3 & (~ADT7316_DA_HIGH_RESOLUTION);
5234 - }
5235
5236 ret = chip->bus.write(chip->bus.client, ADT7316_CONFIG3, config3);
5237 if (ret)
5238 @@ -2123,6 +2116,13 @@ int adt7316_probe(struct device *dev, struct adt7316_bus *bus,
5239 else
5240 return -ENODEV;
5241
5242 + if (chip->id == ID_ADT7316 || chip->id == ID_ADT7516)
5243 + chip->dac_bits = 12;
5244 + else if (chip->id == ID_ADT7317 || chip->id == ID_ADT7517)
5245 + chip->dac_bits = 10;
5246 + else
5247 + chip->dac_bits = 8;
5248 +
5249 chip->ldac_pin = devm_gpiod_get_optional(dev, "adi,ldac", GPIOD_OUT_LOW);
5250 if (IS_ERR(chip->ldac_pin)) {
5251 ret = PTR_ERR(chip->ldac_pin);
5252 diff --git a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
5253 index 5282236d1bb1..06daea66fb49 100644
5254 --- a/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
5255 +++ b/drivers/staging/media/rockchip/vpu/rk3288_vpu_hw_jpeg_enc.c
5256 @@ -80,7 +80,7 @@ rk3288_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
5257 void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
5258 {
5259 struct rockchip_vpu_dev *vpu = ctx->dev;
5260 - struct vb2_buffer *src_buf, *dst_buf;
5261 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
5262 struct rockchip_vpu_jpeg_ctx jpeg_ctx;
5263 u32 reg;
5264
5265 @@ -88,7 +88,7 @@ void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
5266 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
5267
5268 memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
5269 - jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
5270 + jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
5271 jpeg_ctx.width = ctx->dst_fmt.width;
5272 jpeg_ctx.height = ctx->dst_fmt.height;
5273 jpeg_ctx.quality = ctx->jpeg_quality;
5274 @@ -99,7 +99,7 @@ void rk3288_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
5275 VEPU_REG_ENC_CTRL);
5276
5277 rk3288_vpu_set_src_img_ctrl(vpu, ctx);
5278 - rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
5279 + rk3288_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
5280 rk3288_vpu_jpeg_enc_set_qtable(vpu,
5281 rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
5282 rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
5283 diff --git a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
5284 index dbc86d95fe3b..3d438797692e 100644
5285 --- a/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
5286 +++ b/drivers/staging/media/rockchip/vpu/rk3399_vpu_hw_jpeg_enc.c
5287 @@ -111,7 +111,7 @@ rk3399_vpu_jpeg_enc_set_qtable(struct rockchip_vpu_dev *vpu,
5288 void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
5289 {
5290 struct rockchip_vpu_dev *vpu = ctx->dev;
5291 - struct vb2_buffer *src_buf, *dst_buf;
5292 + struct vb2_v4l2_buffer *src_buf, *dst_buf;
5293 struct rockchip_vpu_jpeg_ctx jpeg_ctx;
5294 u32 reg;
5295
5296 @@ -119,7 +119,7 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
5297 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx);
5298
5299 memset(&jpeg_ctx, 0, sizeof(jpeg_ctx));
5300 - jpeg_ctx.buffer = vb2_plane_vaddr(dst_buf, 0);
5301 + jpeg_ctx.buffer = vb2_plane_vaddr(&dst_buf->vb2_buf, 0);
5302 jpeg_ctx.width = ctx->dst_fmt.width;
5303 jpeg_ctx.height = ctx->dst_fmt.height;
5304 jpeg_ctx.quality = ctx->jpeg_quality;
5305 @@ -130,7 +130,7 @@ void rk3399_vpu_jpeg_enc_run(struct rockchip_vpu_ctx *ctx)
5306 VEPU_REG_ENCODE_START);
5307
5308 rk3399_vpu_set_src_img_ctrl(vpu, ctx);
5309 - rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, src_buf);
5310 + rk3399_vpu_jpeg_enc_set_buffers(vpu, ctx, &src_buf->vb2_buf);
5311 rk3399_vpu_jpeg_enc_set_qtable(vpu,
5312 rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 0),
5313 rockchip_vpu_jpeg_get_qtable(&jpeg_ctx, 1));
5314 diff --git a/drivers/staging/mt7621-spi/spi-mt7621.c b/drivers/staging/mt7621-spi/spi-mt7621.c
5315 index 513b6e79b985..e1f50efd0922 100644
5316 --- a/drivers/staging/mt7621-spi/spi-mt7621.c
5317 +++ b/drivers/staging/mt7621-spi/spi-mt7621.c
5318 @@ -330,6 +330,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
5319 int status = 0;
5320 struct clk *clk;
5321 struct mt7621_spi_ops *ops;
5322 + int ret;
5323
5324 match = of_match_device(mt7621_spi_match, &pdev->dev);
5325 if (!match)
5326 @@ -377,7 +378,11 @@ static int mt7621_spi_probe(struct platform_device *pdev)
5327 rs->pending_write = 0;
5328 dev_info(&pdev->dev, "sys_freq: %u\n", rs->sys_freq);
5329
5330 - device_reset(&pdev->dev);
5331 + ret = device_reset(&pdev->dev);
5332 + if (ret) {
5333 + dev_err(&pdev->dev, "SPI reset failed!\n");
5334 + return ret;
5335 + }
5336
5337 mt7621_spi_reset(rs);
5338
5339 diff --git a/drivers/tty/serial/8250/8250_pxa.c b/drivers/tty/serial/8250/8250_pxa.c
5340 index b9bcbe20a2be..c47188860e32 100644
5341 --- a/drivers/tty/serial/8250/8250_pxa.c
5342 +++ b/drivers/tty/serial/8250/8250_pxa.c
5343 @@ -113,6 +113,10 @@ static int serial_pxa_probe(struct platform_device *pdev)
5344 if (ret)
5345 return ret;
5346
5347 + ret = of_alias_get_id(pdev->dev.of_node, "serial");
5348 + if (ret >= 0)
5349 + uart.port.line = ret;
5350 +
5351 uart.port.type = PORT_XSCALE;
5352 uart.port.iotype = UPIO_MEM32;
5353 uart.port.mapbase = mmres->start;
5354 diff --git a/drivers/tty/tty_buffer.c b/drivers/tty/tty_buffer.c
5355 index 77070c2d1240..ec145a59f199 100644
5356 --- a/drivers/tty/tty_buffer.c
5357 +++ b/drivers/tty/tty_buffer.c
5358 @@ -26,7 +26,7 @@
5359 * Byte threshold to limit memory consumption for flip buffers.
5360 * The actual memory limit is > 2x this amount.
5361 */
5362 -#define TTYB_DEFAULT_MEM_LIMIT 65536
5363 +#define TTYB_DEFAULT_MEM_LIMIT (640 * 1024UL)
5364
5365 /*
5366 * We default to dicing tty buffer allocations to this many characters
5367 diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
5368 index 7bfcbb23c2a4..016e4004fe9d 100644
5369 --- a/drivers/usb/chipidea/core.c
5370 +++ b/drivers/usb/chipidea/core.c
5371 @@ -954,8 +954,15 @@ static int ci_hdrc_probe(struct platform_device *pdev)
5372 } else if (ci->platdata->usb_phy) {
5373 ci->usb_phy = ci->platdata->usb_phy;
5374 } else {
5375 + ci->usb_phy = devm_usb_get_phy_by_phandle(dev->parent, "phys",
5376 + 0);
5377 ci->phy = devm_phy_get(dev->parent, "usb-phy");
5378 - ci->usb_phy = devm_usb_get_phy(dev->parent, USB_PHY_TYPE_USB2);
5379 +
5380 + /* Fallback to grabbing any registered USB2 PHY */
5381 + if (IS_ERR(ci->usb_phy) &&
5382 + PTR_ERR(ci->usb_phy) != -EPROBE_DEFER)
5383 + ci->usb_phy = devm_usb_get_phy(dev->parent,
5384 + USB_PHY_TYPE_USB2);
5385
5386 /* if both generic PHY and USB PHY layers aren't enabled */
5387 if (PTR_ERR(ci->phy) == -ENOSYS &&
5388 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
5389 index 6c9b76bcc2e1..8d1dbe36db92 100644
5390 --- a/drivers/usb/dwc3/gadget.c
5391 +++ b/drivers/usb/dwc3/gadget.c
5392 @@ -3339,6 +3339,8 @@ int dwc3_gadget_init(struct dwc3 *dwc)
5393 goto err4;
5394 }
5395
5396 + dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed);
5397 +
5398 return 0;
5399
5400 err4:
5401 diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
5402 index 1e5430438703..0f8d16de7a37 100644
5403 --- a/drivers/usb/gadget/function/f_fs.c
5404 +++ b/drivers/usb/gadget/function/f_fs.c
5405 @@ -1082,6 +1082,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
5406 * condition with req->complete callback.
5407 */
5408 usb_ep_dequeue(ep->ep, req);
5409 + wait_for_completion(&done);
5410 interrupted = ep->status < 0;
5411 }
5412
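The f_fs fix adds a wait_for_completion() after usb_ep_dequeue() because dequeueing only requests cancellation: the request's status and the "done" completion are still filled in asynchronously by the completion callback, so reading the status before waiting races with it. A stripped-down sketch of the pattern (hypothetical function, not the gadget code):

    #include <linux/completion.h>
    #include <linux/usb/gadget.h>

    static void example_complete(struct usb_ep *ep, struct usb_request *req)
    {
        complete(req->context);    /* wakes the waiter below */
    }

    static int example_transfer(struct usb_ep *ep, struct usb_request *req)
    {
        DECLARE_COMPLETION_ONSTACK(done);
        int ret;

        req->context = &done;
        req->complete = example_complete;

        ret = usb_ep_queue(ep, req, GFP_KERNEL);
        if (ret)
            return ret;

        if (wait_for_completion_interruptible(&done)) {
            usb_ep_dequeue(ep, req);
            /* Cancellation is asynchronous: wait until the completion
             * callback has run before trusting req->status. */
            wait_for_completion(&done);
        }
        return req->status;
    }
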
5413 diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
5414 index feb90764a811..53b8ceea9bde 100644
5415 --- a/drivers/video/backlight/pwm_bl.c
5416 +++ b/drivers/video/backlight/pwm_bl.c
5417 @@ -435,7 +435,7 @@ static int pwm_backlight_initial_power_state(const struct pwm_bl_data *pb)
5418 */
5419
5420 /* if the enable GPIO is disabled, do not enable the backlight */
5421 - if (pb->enable_gpio && gpiod_get_value(pb->enable_gpio) == 0)
5422 + if (pb->enable_gpio && gpiod_get_value_cansleep(pb->enable_gpio) == 0)
5423 return FB_BLANK_POWERDOWN;
5424
5425 /* The regulator is disabled, do not enable the backlight */
5426 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
5427 index cb43a2258c51..4721491e6c8c 100644
5428 --- a/drivers/video/fbdev/core/fbmem.c
5429 +++ b/drivers/video/fbdev/core/fbmem.c
5430 @@ -431,6 +431,9 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
5431 {
5432 unsigned int x;
5433
5434 + if (image->width > info->var.xres || image->height > info->var.yres)
5435 + return;
5436 +
5437 if (rotate == FB_ROTATE_UR) {
5438 for (x = 0;
5439 x < num && image->dx + image->width <= info->var.xres;
5440 diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
5441 index cba6b586bfbd..d97fcfc5e558 100644
5442 --- a/drivers/xen/gntdev-dmabuf.c
5443 +++ b/drivers/xen/gntdev-dmabuf.c
5444 @@ -80,6 +80,12 @@ struct gntdev_dmabuf_priv {
5445 struct list_head imp_list;
5446 /* This is the lock which protects dma_buf_xxx lists. */
5447 struct mutex lock;
5448 + /*
5449 + * We reference this file while exporting dma-bufs, so
5450 + * the grant device context is not destroyed while there are
5451 + * external users alive.
5452 + */
5453 + struct file *filp;
5454 };
5455
5456 /* DMA buffer export support. */
5457 @@ -311,6 +317,7 @@ static void dmabuf_exp_release(struct kref *kref)
5458
5459 dmabuf_exp_wait_obj_signal(gntdev_dmabuf->priv, gntdev_dmabuf);
5460 list_del(&gntdev_dmabuf->next);
5461 + fput(gntdev_dmabuf->priv->filp);
5462 kfree(gntdev_dmabuf);
5463 }
5464
5465 @@ -423,6 +430,7 @@ static int dmabuf_exp_from_pages(struct gntdev_dmabuf_export_args *args)
5466 mutex_lock(&args->dmabuf_priv->lock);
5467 list_add(&gntdev_dmabuf->next, &args->dmabuf_priv->exp_list);
5468 mutex_unlock(&args->dmabuf_priv->lock);
5469 + get_file(gntdev_dmabuf->priv->filp);
5470 return 0;
5471
5472 fail:
5473 @@ -834,7 +842,7 @@ long gntdev_ioctl_dmabuf_imp_release(struct gntdev_priv *priv,
5474 return dmabuf_imp_release(priv->dmabuf_priv, op.fd);
5475 }
5476
5477 -struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
5478 +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp)
5479 {
5480 struct gntdev_dmabuf_priv *priv;
5481
5482 @@ -847,6 +855,8 @@ struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void)
5483 INIT_LIST_HEAD(&priv->exp_wait_list);
5484 INIT_LIST_HEAD(&priv->imp_list);
5485
5486 + priv->filp = filp;
5487 +
5488 return priv;
5489 }
5490
5491 diff --git a/drivers/xen/gntdev-dmabuf.h b/drivers/xen/gntdev-dmabuf.h
5492 index 7220a53d0fc5..3d9b9cf9d5a1 100644
5493 --- a/drivers/xen/gntdev-dmabuf.h
5494 +++ b/drivers/xen/gntdev-dmabuf.h
5495 @@ -14,7 +14,7 @@
5496 struct gntdev_dmabuf_priv;
5497 struct gntdev_priv;
5498
5499 -struct gntdev_dmabuf_priv *gntdev_dmabuf_init(void);
5500 +struct gntdev_dmabuf_priv *gntdev_dmabuf_init(struct file *filp);
5501
5502 void gntdev_dmabuf_fini(struct gntdev_dmabuf_priv *priv);
5503
5504 diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
5505 index 5efc5eee9544..7cf9c51318aa 100644
5506 --- a/drivers/xen/gntdev.c
5507 +++ b/drivers/xen/gntdev.c
5508 @@ -600,7 +600,7 @@ static int gntdev_open(struct inode *inode, struct file *flip)
5509 mutex_init(&priv->lock);
5510
5511 #ifdef CONFIG_XEN_GNTDEV_DMABUF
5512 - priv->dmabuf_priv = gntdev_dmabuf_init();
5513 + priv->dmabuf_priv = gntdev_dmabuf_init(flip);
5514 if (IS_ERR(priv->dmabuf_priv)) {
5515 ret = PTR_ERR(priv->dmabuf_priv);
5516 kfree(priv);
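
The gntdev changes pin the character-device file with get_file() for as long as an exported dma-buf exists and drop it with fput() when the export is released, so the grant-device context cannot be torn down while an importer still holds the buffer. A bare-bones sketch of tying an exported object's lifetime to a file reference (invented names):

    #include <linux/file.h>
    #include <linux/fs.h>
    #include <linux/slab.h>

    struct example_export {
        struct file *owner;    /* keeps the owning device context alive */
        /* ... exported-object state ... */
    };

    static struct example_export *example_export_create(struct file *owner)
    {
        struct example_export *exp = kzalloc(sizeof(*exp), GFP_KERNEL);

        if (!exp)
            return NULL;
        exp->owner = get_file(owner);    /* one reference per live export */
        return exp;
    }

    static void example_export_release(struct example_export *exp)
    {
        fput(exp->owner);    /* lets the device context go away again */
        kfree(exp);
    }
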
5517 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
5518 index 0a6615573351..1b68700bc1c5 100644
5519 --- a/fs/btrfs/extent-tree.c
5520 +++ b/fs/btrfs/extent-tree.c
5521 @@ -4808,6 +4808,7 @@ skip_async:
5522 }
5523
5524 struct reserve_ticket {
5525 + u64 orig_bytes;
5526 u64 bytes;
5527 int error;
5528 struct list_head list;
5529 @@ -5030,7 +5031,7 @@ static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
5530 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
5531 }
5532
5533 -static void wake_all_tickets(struct list_head *head)
5534 +static bool wake_all_tickets(struct list_head *head)
5535 {
5536 struct reserve_ticket *ticket;
5537
5538 @@ -5039,7 +5040,10 @@ static void wake_all_tickets(struct list_head *head)
5539 list_del_init(&ticket->list);
5540 ticket->error = -ENOSPC;
5541 wake_up(&ticket->wait);
5542 + if (ticket->bytes != ticket->orig_bytes)
5543 + return true;
5544 }
5545 + return false;
5546 }
5547
5548 /*
5549 @@ -5094,8 +5098,12 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
5550 if (flush_state > COMMIT_TRANS) {
5551 commit_cycles++;
5552 if (commit_cycles > 2) {
5553 - wake_all_tickets(&space_info->tickets);
5554 - space_info->flush = 0;
5555 + if (wake_all_tickets(&space_info->tickets)) {
5556 + flush_state = FLUSH_DELAYED_ITEMS_NR;
5557 + commit_cycles--;
5558 + } else {
5559 + space_info->flush = 0;
5560 + }
5561 } else {
5562 flush_state = FLUSH_DELAYED_ITEMS_NR;
5563 }
5564 @@ -5147,10 +5155,11 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
5565
5566 static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5567 struct btrfs_space_info *space_info,
5568 - struct reserve_ticket *ticket, u64 orig_bytes)
5569 + struct reserve_ticket *ticket)
5570
5571 {
5572 DEFINE_WAIT(wait);
5573 + u64 reclaim_bytes = 0;
5574 int ret = 0;
5575
5576 spin_lock(&space_info->lock);
5577 @@ -5171,14 +5180,12 @@ static int wait_reserve_ticket(struct btrfs_fs_info *fs_info,
5578 ret = ticket->error;
5579 if (!list_empty(&ticket->list))
5580 list_del_init(&ticket->list);
5581 - if (ticket->bytes && ticket->bytes < orig_bytes) {
5582 - u64 num_bytes = orig_bytes - ticket->bytes;
5583 - update_bytes_may_use(space_info, -num_bytes);
5584 - trace_btrfs_space_reservation(fs_info, "space_info",
5585 - space_info->flags, num_bytes, 0);
5586 - }
5587 + if (ticket->bytes && ticket->bytes < ticket->orig_bytes)
5588 + reclaim_bytes = ticket->orig_bytes - ticket->bytes;
5589 spin_unlock(&space_info->lock);
5590
5591 + if (reclaim_bytes)
5592 + space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
5593 return ret;
5594 }
5595
5596 @@ -5204,6 +5211,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5597 {
5598 struct reserve_ticket ticket;
5599 u64 used;
5600 + u64 reclaim_bytes = 0;
5601 int ret = 0;
5602
5603 ASSERT(orig_bytes);
5604 @@ -5239,6 +5247,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5605 * the list and we will do our own flushing further down.
5606 */
5607 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5608 + ticket.orig_bytes = orig_bytes;
5609 ticket.bytes = orig_bytes;
5610 ticket.error = 0;
5611 init_waitqueue_head(&ticket.wait);
5612 @@ -5279,25 +5288,21 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
5613 return ret;
5614
5615 if (flush == BTRFS_RESERVE_FLUSH_ALL)
5616 - return wait_reserve_ticket(fs_info, space_info, &ticket,
5617 - orig_bytes);
5618 + return wait_reserve_ticket(fs_info, space_info, &ticket);
5619
5620 ret = 0;
5621 priority_reclaim_metadata_space(fs_info, space_info, &ticket);
5622 spin_lock(&space_info->lock);
5623 if (ticket.bytes) {
5624 - if (ticket.bytes < orig_bytes) {
5625 - u64 num_bytes = orig_bytes - ticket.bytes;
5626 - update_bytes_may_use(space_info, -num_bytes);
5627 - trace_btrfs_space_reservation(fs_info, "space_info",
5628 - space_info->flags,
5629 - num_bytes, 0);
5630 -
5631 - }
5632 + if (ticket.bytes < orig_bytes)
5633 + reclaim_bytes = orig_bytes - ticket.bytes;
5634 list_del_init(&ticket.list);
5635 ret = -ENOSPC;
5636 }
5637 spin_unlock(&space_info->lock);
5638 +
5639 + if (reclaim_bytes)
5640 + space_info_add_old_bytes(fs_info, space_info, reclaim_bytes);
5641 ASSERT(list_empty(&ticket.list));
5642 return ret;
5643 }
5644 @@ -8690,6 +8695,8 @@ struct walk_control {
5645 u64 refs[BTRFS_MAX_LEVEL];
5646 u64 flags[BTRFS_MAX_LEVEL];
5647 struct btrfs_key update_progress;
5648 + struct btrfs_key drop_progress;
5649 + int drop_level;
5650 int stage;
5651 int level;
5652 int shared_level;
5653 @@ -9028,6 +9035,16 @@ skip:
5654 ret);
5655 }
5656 }
5657 +
5658 + /*
5659 + * We need to update the next key in our walk control so we can
5660 + * update the drop_progress key accordingly. We don't care if
5661 + * find_next_key doesn't find a key because that means we're at
5662 + * the end and are going to clean up now.
5663 + */
5664 + wc->drop_level = level;
5665 + find_next_key(path, level, &wc->drop_progress);
5666 +
5667 ret = btrfs_free_extent(trans, root, bytenr, fs_info->nodesize,
5668 parent, root->root_key.objectid,
5669 level - 1, 0);
5670 @@ -9378,12 +9395,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
5671 }
5672
5673 if (wc->stage == DROP_REFERENCE) {
5674 - level = wc->level;
5675 - btrfs_node_key(path->nodes[level],
5676 - &root_item->drop_progress,
5677 - path->slots[level]);
5678 - root_item->drop_level = level;
5679 - }
5680 + wc->drop_level = wc->level;
5681 + btrfs_node_key_to_cpu(path->nodes[wc->drop_level],
5682 + &wc->drop_progress,
5683 + path->slots[wc->drop_level]);
5684 + }
5685 + btrfs_cpu_key_to_disk(&root_item->drop_progress,
5686 + &wc->drop_progress);
5687 + root_item->drop_level = wc->drop_level;
5688
5689 BUG_ON(wc->level == 0);
5690 if (btrfs_should_end_transaction(trans) ||
5691 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
5692 index 543dd5e66f31..e28fb43e943b 100644
5693 --- a/fs/btrfs/qgroup.c
5694 +++ b/fs/btrfs/qgroup.c
5695 @@ -2842,16 +2842,15 @@ out:
5696 /*
5697 * Two limits to commit transaction in advance.
5698 *
5699 - * For RATIO, it will be 1/RATIO of the remaining limit
5700 - * (excluding data and prealloc meta) as threshold.
5701 + * For RATIO, it will be 1/RATIO of the remaining limit as threshold.
5702 * For SIZE, it will be in byte unit as threshold.
5703 */
5704 -#define QGROUP_PERTRANS_RATIO 32
5705 -#define QGROUP_PERTRANS_SIZE SZ_32M
5706 +#define QGROUP_FREE_RATIO 32
5707 +#define QGROUP_FREE_SIZE SZ_32M
5708 static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
5709 const struct btrfs_qgroup *qg, u64 num_bytes)
5710 {
5711 - u64 limit;
5712 + u64 free;
5713 u64 threshold;
5714
5715 if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
5716 @@ -2870,20 +2869,21 @@ static bool qgroup_check_limits(struct btrfs_fs_info *fs_info,
5717 */
5718 if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER |
5719 BTRFS_QGROUP_LIMIT_MAX_EXCL))) {
5720 - if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL)
5721 - limit = qg->max_excl;
5722 - else
5723 - limit = qg->max_rfer;
5724 - threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] -
5725 - qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) /
5726 - QGROUP_PERTRANS_RATIO;
5727 - threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE);
5728 + if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) {
5729 + free = qg->max_excl - qgroup_rsv_total(qg) - qg->excl;
5730 + threshold = min_t(u64, qg->max_excl / QGROUP_FREE_RATIO,
5731 + QGROUP_FREE_SIZE);
5732 + } else {
5733 + free = qg->max_rfer - qgroup_rsv_total(qg) - qg->rfer;
5734 + threshold = min_t(u64, qg->max_rfer / QGROUP_FREE_RATIO,
5735 + QGROUP_FREE_SIZE);
5736 + }
5737
5738 /*
5739 * Use transaction_kthread to commit transaction, so we no
5740 * longer need to bother nested transaction nor lock context.
5741 */
5742 - if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold)
5743 + if (free < threshold)
5744 btrfs_commit_transaction_locksafe(fs_info);
5745 }
5746
5747 diff --git a/fs/buffer.c b/fs/buffer.c
5748 index 48318fb74938..cab7a026876b 100644
5749 --- a/fs/buffer.c
5750 +++ b/fs/buffer.c
5751 @@ -3027,6 +3027,13 @@ void guard_bio_eod(int op, struct bio *bio)
5752 /* Uhhuh. We've got a bio that straddles the device size! */
5753 truncated_bytes = bio->bi_iter.bi_size - (maxsector << 9);
5754
5755 + /*
5756 + * The bio contains more than one segment which spans EOD, just return
5757 + * and let IO layer turn it into an EIO
5758 + */
5759 + if (truncated_bytes > bvec->bv_len)
5760 + return;
5761 +
5762 /* Truncate the bio.. */
5763 bio->bi_iter.bi_size -= truncated_bytes;
5764 bvec->bv_len -= truncated_bytes;
5765 diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
5766 index d9b99abe1243..5d83c924cc47 100644
5767 --- a/fs/cifs/cifs_dfs_ref.c
5768 +++ b/fs/cifs/cifs_dfs_ref.c
5769 @@ -285,9 +285,9 @@ static void dump_referral(const struct dfs_info3_param *ref)
5770 {
5771 cifs_dbg(FYI, "DFS: ref path: %s\n", ref->path_name);
5772 cifs_dbg(FYI, "DFS: node path: %s\n", ref->node_name);
5773 - cifs_dbg(FYI, "DFS: fl: %hd, srv_type: %hd\n",
5774 + cifs_dbg(FYI, "DFS: fl: %d, srv_type: %d\n",
5775 ref->flags, ref->server_type);
5776 - cifs_dbg(FYI, "DFS: ref_flags: %hd, path_consumed: %hd\n",
5777 + cifs_dbg(FYI, "DFS: ref_flags: %d, path_consumed: %d\n",
5778 ref->ref_flag, ref->path_consumed);
5779 }
5780
5781 diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
5782 index e61cd2938c9e..9d4e60123db4 100644
5783 --- a/fs/cifs/connect.c
5784 +++ b/fs/cifs/connect.c
5785 @@ -1487,6 +1487,11 @@ cifs_parse_devname(const char *devname, struct smb_vol *vol)
5786 const char *delims = "/\\";
5787 size_t len;
5788
5789 + if (unlikely(!devname || !*devname)) {
5790 + cifs_dbg(VFS, "Device name not specified.\n");
5791 + return -EINVAL;
5792 + }
5793 +
5794 /* make sure we have a valid UNC double delimiter prefix */
5795 len = strspn(devname, delims);
5796 if (len != 2)
5797 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
5798 index 95461db80011..8d107587208f 100644
5799 --- a/fs/cifs/file.c
5800 +++ b/fs/cifs/file.c
5801 @@ -1645,8 +1645,20 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
5802 rc = server->ops->mand_unlock_range(cfile, flock, xid);
5803
5804 out:
5805 - if (flock->fl_flags & FL_POSIX && !rc)
5806 + if (flock->fl_flags & FL_POSIX) {
5807 + /*
5808 + * If this is a request to remove all locks because we
5809 + * are closing the file, it doesn't matter if the
5810 + * unlocking failed as both cifs.ko and the SMB server
5811 + * remove the lock on file close
5812 + */
5813 + if (rc) {
5814 + cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
5815 + if (!(flock->fl_flags & FL_CLOSE))
5816 + return rc;
5817 + }
5818 rc = locks_lock_file_wait(file, flock);
5819 + }
5820 return rc;
5821 }
5822
5823 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
5824 index 32a6c020478f..20a88776f04d 100644
5825 --- a/fs/cifs/smb1ops.c
5826 +++ b/fs/cifs/smb1ops.c
5827 @@ -308,7 +308,7 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
5828 remaining = tgt_total_cnt - total_in_tgt;
5829
5830 if (remaining < 0) {
5831 - cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n",
5832 + cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%u\n",
5833 tgt_total_cnt, total_in_tgt);
5834 return -EPROTO;
5835 }
5836 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
5837 index 104905732fbe..53642a237bf9 100644
5838 --- a/fs/cifs/smb2pdu.c
5839 +++ b/fs/cifs/smb2pdu.c
5840 @@ -986,8 +986,14 @@ int smb3_validate_negotiate(const unsigned int xid, struct cifs_tcon *tcon)
5841 rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
5842 FSCTL_VALIDATE_NEGOTIATE_INFO, true /* is_fsctl */,
5843 (char *)pneg_inbuf, inbuflen, (char **)&pneg_rsp, &rsplen);
5844 -
5845 - if (rc != 0) {
5846 + if (rc == -EOPNOTSUPP) {
5847 + /*
5848 + * Old Windows versions or Netapp SMB server can return
5849 + * not supported error. Client should accept it.
5850 + */
5851 + cifs_dbg(VFS, "Server does not support validate negotiate\n");
5852 + return 0;
5853 + } else if (rc != 0) {
5854 cifs_dbg(VFS, "validate protocol negotiate failed: %d\n", rc);
5855 rc = -EIO;
5856 goto out_free_inbuf;
5857 diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
5858 index 240b6dea5441..252bbbb5a2f4 100644
5859 --- a/fs/ext4/extents.c
5860 +++ b/fs/ext4/extents.c
5861 @@ -2956,14 +2956,17 @@ again:
5862 if (err < 0)
5863 goto out;
5864
5865 - } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
5866 + } else if (sbi->s_cluster_ratio > 1 && end >= ex_end &&
5867 + partial.state == initial) {
5868 /*
5869 - * If there's an extent to the right its first cluster
5870 - * contains the immediate right boundary of the
5871 - * truncated/punched region. Set partial_cluster to
5872 - * its negative value so it won't be freed if shared
5873 - * with the current extent. The end < ee_block case
5874 - * is handled in ext4_ext_rm_leaf().
5875 + * If we're punching, there's an extent to the right.
5876 + * If the partial cluster hasn't been set, set it to
5877 + * that extent's first cluster and its state to nofree
5878 + * so it won't be freed should it contain blocks to be
5879 + * removed. If it's already set (tofree/nofree), we're
5880 + * retrying and keep the original partial cluster info
5881 + * so a cluster marked tofree as a result of earlier
5882 + * extent removal is not lost.
5883 */
5884 lblk = ex_end + 1;
5885 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
5886 diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
5887 index 9e96a0bd08d9..e1801b288847 100644
5888 --- a/fs/ext4/indirect.c
5889 +++ b/fs/ext4/indirect.c
5890 @@ -1219,6 +1219,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
5891 ext4_lblk_t offsets[4], offsets2[4];
5892 Indirect chain[4], chain2[4];
5893 Indirect *partial, *partial2;
5894 + Indirect *p = NULL, *p2 = NULL;
5895 ext4_lblk_t max_block;
5896 __le32 nr = 0, nr2 = 0;
5897 int n = 0, n2 = 0;
5898 @@ -1260,7 +1261,7 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
5899 }
5900
5901
5902 - partial = ext4_find_shared(inode, n, offsets, chain, &nr);
5903 + partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
5904 if (nr) {
5905 if (partial == chain) {
5906 /* Shared branch grows from the inode */
5907 @@ -1285,13 +1286,11 @@ int ext4_ind_remove_space(handle_t *handle, struct inode *inode,
5908 partial->p + 1,
5909 (__le32 *)partial->bh->b_data+addr_per_block,
5910 (chain+n-1) - partial);
5911 - BUFFER_TRACE(partial->bh, "call brelse");
5912 - brelse(partial->bh);
5913 partial--;
5914 }
5915
5916 end_range:
5917 - partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
5918 + partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
5919 if (nr2) {
5920 if (partial2 == chain2) {
5921 /*
5922 @@ -1321,16 +1320,14 @@ end_range:
5923 (__le32 *)partial2->bh->b_data,
5924 partial2->p,
5925 (chain2+n2-1) - partial2);
5926 - BUFFER_TRACE(partial2->bh, "call brelse");
5927 - brelse(partial2->bh);
5928 partial2--;
5929 }
5930 goto do_indirects;
5931 }
5932
5933 /* Punch happened within the same level (n == n2) */
5934 - partial = ext4_find_shared(inode, n, offsets, chain, &nr);
5935 - partial2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
5936 + partial = p = ext4_find_shared(inode, n, offsets, chain, &nr);
5937 + partial2 = p2 = ext4_find_shared(inode, n2, offsets2, chain2, &nr2);
5938
5939 /* Free top, but only if partial2 isn't its subtree. */
5940 if (nr) {
5941 @@ -1387,15 +1384,7 @@ end_range:
5942 partial->p + 1,
5943 partial2->p,
5944 (chain+n-1) - partial);
5945 - while (partial > chain) {
5946 - BUFFER_TRACE(partial->bh, "call brelse");
5947 - brelse(partial->bh);
5948 - }
5949 - while (partial2 > chain2) {
5950 - BUFFER_TRACE(partial2->bh, "call brelse");
5951 - brelse(partial2->bh);
5952 - }
5953 - return 0;
5954 + goto cleanup;
5955 }
5956
5957 /*
5958 @@ -1410,8 +1399,6 @@ end_range:
5959 partial->p + 1,
5960 (__le32 *)partial->bh->b_data+addr_per_block,
5961 (chain+n-1) - partial);
5962 - BUFFER_TRACE(partial->bh, "call brelse");
5963 - brelse(partial->bh);
5964 partial--;
5965 }
5966 if (partial2 > chain2 && depth2 <= depth) {
5967 @@ -1419,11 +1406,21 @@ end_range:
5968 (__le32 *)partial2->bh->b_data,
5969 partial2->p,
5970 (chain2+n2-1) - partial2);
5971 - BUFFER_TRACE(partial2->bh, "call brelse");
5972 - brelse(partial2->bh);
5973 partial2--;
5974 }
5975 }
5976 +
5977 +cleanup:
5978 + while (p && p > chain) {
5979 + BUFFER_TRACE(p->bh, "call brelse");
5980 + brelse(p->bh);
5981 + p--;
5982 + }
5983 + while (p2 && p2 > chain2) {
5984 + BUFFER_TRACE(p2->bh, "call brelse");
5985 + brelse(p2->bh);
5986 + p2--;
5987 + }
5988 return 0;
5989
5990 do_indirects:
5991 @@ -1431,7 +1428,7 @@ do_indirects:
5992 switch (offsets[0]) {
5993 default:
5994 if (++n >= n2)
5995 - return 0;
5996 + break;
5997 nr = i_data[EXT4_IND_BLOCK];
5998 if (nr) {
5999 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 1);
6000 @@ -1439,7 +1436,7 @@ do_indirects:
6001 }
6002 case EXT4_IND_BLOCK:
6003 if (++n >= n2)
6004 - return 0;
6005 + break;
6006 nr = i_data[EXT4_DIND_BLOCK];
6007 if (nr) {
6008 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 2);
6009 @@ -1447,7 +1444,7 @@ do_indirects:
6010 }
6011 case EXT4_DIND_BLOCK:
6012 if (++n >= n2)
6013 - return 0;
6014 + break;
6015 nr = i_data[EXT4_TIND_BLOCK];
6016 if (nr) {
6017 ext4_free_branches(handle, inode, NULL, &nr, &nr+1, 3);
6018 @@ -1456,5 +1453,5 @@ do_indirects:
6019 case EXT4_TIND_BLOCK:
6020 ;
6021 }
6022 - return 0;
6023 + goto cleanup;
6024 }
6025 diff --git a/fs/f2fs/extent_cache.c b/fs/f2fs/extent_cache.c
6026 index 1cb0fcc67d2d..caf77fe8ac07 100644
6027 --- a/fs/f2fs/extent_cache.c
6028 +++ b/fs/f2fs/extent_cache.c
6029 @@ -506,7 +506,7 @@ static void f2fs_update_extent_tree_range(struct inode *inode,
6030 unsigned int end = fofs + len;
6031 unsigned int pos = (unsigned int)fofs;
6032 bool updated = false;
6033 - bool leftmost;
6034 + bool leftmost = false;
6035
6036 if (!et)
6037 return;
6038 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
6039 index 12fabd6735dd..279bc00489cc 100644
6040 --- a/fs/f2fs/f2fs.h
6041 +++ b/fs/f2fs/f2fs.h
6042 @@ -456,7 +456,6 @@ struct f2fs_flush_device {
6043
6044 /* for inline stuff */
6045 #define DEF_INLINE_RESERVED_SIZE 1
6046 -#define DEF_MIN_INLINE_SIZE 1
6047 static inline int get_extra_isize(struct inode *inode);
6048 static inline int get_inline_xattr_addrs(struct inode *inode);
6049 #define MAX_INLINE_DATA(inode) (sizeof(__le32) * \
6050 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
6051 index d636cbcf68f2..aacbb864ec1e 100644
6052 --- a/fs/f2fs/inline.c
6053 +++ b/fs/f2fs/inline.c
6054 @@ -659,6 +659,12 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
6055 if (IS_ERR(ipage))
6056 return PTR_ERR(ipage);
6057
6058 + /*
6059 + * f2fs_readdir was protected by inode.i_rwsem, it is safe to access
6060 + * ipage without page's lock held.
6061 + */
6062 + unlock_page(ipage);
6063 +
6064 inline_dentry = inline_data_addr(inode, ipage);
6065
6066 make_dentry_ptr_inline(inode, &d, inline_dentry);
6067 @@ -667,7 +673,7 @@ int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
6068 if (!err)
6069 ctx->pos = d.max;
6070
6071 - f2fs_put_page(ipage, 1);
6072 + f2fs_put_page(ipage, 0);
6073 return err < 0 ? err : 0;
6074 }
6075
6076 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
6077 index c46a1d4318d4..5892fa3c885f 100644
6078 --- a/fs/f2fs/super.c
6079 +++ b/fs/f2fs/super.c
6080 @@ -834,12 +834,13 @@ static int parse_options(struct super_block *sb, char *options)
6081 "set with inline_xattr option");
6082 return -EINVAL;
6083 }
6084 - if (!F2FS_OPTION(sbi).inline_xattr_size ||
6085 - F2FS_OPTION(sbi).inline_xattr_size >=
6086 - DEF_ADDRS_PER_INODE -
6087 - F2FS_TOTAL_EXTRA_ATTR_SIZE -
6088 - DEF_INLINE_RESERVED_SIZE -
6089 - DEF_MIN_INLINE_SIZE) {
6090 + if (F2FS_OPTION(sbi).inline_xattr_size <
6091 + sizeof(struct f2fs_xattr_header) / sizeof(__le32) ||
6092 + F2FS_OPTION(sbi).inline_xattr_size >
6093 + DEF_ADDRS_PER_INODE -
6094 + F2FS_TOTAL_EXTRA_ATTR_SIZE / sizeof(__le32) -
6095 + DEF_INLINE_RESERVED_SIZE -
6096 + MIN_INLINE_DENTRY_SIZE / sizeof(__le32)) {
6097 f2fs_msg(sb, KERN_ERR,
6098 "inline xattr size is out of range");
6099 return -EINVAL;
6100 @@ -915,6 +916,10 @@ static int f2fs_drop_inode(struct inode *inode)
6101 sb_start_intwrite(inode->i_sb);
6102 f2fs_i_size_write(inode, 0);
6103
6104 + f2fs_submit_merged_write_cond(F2FS_I_SB(inode),
6105 + inode, NULL, 0, DATA);
6106 + truncate_inode_pages_final(inode->i_mapping);
6107 +
6108 if (F2FS_HAS_BLOCKS(inode))
6109 f2fs_truncate(inode);
6110
6111 diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
6112 index 0575edbe3ed6..f1ab9000b294 100644
6113 --- a/fs/f2fs/sysfs.c
6114 +++ b/fs/f2fs/sysfs.c
6115 @@ -278,10 +278,16 @@ out:
6116 return count;
6117 }
6118
6119 - *ui = t;
6120
6121 - if (!strcmp(a->attr.name, "iostat_enable") && *ui == 0)
6122 - f2fs_reset_iostat(sbi);
6123 + if (!strcmp(a->attr.name, "iostat_enable")) {
6124 + sbi->iostat_enable = !!t;
6125 + if (!sbi->iostat_enable)
6126 + f2fs_reset_iostat(sbi);
6127 + return count;
6128 + }
6129 +
6130 + *ui = (unsigned int)t;
6131 +
6132 return count;
6133 }
6134
6135 diff --git a/fs/f2fs/trace.c b/fs/f2fs/trace.c
6136 index ce2a5eb210b6..d0ab533a9ce8 100644
6137 --- a/fs/f2fs/trace.c
6138 +++ b/fs/f2fs/trace.c
6139 @@ -14,7 +14,7 @@
6140 #include "trace.h"
6141
6142 static RADIX_TREE(pids, GFP_ATOMIC);
6143 -static struct mutex pids_lock;
6144 +static spinlock_t pids_lock;
6145 static struct last_io_info last_io;
6146
6147 static inline void __print_last_io(void)
6148 @@ -58,23 +58,29 @@ void f2fs_trace_pid(struct page *page)
6149
6150 set_page_private(page, (unsigned long)pid);
6151
6152 +retry:
6153 if (radix_tree_preload(GFP_NOFS))
6154 return;
6155
6156 - mutex_lock(&pids_lock);
6157 + spin_lock(&pids_lock);
6158 p = radix_tree_lookup(&pids, pid);
6159 if (p == current)
6160 goto out;
6161 if (p)
6162 radix_tree_delete(&pids, pid);
6163
6164 - f2fs_radix_tree_insert(&pids, pid, current);
6165 + if (radix_tree_insert(&pids, pid, current)) {
6166 + spin_unlock(&pids_lock);
6167 + radix_tree_preload_end();
6168 + cond_resched();
6169 + goto retry;
6170 + }
6171
6172 trace_printk("%3x:%3x %4x %-16s\n",
6173 MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev),
6174 pid, current->comm);
6175 out:
6176 - mutex_unlock(&pids_lock);
6177 + spin_unlock(&pids_lock);
6178 radix_tree_preload_end();
6179 }
6180
6181 @@ -119,7 +125,7 @@ void f2fs_trace_ios(struct f2fs_io_info *fio, int flush)
6182
6183 void f2fs_build_trace_ios(void)
6184 {
6185 - mutex_init(&pids_lock);
6186 + spin_lock_init(&pids_lock);
6187 }
6188
6189 #define PIDVEC_SIZE 128
6190 @@ -147,7 +153,7 @@ void f2fs_destroy_trace_ios(void)
6191 pid_t next_pid = 0;
6192 unsigned int found;
6193
6194 - mutex_lock(&pids_lock);
6195 + spin_lock(&pids_lock);
6196 while ((found = gang_lookup_pids(pid, next_pid, PIDVEC_SIZE))) {
6197 unsigned idx;
6198
6199 @@ -155,5 +161,5 @@ void f2fs_destroy_trace_ios(void)
6200 for (idx = 0; idx < found; idx++)
6201 radix_tree_delete(&pids, pid[idx]);
6202 }
6203 - mutex_unlock(&pids_lock);
6204 + spin_unlock(&pids_lock);
6205 }
6206 diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
6207 index 18d5ffbc5e8c..73b92985198b 100644
6208 --- a/fs/f2fs/xattr.c
6209 +++ b/fs/f2fs/xattr.c
6210 @@ -224,11 +224,11 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
6211 {
6212 struct f2fs_xattr_entry *entry;
6213 unsigned int inline_size = inline_xattr_size(inode);
6214 + void *max_addr = base_addr + inline_size;
6215
6216 list_for_each_xattr(entry, base_addr) {
6217 - if ((void *)entry + sizeof(__u32) > base_addr + inline_size ||
6218 - (void *)XATTR_NEXT_ENTRY(entry) + sizeof(__u32) >
6219 - base_addr + inline_size) {
6220 + if ((void *)entry + sizeof(__u32) > max_addr ||
6221 + (void *)XATTR_NEXT_ENTRY(entry) > max_addr) {
6222 *last_addr = entry;
6223 return NULL;
6224 }
6225 @@ -239,6 +239,13 @@ static struct f2fs_xattr_entry *__find_inline_xattr(struct inode *inode,
6226 if (!memcmp(entry->e_name, name, len))
6227 break;
6228 }
6229 +
6230 + /* inline xattr header or entry across max inline xattr size */
6231 + if (IS_XATTR_LAST_ENTRY(entry) &&
6232 + (void *)entry + sizeof(__u32) > max_addr) {
6233 + *last_addr = entry;
6234 + return NULL;
6235 + }
6236 return entry;
6237 }
6238
6239 diff --git a/fs/file.c b/fs/file.c
6240 index 3209ee271c41..a10487aa0a84 100644
6241 --- a/fs/file.c
6242 +++ b/fs/file.c
6243 @@ -457,6 +457,7 @@ struct files_struct init_files = {
6244 .full_fds_bits = init_files.full_fds_bits_init,
6245 },
6246 .file_lock = __SPIN_LOCK_UNLOCKED(init_files.file_lock),
6247 + .resize_wait = __WAIT_QUEUE_HEAD_INITIALIZER(init_files.resize_wait),
6248 };
6249
6250 static unsigned int find_next_fd(struct fdtable *fdt, unsigned int start)
6251 diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
6252 index 2eb55c3361a8..efd0ce9489ae 100644
6253 --- a/fs/jbd2/commit.c
6254 +++ b/fs/jbd2/commit.c
6255 @@ -694,9 +694,11 @@ void jbd2_journal_commit_transaction(journal_t *journal)
6256 the last tag we set up. */
6257
6258 tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
6259 -
6260 - jbd2_descriptor_block_csum_set(journal, descriptor);
6261 start_journal_io:
6262 + if (descriptor)
6263 + jbd2_descriptor_block_csum_set(journal,
6264 + descriptor);
6265 +
6266 for (i = 0; i < bufs; i++) {
6267 struct buffer_head *bh = wbuf[i];
6268 /*
6269 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
6270 index 8ef6b6daaa7a..88f2a49338a1 100644
6271 --- a/fs/jbd2/journal.c
6272 +++ b/fs/jbd2/journal.c
6273 @@ -1356,6 +1356,10 @@ static int journal_reset(journal_t *journal)
6274 return jbd2_journal_start_thread(journal);
6275 }
6276
6277 +/*
6278 + * This function expects that the caller will have locked the journal
6279 + * buffer head, and will return with it unlocked
6280 + */
6281 static int jbd2_write_superblock(journal_t *journal, int write_flags)
6282 {
6283 struct buffer_head *bh = journal->j_sb_buffer;
6284 @@ -1365,7 +1369,6 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
6285 trace_jbd2_write_superblock(journal, write_flags);
6286 if (!(journal->j_flags & JBD2_BARRIER))
6287 write_flags &= ~(REQ_FUA | REQ_PREFLUSH);
6288 - lock_buffer(bh);
6289 if (buffer_write_io_error(bh)) {
6290 /*
6291 * Oh, dear. A previous attempt to write the journal
6292 @@ -1424,6 +1427,7 @@ int jbd2_journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
6293 jbd_debug(1, "JBD2: updating superblock (start %lu, seq %u)\n",
6294 tail_block, tail_tid);
6295
6296 + lock_buffer(journal->j_sb_buffer);
6297 sb->s_sequence = cpu_to_be32(tail_tid);
6298 sb->s_start = cpu_to_be32(tail_block);
6299
6300 @@ -1454,18 +1458,17 @@ static void jbd2_mark_journal_empty(journal_t *journal, int write_op)
6301 journal_superblock_t *sb = journal->j_superblock;
6302
6303 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
6304 - read_lock(&journal->j_state_lock);
6305 - /* Is it already empty? */
6306 - if (sb->s_start == 0) {
6307 - read_unlock(&journal->j_state_lock);
6308 + lock_buffer(journal->j_sb_buffer);
6309 + if (sb->s_start == 0) { /* Is it already empty? */
6310 + unlock_buffer(journal->j_sb_buffer);
6311 return;
6312 }
6313 +
6314 jbd_debug(1, "JBD2: Marking journal as empty (seq %d)\n",
6315 journal->j_tail_sequence);
6316
6317 sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
6318 sb->s_start = cpu_to_be32(0);
6319 - read_unlock(&journal->j_state_lock);
6320
6321 jbd2_write_superblock(journal, write_op);
6322
6323 @@ -1488,9 +1491,8 @@ void jbd2_journal_update_sb_errno(journal_t *journal)
6324 journal_superblock_t *sb = journal->j_superblock;
6325 int errcode;
6326
6327 - read_lock(&journal->j_state_lock);
6328 + lock_buffer(journal->j_sb_buffer);
6329 errcode = journal->j_errno;
6330 - read_unlock(&journal->j_state_lock);
6331 if (errcode == -ESHUTDOWN)
6332 errcode = 0;
6333 jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", errcode);
6334 @@ -1894,28 +1896,27 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
6335
6336 sb = journal->j_superblock;
6337
6338 + /* Load the checksum driver if necessary */
6339 + if ((journal->j_chksum_driver == NULL) &&
6340 + INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
6341 + journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
6342 + if (IS_ERR(journal->j_chksum_driver)) {
6343 + printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
6344 + journal->j_chksum_driver = NULL;
6345 + return 0;
6346 + }
6347 + /* Precompute checksum seed for all metadata */
6348 + journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
6349 + sizeof(sb->s_uuid));
6350 + }
6351 +
6352 + lock_buffer(journal->j_sb_buffer);
6353 +
6354 /* If enabling v3 checksums, update superblock */
6355 if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
6356 sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
6357 sb->s_feature_compat &=
6358 ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
6359 -
6360 - /* Load the checksum driver */
6361 - if (journal->j_chksum_driver == NULL) {
6362 - journal->j_chksum_driver = crypto_alloc_shash("crc32c",
6363 - 0, 0);
6364 - if (IS_ERR(journal->j_chksum_driver)) {
6365 - printk(KERN_ERR "JBD2: Cannot load crc32c "
6366 - "driver.\n");
6367 - journal->j_chksum_driver = NULL;
6368 - return 0;
6369 - }
6370 -
6371 - /* Precompute checksum seed for all metadata */
6372 - journal->j_csum_seed = jbd2_chksum(journal, ~0,
6373 - sb->s_uuid,
6374 - sizeof(sb->s_uuid));
6375 - }
6376 }
6377
6378 /* If enabling v1 checksums, downgrade superblock */
6379 @@ -1927,6 +1928,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
6380 sb->s_feature_compat |= cpu_to_be32(compat);
6381 sb->s_feature_ro_compat |= cpu_to_be32(ro);
6382 sb->s_feature_incompat |= cpu_to_be32(incompat);
6383 + unlock_buffer(journal->j_sb_buffer);
6384
6385 return 1;
6386 #undef COMPAT_FEATURE_ON
6387 diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
6388 index 0e4166cc23a0..4ac775e32240 100644
6389 --- a/fs/ocfs2/cluster/nodemanager.c
6390 +++ b/fs/ocfs2/cluster/nodemanager.c
6391 @@ -621,13 +621,15 @@ static void o2nm_node_group_drop_item(struct config_group *group,
6392 struct o2nm_node *node = to_o2nm_node(item);
6393 struct o2nm_cluster *cluster = to_o2nm_cluster(group->cg_item.ci_parent);
6394
6395 - o2net_disconnect_node(node);
6396 + if (cluster->cl_nodes[node->nd_num] == node) {
6397 + o2net_disconnect_node(node);
6398
6399 - if (cluster->cl_has_local &&
6400 - (cluster->cl_local_node == node->nd_num)) {
6401 - cluster->cl_has_local = 0;
6402 - cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
6403 - o2net_stop_listening(node);
6404 + if (cluster->cl_has_local &&
6405 + (cluster->cl_local_node == node->nd_num)) {
6406 + cluster->cl_has_local = 0;
6407 + cluster->cl_local_node = O2NM_INVALID_NODE_NUM;
6408 + o2net_stop_listening(node);
6409 + }
6410 }
6411
6412 /* XXX call into net to stop this node from trading messages */
6413 diff --git a/fs/read_write.c b/fs/read_write.c
6414 index ff3c5e6f87cf..27b69b85d49f 100644
6415 --- a/fs/read_write.c
6416 +++ b/fs/read_write.c
6417 @@ -1238,6 +1238,9 @@ COMPAT_SYSCALL_DEFINE5(preadv64v2, unsigned long, fd,
6418 const struct compat_iovec __user *,vec,
6419 unsigned long, vlen, loff_t, pos, rwf_t, flags)
6420 {
6421 + if (pos == -1)
6422 + return do_compat_readv(fd, vec, vlen, flags);
6423 +
6424 return do_compat_preadv64(fd, vec, vlen, pos, flags);
6425 }
6426 #endif
6427 @@ -1344,6 +1347,9 @@ COMPAT_SYSCALL_DEFINE5(pwritev64v2, unsigned long, fd,
6428 const struct compat_iovec __user *,vec,
6429 unsigned long, vlen, loff_t, pos, rwf_t, flags)
6430 {
6431 + if (pos == -1)
6432 + return do_compat_writev(fd, vec, vlen, flags);
6433 +
6434 return do_compat_pwritev64(fd, vec, vlen, pos, flags);
6435 }
6436 #endif
6437 diff --git a/include/linux/atalk.h b/include/linux/atalk.h
6438 index 23f805562f4e..840cf92307ba 100644
6439 --- a/include/linux/atalk.h
6440 +++ b/include/linux/atalk.h
6441 @@ -161,16 +161,26 @@ extern int sysctl_aarp_resolve_time;
6442 extern void atalk_register_sysctl(void);
6443 extern void atalk_unregister_sysctl(void);
6444 #else
6445 -#define atalk_register_sysctl() do { } while(0)
6446 -#define atalk_unregister_sysctl() do { } while(0)
6447 +static inline int atalk_register_sysctl(void)
6448 +{
6449 + return 0;
6450 +}
6451 +static inline void atalk_unregister_sysctl(void)
6452 +{
6453 +}
6454 #endif
6455
6456 #ifdef CONFIG_PROC_FS
6457 extern int atalk_proc_init(void);
6458 extern void atalk_proc_exit(void);
6459 #else
6460 -#define atalk_proc_init() ({ 0; })
6461 -#define atalk_proc_exit() do { } while(0)
6462 +static inline int atalk_proc_init(void)
6463 +{
6464 + return 0;
6465 +}
6466 +static inline void atalk_proc_exit(void)
6467 +{
6468 +}
6469 #endif /* CONFIG_PROC_FS */
6470
6471 #endif /* __LINUX_ATALK_H__ */
6472 diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
6473 index 8fcbae1b8db0..120d1d40704b 100644
6474 --- a/include/linux/cgroup-defs.h
6475 +++ b/include/linux/cgroup-defs.h
6476 @@ -602,7 +602,7 @@ struct cgroup_subsys {
6477 void (*cancel_fork)(struct task_struct *task);
6478 void (*fork)(struct task_struct *task);
6479 void (*exit)(struct task_struct *task);
6480 - void (*free)(struct task_struct *task);
6481 + void (*release)(struct task_struct *task);
6482 void (*bind)(struct cgroup_subsys_state *root_css);
6483
6484 bool early_init:1;
6485 diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
6486 index 9968332cceed..81f58b4a5418 100644
6487 --- a/include/linux/cgroup.h
6488 +++ b/include/linux/cgroup.h
6489 @@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p);
6490 extern void cgroup_cancel_fork(struct task_struct *p);
6491 extern void cgroup_post_fork(struct task_struct *p);
6492 void cgroup_exit(struct task_struct *p);
6493 +void cgroup_release(struct task_struct *p);
6494 void cgroup_free(struct task_struct *p);
6495
6496 int cgroup_init_early(void);
6497 @@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; }
6498 static inline void cgroup_cancel_fork(struct task_struct *p) {}
6499 static inline void cgroup_post_fork(struct task_struct *p) {}
6500 static inline void cgroup_exit(struct task_struct *p) {}
6501 +static inline void cgroup_release(struct task_struct *p) {}
6502 static inline void cgroup_free(struct task_struct *p) {}
6503
6504 static inline int cgroup_init_early(void) { return 0; }
6505 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
6506 index e443fa9fa859..b7cf80a71293 100644
6507 --- a/include/linux/clk-provider.h
6508 +++ b/include/linux/clk-provider.h
6509 @@ -792,6 +792,9 @@ unsigned int __clk_get_enable_count(struct clk *clk);
6510 unsigned long clk_hw_get_rate(const struct clk_hw *hw);
6511 unsigned long __clk_get_flags(struct clk *clk);
6512 unsigned long clk_hw_get_flags(const struct clk_hw *hw);
6513 +#define clk_hw_can_set_rate_parent(hw) \
6514 + (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT)
6515 +
6516 bool clk_hw_is_prepared(const struct clk_hw *hw);
6517 bool clk_hw_rate_is_protected(const struct clk_hw *hw);
6518 bool clk_hw_is_enabled(const struct clk_hw *hw);
6519 diff --git a/include/linux/efi.h b/include/linux/efi.h
6520 index 28604a8d0aa9..a86485ac7c87 100644
6521 --- a/include/linux/efi.h
6522 +++ b/include/linux/efi.h
6523 @@ -1699,19 +1699,19 @@ extern int efi_tpm_eventlog_init(void);
6524 * fault happened while executing an efi runtime service.
6525 */
6526 enum efi_rts_ids {
6527 - NONE,
6528 - GET_TIME,
6529 - SET_TIME,
6530 - GET_WAKEUP_TIME,
6531 - SET_WAKEUP_TIME,
6532 - GET_VARIABLE,
6533 - GET_NEXT_VARIABLE,
6534 - SET_VARIABLE,
6535 - QUERY_VARIABLE_INFO,
6536 - GET_NEXT_HIGH_MONO_COUNT,
6537 - RESET_SYSTEM,
6538 - UPDATE_CAPSULE,
6539 - QUERY_CAPSULE_CAPS,
6540 + EFI_NONE,
6541 + EFI_GET_TIME,
6542 + EFI_SET_TIME,
6543 + EFI_GET_WAKEUP_TIME,
6544 + EFI_SET_WAKEUP_TIME,
6545 + EFI_GET_VARIABLE,
6546 + EFI_GET_NEXT_VARIABLE,
6547 + EFI_SET_VARIABLE,
6548 + EFI_QUERY_VARIABLE_INFO,
6549 + EFI_GET_NEXT_HIGH_MONO_COUNT,
6550 + EFI_RESET_SYSTEM,
6551 + EFI_UPDATE_CAPSULE,
6552 + EFI_QUERY_CAPSULE_CAPS,
6553 };
6554
6555 /*
6556 diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
6557 index d7711048ef93..c524ad7d31da 100644
6558 --- a/include/linux/f2fs_fs.h
6559 +++ b/include/linux/f2fs_fs.h
6560 @@ -489,12 +489,12 @@ typedef __le32 f2fs_hash_t;
6561
6562 /*
6563 * space utilization of regular dentry and inline dentry (w/o extra reservation)
6564 - * regular dentry inline dentry
6565 - * bitmap 1 * 27 = 27 1 * 23 = 23
6566 - * reserved 1 * 3 = 3 1 * 7 = 7
6567 - * dentry 11 * 214 = 2354 11 * 182 = 2002
6568 - * filename 8 * 214 = 1712 8 * 182 = 1456
6569 - * total 4096 3488
6570 + * regular dentry inline dentry (def) inline dentry (min)
6571 + * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1
6572 + * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1
6573 + * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22
6574 + * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16
6575 + * total 4096 3488 40
6576 *
6577 * Note: there are more reserved space in inline dentry than in regular
6578 * dentry, when converting inline dentry we should handle this carefully.
6579 @@ -506,6 +506,7 @@ typedef __le32 f2fs_hash_t;
6580 #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
6581 F2FS_SLOT_LEN) * \
6582 NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
6583 +#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
6584
6585 /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */
6586 struct f2fs_dir_entry {
6587 diff --git a/include/linux/filter.h b/include/linux/filter.h
6588 index e532fcc6e4b5..3358646a8e7a 100644
6589 --- a/include/linux/filter.h
6590 +++ b/include/linux/filter.h
6591 @@ -874,7 +874,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
6592 unsigned int alignment,
6593 bpf_jit_fill_hole_t bpf_fill_ill_insns);
6594 void bpf_jit_binary_free(struct bpf_binary_header *hdr);
6595 -
6596 +u64 bpf_jit_alloc_exec_limit(void);
6597 +void *bpf_jit_alloc_exec(unsigned long size);
6598 +void bpf_jit_free_exec(void *addr);
6599 void bpf_jit_free(struct bpf_prog *fp);
6600
6601 int bpf_jit_get_func_addr(const struct bpf_prog *prog,
6602 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
6603 index 65b4eaed1d96..7e748648c7d3 100644
6604 --- a/include/linux/i2c.h
6605 +++ b/include/linux/i2c.h
6606 @@ -333,6 +333,7 @@ struct i2c_client {
6607 char name[I2C_NAME_SIZE];
6608 struct i2c_adapter *adapter; /* the adapter we sit on */
6609 struct device dev; /* the device structure */
6610 + int init_irq; /* irq set at initialization */
6611 int irq; /* irq issued by device */
6612 struct list_head detected;
6613 #if IS_ENABLED(CONFIG_I2C_SLAVE)
6614 diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
6615 index dd1e40ddac7d..875c41b23f20 100644
6616 --- a/include/linux/irqdesc.h
6617 +++ b/include/linux/irqdesc.h
6618 @@ -65,6 +65,7 @@ struct irq_desc {
6619 unsigned int core_internal_state__do_not_mess_with_it;
6620 unsigned int depth; /* nested irq disables */
6621 unsigned int wake_depth; /* nested wake enables */
6622 + unsigned int tot_count;
6623 unsigned int irq_count; /* For detecting broken IRQs */
6624 unsigned long last_unhandled; /* Aging timer for unhandled count */
6625 unsigned int irqs_unhandled;
6626 diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
6627 index d314150658a4..a61dc075e2ce 100644
6628 --- a/include/linux/kasan-checks.h
6629 +++ b/include/linux/kasan-checks.h
6630 @@ -2,7 +2,7 @@
6631 #ifndef _LINUX_KASAN_CHECKS_H
6632 #define _LINUX_KASAN_CHECKS_H
6633
6634 -#ifdef CONFIG_KASAN
6635 +#if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL)
6636 void kasan_check_read(const volatile void *p, unsigned int size);
6637 void kasan_check_write(const volatile void *p, unsigned int size);
6638 #else
6639 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
6640 index e1a051724f7e..7cbbd891bfcd 100644
6641 --- a/include/linux/perf_event.h
6642 +++ b/include/linux/perf_event.h
6643 @@ -409,7 +409,7 @@ struct pmu {
6644 /*
6645 * Set up pmu-private data structures for an AUX area
6646 */
6647 - void *(*setup_aux) (int cpu, void **pages,
6648 + void *(*setup_aux) (struct perf_event *event, void **pages,
6649 int nr_pages, bool overwrite);
6650 /* optional */
6651
6652 diff --git a/include/linux/relay.h b/include/linux/relay.h
6653 index e1bdf01a86e2..c759f96e39c1 100644
6654 --- a/include/linux/relay.h
6655 +++ b/include/linux/relay.h
6656 @@ -66,7 +66,7 @@ struct rchan
6657 struct kref kref; /* channel refcount */
6658 void *private_data; /* for user-defined data */
6659 size_t last_toobig; /* tried to log event > subbuf size */
6660 - struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */
6661 + struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */
6662 int is_global; /* One global buffer ? */
6663 struct list_head list; /* for channel list */
6664 struct dentry *parent; /* parent dentry passed to open */
6665 diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
6666 index 5b9ae62272bb..503778920448 100644
6667 --- a/include/linux/ring_buffer.h
6668 +++ b/include/linux/ring_buffer.h
6669 @@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
6670 unsigned long *lost_events);
6671
6672 struct ring_buffer_iter *
6673 -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu);
6674 +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
6675 void ring_buffer_read_prepare_sync(void);
6676 void ring_buffer_read_start(struct ring_buffer_iter *iter);
6677 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
6678 diff --git a/include/linux/sched.h b/include/linux/sched.h
6679 index f9b43c989577..9b35aff09f70 100644
6680 --- a/include/linux/sched.h
6681 +++ b/include/linux/sched.h
6682 @@ -1748,9 +1748,9 @@ static __always_inline bool need_resched(void)
6683 static inline unsigned int task_cpu(const struct task_struct *p)
6684 {
6685 #ifdef CONFIG_THREAD_INFO_IN_TASK
6686 - return p->cpu;
6687 + return READ_ONCE(p->cpu);
6688 #else
6689 - return task_thread_info(p)->cpu;
6690 + return READ_ONCE(task_thread_info(p)->cpu);
6691 #endif
6692 }
6693
6694 diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
6695 index c31d3a47a47c..57c7ed3fe465 100644
6696 --- a/include/linux/sched/topology.h
6697 +++ b/include/linux/sched/topology.h
6698 @@ -176,10 +176,10 @@ typedef int (*sched_domain_flags_f)(void);
6699 #define SDTL_OVERLAP 0x01
6700
6701 struct sd_data {
6702 - struct sched_domain **__percpu sd;
6703 - struct sched_domain_shared **__percpu sds;
6704 - struct sched_group **__percpu sg;
6705 - struct sched_group_capacity **__percpu sgc;
6706 + struct sched_domain *__percpu *sd;
6707 + struct sched_domain_shared *__percpu *sds;
6708 + struct sched_group *__percpu *sg;
6709 + struct sched_group_capacity *__percpu *sgc;
6710 };
6711
6712 struct sched_domain_topology_level {
6713 diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
6714 index 4cd56808ac4e..89808ce293c4 100644
6715 --- a/include/net/netfilter/br_netfilter.h
6716 +++ b/include/net/netfilter/br_netfilter.h
6717 @@ -43,7 +43,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
6718 }
6719
6720 struct net_device *setup_pre_routing(struct sk_buff *skb);
6721 -void br_netfilter_enable(void);
6722
6723 #if IS_ENABLED(CONFIG_IPV6)
6724 int br_validate_ipv6(struct net *net, struct sk_buff *skb);
6725 diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
6726 index cb8a273732cf..bb8092fa1e36 100644
6727 --- a/include/scsi/libfcoe.h
6728 +++ b/include/scsi/libfcoe.h
6729 @@ -79,7 +79,7 @@ enum fip_state {
6730 * It must not change after fcoe_ctlr_init() sets it.
6731 */
6732 enum fip_mode {
6733 - FIP_MODE_AUTO = FIP_ST_AUTO,
6734 + FIP_MODE_AUTO,
6735 FIP_MODE_NON_FIP,
6736 FIP_MODE_FABRIC,
6737 FIP_MODE_VN2VN,
6738 @@ -250,7 +250,7 @@ struct fcoe_rport {
6739 };
6740
6741 /* FIP API functions */
6742 -void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_state);
6743 +void fcoe_ctlr_init(struct fcoe_ctlr *, enum fip_mode);
6744 void fcoe_ctlr_destroy(struct fcoe_ctlr *);
6745 void fcoe_ctlr_link_up(struct fcoe_ctlr *);
6746 int fcoe_ctlr_link_down(struct fcoe_ctlr *);
6747 diff --git a/kernel/audit.h b/kernel/audit.h
6748 index 91421679a168..6ffb70575082 100644
6749 --- a/kernel/audit.h
6750 +++ b/kernel/audit.h
6751 @@ -314,7 +314,7 @@ extern void audit_trim_trees(void);
6752 extern int audit_tag_tree(char *old, char *new);
6753 extern const char *audit_tree_path(struct audit_tree *tree);
6754 extern void audit_put_tree(struct audit_tree *tree);
6755 -extern void audit_kill_trees(struct list_head *list);
6756 +extern void audit_kill_trees(struct audit_context *context);
6757 #else
6758 #define audit_remove_tree_rule(rule) BUG()
6759 #define audit_add_tree_rule(rule) -EINVAL
6760 @@ -323,7 +323,7 @@ extern void audit_kill_trees(struct list_head *list);
6761 #define audit_put_tree(tree) (void)0
6762 #define audit_tag_tree(old, new) -EINVAL
6763 #define audit_tree_path(rule) "" /* never called */
6764 -#define audit_kill_trees(list) BUG()
6765 +#define audit_kill_trees(context) BUG()
6766 #endif
6767
6768 extern char *audit_unpack_string(void **bufp, size_t *remain, size_t len);
6769 diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
6770 index d4af4d97f847..abfb112f26aa 100644
6771 --- a/kernel/audit_tree.c
6772 +++ b/kernel/audit_tree.c
6773 @@ -524,13 +524,14 @@ static int tag_chunk(struct inode *inode, struct audit_tree *tree)
6774 return 0;
6775 }
6776
6777 -static void audit_tree_log_remove_rule(struct audit_krule *rule)
6778 +static void audit_tree_log_remove_rule(struct audit_context *context,
6779 + struct audit_krule *rule)
6780 {
6781 struct audit_buffer *ab;
6782
6783 if (!audit_enabled)
6784 return;
6785 - ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
6786 + ab = audit_log_start(context, GFP_KERNEL, AUDIT_CONFIG_CHANGE);
6787 if (unlikely(!ab))
6788 return;
6789 audit_log_format(ab, "op=remove_rule dir=");
6790 @@ -540,7 +541,7 @@ static void audit_tree_log_remove_rule(struct audit_krule *rule)
6791 audit_log_end(ab);
6792 }
6793
6794 -static void kill_rules(struct audit_tree *tree)
6795 +static void kill_rules(struct audit_context *context, struct audit_tree *tree)
6796 {
6797 struct audit_krule *rule, *next;
6798 struct audit_entry *entry;
6799 @@ -551,7 +552,7 @@ static void kill_rules(struct audit_tree *tree)
6800 list_del_init(&rule->rlist);
6801 if (rule->tree) {
6802 /* not a half-baked one */
6803 - audit_tree_log_remove_rule(rule);
6804 + audit_tree_log_remove_rule(context, rule);
6805 if (entry->rule.exe)
6806 audit_remove_mark(entry->rule.exe);
6807 rule->tree = NULL;
6808 @@ -633,7 +634,7 @@ static void trim_marked(struct audit_tree *tree)
6809 tree->goner = 1;
6810 spin_unlock(&hash_lock);
6811 mutex_lock(&audit_filter_mutex);
6812 - kill_rules(tree);
6813 + kill_rules(audit_context(), tree);
6814 list_del_init(&tree->list);
6815 mutex_unlock(&audit_filter_mutex);
6816 prune_one(tree);
6817 @@ -973,8 +974,10 @@ static void audit_schedule_prune(void)
6818 * ... and that one is done if evict_chunk() decides to delay until the end
6819 * of syscall. Runs synchronously.
6820 */
6821 -void audit_kill_trees(struct list_head *list)
6822 +void audit_kill_trees(struct audit_context *context)
6823 {
6824 + struct list_head *list = &context->killed_trees;
6825 +
6826 audit_ctl_lock();
6827 mutex_lock(&audit_filter_mutex);
6828
6829 @@ -982,7 +985,7 @@ void audit_kill_trees(struct list_head *list)
6830 struct audit_tree *victim;
6831
6832 victim = list_entry(list->next, struct audit_tree, list);
6833 - kill_rules(victim);
6834 + kill_rules(context, victim);
6835 list_del_init(&victim->list);
6836
6837 mutex_unlock(&audit_filter_mutex);
6838 @@ -1017,7 +1020,7 @@ static void evict_chunk(struct audit_chunk *chunk)
6839 list_del_init(&owner->same_root);
6840 spin_unlock(&hash_lock);
6841 if (!postponed) {
6842 - kill_rules(owner);
6843 + kill_rules(audit_context(), owner);
6844 list_move(&owner->list, &prune_list);
6845 need_prune = 1;
6846 } else {
6847 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
6848 index 6593a5207fb0..b585ceb2f7a2 100644
6849 --- a/kernel/auditsc.c
6850 +++ b/kernel/auditsc.c
6851 @@ -1444,6 +1444,9 @@ void __audit_free(struct task_struct *tsk)
6852 if (!context)
6853 return;
6854
6855 + if (!list_empty(&context->killed_trees))
6856 + audit_kill_trees(context);
6857 +
6858 /* We are called either by do_exit() or the fork() error handling code;
6859 * in the former case tsk == current and in the latter tsk is a
6860 * random task_struct that doesn't doesn't have any meaningful data we
6861 @@ -1460,9 +1463,6 @@ void __audit_free(struct task_struct *tsk)
6862 audit_log_exit();
6863 }
6864
6865 - if (!list_empty(&context->killed_trees))
6866 - audit_kill_trees(&context->killed_trees);
6867 -
6868 audit_set_context(tsk, NULL);
6869 audit_free_context(context);
6870 }
6871 @@ -1537,6 +1537,9 @@ void __audit_syscall_exit(int success, long return_code)
6872 if (!context)
6873 return;
6874
6875 + if (!list_empty(&context->killed_trees))
6876 + audit_kill_trees(context);
6877 +
6878 if (!context->dummy && context->in_syscall) {
6879 if (success)
6880 context->return_valid = AUDITSC_SUCCESS;
6881 @@ -1571,9 +1574,6 @@ void __audit_syscall_exit(int success, long return_code)
6882 context->in_syscall = 0;
6883 context->prio = context->state == AUDIT_RECORD_CONTEXT ? ~0ULL : 0;
6884
6885 - if (!list_empty(&context->killed_trees))
6886 - audit_kill_trees(&context->killed_trees);
6887 -
6888 audit_free_names(context);
6889 unroll_tree_refs(context, NULL, 0);
6890 audit_free_aux(context);
6891 diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
6892 index 503bba3c4bae..f84bf28f36ba 100644
6893 --- a/kernel/cgroup/cgroup.c
6894 +++ b/kernel/cgroup/cgroup.c
6895 @@ -197,7 +197,7 @@ static u64 css_serial_nr_next = 1;
6896 */
6897 static u16 have_fork_callback __read_mostly;
6898 static u16 have_exit_callback __read_mostly;
6899 -static u16 have_free_callback __read_mostly;
6900 +static u16 have_release_callback __read_mostly;
6901 static u16 have_canfork_callback __read_mostly;
6902
6903 /* cgroup namespace for init task */
6904 @@ -5316,7 +5316,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss, bool early)
6905
6906 have_fork_callback |= (bool)ss->fork << ss->id;
6907 have_exit_callback |= (bool)ss->exit << ss->id;
6908 - have_free_callback |= (bool)ss->free << ss->id;
6909 + have_release_callback |= (bool)ss->release << ss->id;
6910 have_canfork_callback |= (bool)ss->can_fork << ss->id;
6911
6912 /* At system boot, before all subsystems have been
6913 @@ -5752,16 +5752,19 @@ void cgroup_exit(struct task_struct *tsk)
6914 } while_each_subsys_mask();
6915 }
6916
6917 -void cgroup_free(struct task_struct *task)
6918 +void cgroup_release(struct task_struct *task)
6919 {
6920 - struct css_set *cset = task_css_set(task);
6921 struct cgroup_subsys *ss;
6922 int ssid;
6923
6924 - do_each_subsys_mask(ss, ssid, have_free_callback) {
6925 - ss->free(task);
6926 + do_each_subsys_mask(ss, ssid, have_release_callback) {
6927 + ss->release(task);
6928 } while_each_subsys_mask();
6929 +}
6930
6931 +void cgroup_free(struct task_struct *task)
6932 +{
6933 + struct css_set *cset = task_css_set(task);
6934 put_css_set(cset);
6935 }
6936
6937 diff --git a/kernel/cgroup/pids.c b/kernel/cgroup/pids.c
6938 index 9829c67ebc0a..c9960baaa14f 100644
6939 --- a/kernel/cgroup/pids.c
6940 +++ b/kernel/cgroup/pids.c
6941 @@ -247,7 +247,7 @@ static void pids_cancel_fork(struct task_struct *task)
6942 pids_uncharge(pids, 1);
6943 }
6944
6945 -static void pids_free(struct task_struct *task)
6946 +static void pids_release(struct task_struct *task)
6947 {
6948 struct pids_cgroup *pids = css_pids(task_css(task, pids_cgrp_id));
6949
6950 @@ -342,7 +342,7 @@ struct cgroup_subsys pids_cgrp_subsys = {
6951 .cancel_attach = pids_cancel_attach,
6952 .can_fork = pids_can_fork,
6953 .cancel_fork = pids_cancel_fork,
6954 - .free = pids_free,
6955 + .release = pids_release,
6956 .legacy_cftypes = pids_files,
6957 .dfl_cftypes = pids_files,
6958 .threaded = true,
6959 diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
6960 index d503d1a9007c..bb95a35e8c2d 100644
6961 --- a/kernel/cgroup/rstat.c
6962 +++ b/kernel/cgroup/rstat.c
6963 @@ -87,7 +87,6 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
6964 struct cgroup *root, int cpu)
6965 {
6966 struct cgroup_rstat_cpu *rstatc;
6967 - struct cgroup *parent;
6968
6969 if (pos == root)
6970 return NULL;
6971 @@ -115,8 +114,8 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
6972 * However, due to the way we traverse, @pos will be the first
6973 * child in most cases. The only exception is @root.
6974 */
6975 - parent = cgroup_parent(pos);
6976 - if (parent && rstatc->updated_next) {
6977 + if (rstatc->updated_next) {
6978 + struct cgroup *parent = cgroup_parent(pos);
6979 struct cgroup_rstat_cpu *prstatc = cgroup_rstat_cpu(parent, cpu);
6980 struct cgroup_rstat_cpu *nrstatc;
6981 struct cgroup **nextp;
6982 @@ -140,9 +139,12 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
6983 * updated stat.
6984 */
6985 smp_mb();
6986 +
6987 + return pos;
6988 }
6989
6990 - return pos;
6991 + /* only happens for @root */
6992 + return NULL;
6993 }
6994
6995 /* see cgroup_rstat_flush() */
6996 diff --git a/kernel/cpu.c b/kernel/cpu.c
6997 index 47f695d80dd1..6754f3ecfd94 100644
6998 --- a/kernel/cpu.c
6999 +++ b/kernel/cpu.c
7000 @@ -313,6 +313,15 @@ void cpus_write_unlock(void)
7001
7002 void lockdep_assert_cpus_held(void)
7003 {
7004 + /*
7005 + * We can't have hotplug operations before userspace starts running,
7006 + * and some init codepaths will knowingly not take the hotplug lock.
7007 + * This is all valid, so mute lockdep until it makes sense to report
7008 + * unheld locks.
7009 + */
7010 + if (system_state < SYSTEM_RUNNING)
7011 + return;
7012 +
7013 percpu_rwsem_assert_held(&cpu_hotplug_lock);
7014 }
7015
7016 diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
7017 index 5ab4fe3b1dcc..878c62ec0190 100644
7018 --- a/kernel/events/ring_buffer.c
7019 +++ b/kernel/events/ring_buffer.c
7020 @@ -658,7 +658,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
7021 goto out;
7022 }
7023
7024 - rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
7025 + rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
7026 overwrite);
7027 if (!rb->aux_priv)
7028 goto out;
7029 diff --git a/kernel/exit.c b/kernel/exit.c
7030 index 2639a30a8aa5..2166c2d92ddc 100644
7031 --- a/kernel/exit.c
7032 +++ b/kernel/exit.c
7033 @@ -219,6 +219,7 @@ repeat:
7034 }
7035
7036 write_unlock_irq(&tasklist_lock);
7037 + cgroup_release(p);
7038 release_thread(p);
7039 call_rcu(&p->rcu, delayed_put_task_struct);
7040
7041 diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
7042 index 34e969069488..e960c4f46ee0 100644
7043 --- a/kernel/irq/chip.c
7044 +++ b/kernel/irq/chip.c
7045 @@ -855,7 +855,11 @@ void handle_percpu_irq(struct irq_desc *desc)
7046 {
7047 struct irq_chip *chip = irq_desc_get_chip(desc);
7048
7049 - kstat_incr_irqs_this_cpu(desc);
7050 + /*
7051 + * PER CPU interrupts are not serialized. Do not touch
7052 + * desc->tot_count.
7053 + */
7054 + __kstat_incr_irqs_this_cpu(desc);
7055
7056 if (chip->irq_ack)
7057 chip->irq_ack(&desc->irq_data);
7058 @@ -884,7 +888,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
7059 unsigned int irq = irq_desc_get_irq(desc);
7060 irqreturn_t res;
7061
7062 - kstat_incr_irqs_this_cpu(desc);
7063 + /*
7064 + * PER CPU interrupts are not serialized. Do not touch
7065 + * desc->tot_count.
7066 + */
7067 + __kstat_incr_irqs_this_cpu(desc);
7068
7069 if (chip->irq_ack)
7070 chip->irq_ack(&desc->irq_data);
7071 diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
7072 index ca6afa267070..e74e7eea76cf 100644
7073 --- a/kernel/irq/internals.h
7074 +++ b/kernel/irq/internals.h
7075 @@ -242,12 +242,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
7076
7077 #undef __irqd_to_state
7078
7079 -static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
7080 +static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
7081 {
7082 __this_cpu_inc(*desc->kstat_irqs);
7083 __this_cpu_inc(kstat.irqs_sum);
7084 }
7085
7086 +static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
7087 +{
7088 + __kstat_incr_irqs_this_cpu(desc);
7089 + desc->tot_count++;
7090 +}
7091 +
7092 static inline int irq_desc_get_node(struct irq_desc *desc)
7093 {
7094 return irq_common_data_get_node(&desc->irq_common_data);
7095 diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
7096 index ef8ad36cadcf..84fa255d0329 100644
7097 --- a/kernel/irq/irqdesc.c
7098 +++ b/kernel/irq/irqdesc.c
7099 @@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
7100 desc->depth = 1;
7101 desc->irq_count = 0;
7102 desc->irqs_unhandled = 0;
7103 + desc->tot_count = 0;
7104 desc->name = NULL;
7105 desc->owner = owner;
7106 for_each_possible_cpu(cpu)
7107 @@ -919,11 +920,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
7108 unsigned int kstat_irqs(unsigned int irq)
7109 {
7110 struct irq_desc *desc = irq_to_desc(irq);
7111 - int cpu;
7112 unsigned int sum = 0;
7113 + int cpu;
7114
7115 if (!desc || !desc->kstat_irqs)
7116 return 0;
7117 + if (!irq_settings_is_per_cpu_devid(desc) &&
7118 + !irq_settings_is_per_cpu(desc))
7119 + return desc->tot_count;
7120 +
7121 for_each_possible_cpu(cpu)
7122 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
7123 return sum;
7124 diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
7125 index 1971869c4072..f4ca36d92138 100644
7126 --- a/kernel/rcu/update.c
7127 +++ b/kernel/rcu/update.c
7128 @@ -52,6 +52,7 @@
7129 #include <linux/tick.h>
7130 #include <linux/rcupdate_wait.h>
7131 #include <linux/sched/isolation.h>
7132 +#include <linux/kprobes.h>
7133
7134 #define CREATE_TRACE_POINTS
7135
7136 @@ -249,6 +250,7 @@ int notrace debug_lockdep_rcu_enabled(void)
7137 current->lockdep_recursion == 0;
7138 }
7139 EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
7140 +NOKPROBE_SYMBOL(debug_lockdep_rcu_enabled);
7141
7142 /**
7143 * rcu_read_lock_held() - might we be in RCU read-side critical section?
7144 diff --git a/kernel/resource.c b/kernel/resource.c
7145 index 915c02e8e5dd..ca7ed5158cff 100644
7146 --- a/kernel/resource.c
7147 +++ b/kernel/resource.c
7148 @@ -382,7 +382,7 @@ static int __walk_iomem_res_desc(resource_size_t start, resource_size_t end,
7149 int (*func)(struct resource *, void *))
7150 {
7151 struct resource res;
7152 - int ret = -1;
7153 + int ret = -EINVAL;
7154
7155 while (start < end &&
7156 !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) {
7157 @@ -462,7 +462,7 @@ int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
7158 unsigned long flags;
7159 struct resource res;
7160 unsigned long pfn, end_pfn;
7161 - int ret = -1;
7162 + int ret = -EINVAL;
7163
7164 start = (u64) start_pfn << PAGE_SHIFT;
7165 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
7166 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
7167 index d8d76a65cfdd..01a2489de94e 100644
7168 --- a/kernel/sched/core.c
7169 +++ b/kernel/sched/core.c
7170 @@ -107,11 +107,12 @@ struct rq *task_rq_lock(struct task_struct *p, struct rq_flags *rf)
7171 * [L] ->on_rq
7172 * RELEASE (rq->lock)
7173 *
7174 - * If we observe the old CPU in task_rq_lock, the acquire of
7175 + * If we observe the old CPU in task_rq_lock(), the acquire of
7176 * the old rq->lock will fully serialize against the stores.
7177 *
7178 - * If we observe the new CPU in task_rq_lock, the acquire will
7179 - * pair with the WMB to ensure we must then also see migrating.
7180 + * If we observe the new CPU in task_rq_lock(), the address
7181 + * dependency headed by '[L] rq = task_rq()' and the acquire
7182 + * will pair with the WMB to ensure we then also see migrating.
7183 */
7184 if (likely(rq == task_rq(p) && !task_on_rq_migrating(p))) {
7185 rq_pin_lock(rq, rf);
7186 @@ -928,7 +929,7 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
7187 {
7188 lockdep_assert_held(&rq->lock);
7189
7190 - p->on_rq = TASK_ON_RQ_MIGRATING;
7191 + WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
7192 dequeue_task(rq, p, DEQUEUE_NOCLOCK);
7193 set_task_cpu(p, new_cpu);
7194 rq_unlock(rq, rf);
7195 diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
7196 index de3de997e245..8039d62ae36e 100644
7197 --- a/kernel/sched/debug.c
7198 +++ b/kernel/sched/debug.c
7199 @@ -315,6 +315,7 @@ void register_sched_domain_sysctl(void)
7200 {
7201 static struct ctl_table *cpu_entries;
7202 static struct ctl_table **cpu_idx;
7203 + static bool init_done = false;
7204 char buf[32];
7205 int i;
7206
7207 @@ -344,7 +345,10 @@ void register_sched_domain_sysctl(void)
7208 if (!cpumask_available(sd_sysctl_cpus)) {
7209 if (!alloc_cpumask_var(&sd_sysctl_cpus, GFP_KERNEL))
7210 return;
7211 + }
7212
7213 + if (!init_done) {
7214 + init_done = true;
7215 /* init to possible to not have holes in @cpu_entries */
7216 cpumask_copy(sd_sysctl_cpus, cpu_possible_mask);
7217 }
7218 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
7219 index d04530bf251f..425a5589e5f6 100644
7220 --- a/kernel/sched/sched.h
7221 +++ b/kernel/sched/sched.h
7222 @@ -1460,9 +1460,9 @@ static inline void __set_task_cpu(struct task_struct *p, unsigned int cpu)
7223 */
7224 smp_wmb();
7225 #ifdef CONFIG_THREAD_INFO_IN_TASK
7226 - p->cpu = cpu;
7227 + WRITE_ONCE(p->cpu, cpu);
7228 #else
7229 - task_thread_info(p)->cpu = cpu;
7230 + WRITE_ONCE(task_thread_info(p)->cpu, cpu);
7231 #endif
7232 p->wake_cpu = cpu;
7233 #endif
7234 @@ -1563,7 +1563,7 @@ static inline int task_on_rq_queued(struct task_struct *p)
7235
7236 static inline int task_on_rq_migrating(struct task_struct *p)
7237 {
7238 - return p->on_rq == TASK_ON_RQ_MIGRATING;
7239 + return READ_ONCE(p->on_rq) == TASK_ON_RQ_MIGRATING;
7240 }
7241
7242 /*
7243 diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
7244 index 3f35ba1d8fde..efca2489d881 100644
7245 --- a/kernel/sched/topology.c
7246 +++ b/kernel/sched/topology.c
7247 @@ -676,7 +676,7 @@ cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu)
7248 }
7249
7250 struct s_data {
7251 - struct sched_domain ** __percpu sd;
7252 + struct sched_domain * __percpu *sd;
7253 struct root_domain *rd;
7254 };
7255
7256 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
7257 index d80bee8ff12e..28ec71d914c7 100644
7258 --- a/kernel/sysctl.c
7259 +++ b/kernel/sysctl.c
7260 @@ -127,6 +127,7 @@ static int __maybe_unused one = 1;
7261 static int __maybe_unused two = 2;
7262 static int __maybe_unused four = 4;
7263 static unsigned long one_ul = 1;
7264 +static unsigned long long_max = LONG_MAX;
7265 static int one_hundred = 100;
7266 static int one_thousand = 1000;
7267 #ifdef CONFIG_PRINTK
7268 @@ -1722,6 +1723,8 @@ static struct ctl_table fs_table[] = {
7269 .maxlen = sizeof(files_stat.max_files),
7270 .mode = 0644,
7271 .proc_handler = proc_doulongvec_minmax,
7272 + .extra1 = &zero,
7273 + .extra2 = &long_max,
7274 },
7275 {
7276 .procname = "nr_open",
7277 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
7278 index 06e864a334bb..b49affb4666b 100644
7279 --- a/kernel/trace/ring_buffer.c
7280 +++ b/kernel/trace/ring_buffer.c
7281 @@ -4205,6 +4205,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
7282 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
7283 * @buffer: The ring buffer to read from
7284 * @cpu: The cpu buffer to iterate over
7285 + * @flags: gfp flags to use for memory allocation
7286 *
7287 * This performs the initial preparations necessary to iterate
7288 * through the buffer. Memory is allocated, buffer recording
7289 @@ -4222,7 +4223,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
7290 * This overall must be paired with ring_buffer_read_finish.
7291 */
7292 struct ring_buffer_iter *
7293 -ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
7294 +ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
7295 {
7296 struct ring_buffer_per_cpu *cpu_buffer;
7297 struct ring_buffer_iter *iter;
7298 @@ -4230,7 +4231,7 @@ ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
7299 if (!cpumask_test_cpu(cpu, buffer->cpumask))
7300 return NULL;
7301
7302 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
7303 + iter = kmalloc(sizeof(*iter), flags);
7304 if (!iter)
7305 return NULL;
7306
7307 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
7308 index 5f40db27aaf2..89158aa93fa6 100644
7309 --- a/kernel/trace/trace.c
7310 +++ b/kernel/trace/trace.c
7311 @@ -3904,7 +3904,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
7312 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
7313 for_each_tracing_cpu(cpu) {
7314 iter->buffer_iter[cpu] =
7315 - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
7316 + ring_buffer_read_prepare(iter->trace_buffer->buffer,
7317 + cpu, GFP_KERNEL);
7318 }
7319 ring_buffer_read_prepare_sync();
7320 for_each_tracing_cpu(cpu) {
7321 @@ -3914,7 +3915,8 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
7322 } else {
7323 cpu = iter->cpu_file;
7324 iter->buffer_iter[cpu] =
7325 - ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
7326 + ring_buffer_read_prepare(iter->trace_buffer->buffer,
7327 + cpu, GFP_KERNEL);
7328 ring_buffer_read_prepare_sync();
7329 ring_buffer_read_start(iter->buffer_iter[cpu]);
7330 tracing_iter_reset(iter, cpu);
7331 diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
7332 index d953c163a079..810d78a8d14c 100644
7333 --- a/kernel/trace/trace_kdb.c
7334 +++ b/kernel/trace/trace_kdb.c
7335 @@ -51,14 +51,16 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
7336 if (cpu_file == RING_BUFFER_ALL_CPUS) {
7337 for_each_tracing_cpu(cpu) {
7338 iter.buffer_iter[cpu] =
7339 - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
7340 + ring_buffer_read_prepare(iter.trace_buffer->buffer,
7341 + cpu, GFP_ATOMIC);
7342 ring_buffer_read_start(iter.buffer_iter[cpu]);
7343 tracing_iter_reset(&iter, cpu);
7344 }
7345 } else {
7346 iter.cpu_file = cpu_file;
7347 iter.buffer_iter[cpu_file] =
7348 - ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
7349 + ring_buffer_read_prepare(iter.trace_buffer->buffer,
7350 + cpu_file, GFP_ATOMIC);
7351 ring_buffer_read_start(iter.buffer_iter[cpu_file]);
7352 tracing_iter_reset(&iter, cpu_file);
7353 }
7354 diff --git a/lib/bsearch.c b/lib/bsearch.c
7355 index 18b445b010c3..82512fe7b33c 100644
7356 --- a/lib/bsearch.c
7357 +++ b/lib/bsearch.c
7358 @@ -11,6 +11,7 @@
7359
7360 #include <linux/export.h>
7361 #include <linux/bsearch.h>
7362 +#include <linux/kprobes.h>
7363
7364 /*
7365 * bsearch - binary search an array of elements
7366 @@ -53,3 +54,4 @@ void *bsearch(const void *key, const void *base, size_t num, size_t size,
7367 return NULL;
7368 }
7369 EXPORT_SYMBOL(bsearch);
7370 +NOKPROBE_SYMBOL(bsearch);
7371 diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
7372 index 4e90d443d1b0..e723eacf7868 100644
7373 --- a/lib/raid6/Makefile
7374 +++ b/lib/raid6/Makefile
7375 @@ -39,7 +39,7 @@ endif
7376 ifeq ($(CONFIG_KERNEL_MODE_NEON),y)
7377 NEON_FLAGS := -ffreestanding
7378 ifeq ($(ARCH),arm)
7379 -NEON_FLAGS += -mfloat-abi=softfp -mfpu=neon
7380 +NEON_FLAGS += -march=armv7-a -mfloat-abi=softfp -mfpu=neon
7381 endif
7382 CFLAGS_recov_neon_inner.o += $(NEON_FLAGS)
7383 ifeq ($(ARCH),arm64)
7384 diff --git a/mm/cma.c b/mm/cma.c
7385 index c7b39dd3b4f6..f4f3a8a57d86 100644
7386 --- a/mm/cma.c
7387 +++ b/mm/cma.c
7388 @@ -353,12 +353,14 @@ int __init cma_declare_contiguous(phys_addr_t base,
7389
7390 ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
7391 if (ret)
7392 - goto err;
7393 + goto free_mem;
7394
7395 pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
7396 &base);
7397 return 0;
7398
7399 +free_mem:
7400 + memblock_free(base, size);
7401 err:
7402 pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
7403 return ret;
7404 diff --git a/mm/kasan/common.c b/mm/kasan/common.c
7405 index 09b534fbba17..80bbe62b16cd 100644
7406 --- a/mm/kasan/common.c
7407 +++ b/mm/kasan/common.c
7408 @@ -14,6 +14,8 @@
7409 *
7410 */
7411
7412 +#define __KASAN_INTERNAL
7413 +
7414 #include <linux/export.h>
7415 #include <linux/interrupt.h>
7416 #include <linux/init.h>
7417 diff --git a/mm/memcontrol.c b/mm/memcontrol.c
7418 index af7f18b32389..79a7d2a06bba 100644
7419 --- a/mm/memcontrol.c
7420 +++ b/mm/memcontrol.c
7421 @@ -248,6 +248,12 @@ enum res_type {
7422 iter != NULL; \
7423 iter = mem_cgroup_iter(NULL, iter, NULL))
7424
7425 +static inline bool should_force_charge(void)
7426 +{
7427 + return tsk_is_oom_victim(current) || fatal_signal_pending(current) ||
7428 + (current->flags & PF_EXITING);
7429 +}
7430 +
7431 /* Some nice accessors for the vmpressure. */
7432 struct vmpressure *memcg_to_vmpressure(struct mem_cgroup *memcg)
7433 {
7434 @@ -1389,8 +1395,13 @@ static bool mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
7435 };
7436 bool ret;
7437
7438 - mutex_lock(&oom_lock);
7439 - ret = out_of_memory(&oc);
7440 + if (mutex_lock_killable(&oom_lock))
7441 + return true;
7442 + /*
7443 + * A few threads which were not waiting at mutex_lock_killable() can
7444 + * fail to bail out. Therefore, check again after holding oom_lock.
7445 + */
7446 + ret = should_force_charge() || out_of_memory(&oc);
7447 mutex_unlock(&oom_lock);
7448 return ret;
7449 }
7450 @@ -2209,9 +2220,7 @@ retry:
7451 * bypass the last charges so that they can exit quickly and
7452 * free their memory.
7453 */
7454 - if (unlikely(tsk_is_oom_victim(current) ||
7455 - fatal_signal_pending(current) ||
7456 - current->flags & PF_EXITING))
7457 + if (unlikely(should_force_charge()))
7458 goto force;
7459
7460 /*
7461 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
7462 index 6bc9786aad6e..c2275c1e6d2a 100644
7463 --- a/mm/mempolicy.c
7464 +++ b/mm/mempolicy.c
7465 @@ -350,7 +350,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask)
7466 {
7467 if (!pol)
7468 return;
7469 - if (!mpol_store_user_nodemask(pol) &&
7470 + if (!mpol_store_user_nodemask(pol) && !(pol->flags & MPOL_F_LOCAL) &&
7471 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
7472 return;
7473
7474 diff --git a/mm/oom_kill.c b/mm/oom_kill.c
7475 index 26ea8636758f..da0e44914085 100644
7476 --- a/mm/oom_kill.c
7477 +++ b/mm/oom_kill.c
7478 @@ -928,7 +928,8 @@ static void __oom_kill_process(struct task_struct *victim)
7479 */
7480 static int oom_kill_memcg_member(struct task_struct *task, void *unused)
7481 {
7482 - if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN) {
7483 + if (task->signal->oom_score_adj != OOM_SCORE_ADJ_MIN &&
7484 + !is_global_init(task)) {
7485 get_task_struct(task);
7486 __oom_kill_process(task);
7487 }
7488 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
7489 index 11dc3c0e8728..20dd3283bb1b 100644
7490 --- a/mm/page_alloc.c
7491 +++ b/mm/page_alloc.c
7492 @@ -1945,8 +1945,8 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
7493
7494 arch_alloc_page(page, order);
7495 kernel_map_pages(page, 1 << order, 1);
7496 - kernel_poison_pages(page, 1 << order, 1);
7497 kasan_alloc_pages(page, order);
7498 + kernel_poison_pages(page, 1 << order, 1);
7499 set_page_owner(page, order, gfp_flags);
7500 }
7501
7502 diff --git a/mm/page_ext.c b/mm/page_ext.c
7503 index 8c78b8d45117..f116431c3dee 100644
7504 --- a/mm/page_ext.c
7505 +++ b/mm/page_ext.c
7506 @@ -273,6 +273,7 @@ static void free_page_ext(void *addr)
7507 table_size = get_entry_size() * PAGES_PER_SECTION;
7508
7509 BUG_ON(PageReserved(page));
7510 + kmemleak_free(addr);
7511 free_pages_exact(addr, table_size);
7512 }
7513 }
7514 diff --git a/mm/page_poison.c b/mm/page_poison.c
7515 index f0c15e9017c0..21d4f97cb49b 100644
7516 --- a/mm/page_poison.c
7517 +++ b/mm/page_poison.c
7518 @@ -6,6 +6,7 @@
7519 #include <linux/page_ext.h>
7520 #include <linux/poison.h>
7521 #include <linux/ratelimit.h>
7522 +#include <linux/kasan.h>
7523
7524 static bool want_page_poisoning __read_mostly;
7525
7526 @@ -40,7 +41,10 @@ static void poison_page(struct page *page)
7527 {
7528 void *addr = kmap_atomic(page);
7529
7530 + /* KASAN still think the page is in-use, so skip it. */
7531 + kasan_disable_current();
7532 memset(addr, PAGE_POISON, PAGE_SIZE);
7533 + kasan_enable_current();
7534 kunmap_atomic(addr);
7535 }
7536
7537 diff --git a/mm/slab.c b/mm/slab.c
7538 index b3e74b56a468..2f2aa8eaf7d9 100644
7539 --- a/mm/slab.c
7540 +++ b/mm/slab.c
7541 @@ -550,14 +550,6 @@ static void start_cpu_timer(int cpu)
7542
7543 static void init_arraycache(struct array_cache *ac, int limit, int batch)
7544 {
7545 - /*
7546 - * The array_cache structures contain pointers to free object.
7547 - * However, when such objects are allocated or transferred to another
7548 - * cache the pointers are not cleared and they could be counted as
7549 - * valid references during a kmemleak scan. Therefore, kmemleak must
7550 - * not scan such objects.
7551 - */
7552 - kmemleak_no_scan(ac);
7553 if (ac) {
7554 ac->avail = 0;
7555 ac->limit = limit;
7556 @@ -573,6 +565,14 @@ static struct array_cache *alloc_arraycache(int node, int entries,
7557 struct array_cache *ac = NULL;
7558
7559 ac = kmalloc_node(memsize, gfp, node);
7560 + /*
7561 + * The array_cache structures contain pointers to free object.
7562 + * However, when such objects are allocated or transferred to another
7563 + * cache the pointers are not cleared and they could be counted as
7564 + * valid references during a kmemleak scan. Therefore, kmemleak must
7565 + * not scan such objects.
7566 + */
7567 + kmemleak_no_scan(ac);
7568 init_arraycache(ac, entries, batchcount);
7569 return ac;
7570 }
7571 @@ -667,6 +667,7 @@ static struct alien_cache *__alloc_alien_cache(int node, int entries,
7572
7573 alc = kmalloc_node(memsize, gfp, node);
7574 if (alc) {
7575 + kmemleak_no_scan(alc);
7576 init_arraycache(&alc->ac, entries, batch);
7577 spin_lock_init(&alc->lock);
7578 }
7579 diff --git a/mm/sparse.c b/mm/sparse.c
7580 index 4763519d4399..b3771f35a0ed 100644
7581 --- a/mm/sparse.c
7582 +++ b/mm/sparse.c
7583 @@ -197,7 +197,7 @@ static inline int next_present_section_nr(int section_nr)
7584 }
7585 #define for_each_present_section_nr(start, section_nr) \
7586 for (section_nr = next_present_section_nr(start-1); \
7587 - ((section_nr >= 0) && \
7588 + ((section_nr != -1) && \
7589 (section_nr <= __highest_present_section_nr)); \
7590 section_nr = next_present_section_nr(section_nr))
7591
7592 diff --git a/mm/swapfile.c b/mm/swapfile.c
7593 index dbac1d49469d..67f60e051814 100644
7594 --- a/mm/swapfile.c
7595 +++ b/mm/swapfile.c
7596 @@ -98,6 +98,15 @@ static atomic_t proc_poll_event = ATOMIC_INIT(0);
7597
7598 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
7599
7600 +static struct swap_info_struct *swap_type_to_swap_info(int type)
7601 +{
7602 + if (type >= READ_ONCE(nr_swapfiles))
7603 + return NULL;
7604 +
7605 + smp_rmb(); /* Pairs with smp_wmb in alloc_swap_info. */
7606 + return READ_ONCE(swap_info[type]);
7607 +}
7608 +
7609 static inline unsigned char swap_count(unsigned char ent)
7610 {
7611 return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
7612 @@ -1044,12 +1053,14 @@ noswap:
7613 /* The only caller of this function is now suspend routine */
7614 swp_entry_t get_swap_page_of_type(int type)
7615 {
7616 - struct swap_info_struct *si;
7617 + struct swap_info_struct *si = swap_type_to_swap_info(type);
7618 pgoff_t offset;
7619
7620 - si = swap_info[type];
7621 + if (!si)
7622 + goto fail;
7623 +
7624 spin_lock(&si->lock);
7625 - if (si && (si->flags & SWP_WRITEOK)) {
7626 + if (si->flags & SWP_WRITEOK) {
7627 atomic_long_dec(&nr_swap_pages);
7628 /* This is called for allocating swap entry, not cache */
7629 offset = scan_swap_map(si, 1);
7630 @@ -1060,6 +1071,7 @@ swp_entry_t get_swap_page_of_type(int type)
7631 atomic_long_inc(&nr_swap_pages);
7632 }
7633 spin_unlock(&si->lock);
7634 +fail:
7635 return (swp_entry_t) {0};
7636 }
7637
7638 @@ -1071,9 +1083,9 @@ static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
7639 if (!entry.val)
7640 goto out;
7641 type = swp_type(entry);
7642 - if (type >= nr_swapfiles)
7643 + p = swap_type_to_swap_info(type);
7644 + if (!p)
7645 goto bad_nofile;
7646 - p = swap_info[type];
7647 if (!(p->flags & SWP_USED))
7648 goto bad_device;
7649 offset = swp_offset(entry);
7650 @@ -1697,10 +1709,9 @@ int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p)
7651 sector_t swapdev_block(int type, pgoff_t offset)
7652 {
7653 struct block_device *bdev;
7654 + struct swap_info_struct *si = swap_type_to_swap_info(type);
7655
7656 - if ((unsigned int)type >= nr_swapfiles)
7657 - return 0;
7658 - if (!(swap_info[type]->flags & SWP_WRITEOK))
7659 + if (!si || !(si->flags & SWP_WRITEOK))
7660 return 0;
7661 return map_swap_entry(swp_entry(type, offset), &bdev);
7662 }
7663 @@ -2258,7 +2269,7 @@ static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev)
7664 struct swap_extent *se;
7665 pgoff_t offset;
7666
7667 - sis = swap_info[swp_type(entry)];
7668 + sis = swp_swap_info(entry);
7669 *bdev = sis->bdev;
7670
7671 offset = swp_offset(entry);
7672 @@ -2700,9 +2711,7 @@ static void *swap_start(struct seq_file *swap, loff_t *pos)
7673 if (!l)
7674 return SEQ_START_TOKEN;
7675
7676 - for (type = 0; type < nr_swapfiles; type++) {
7677 - smp_rmb(); /* read nr_swapfiles before swap_info[type] */
7678 - si = swap_info[type];
7679 + for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
7680 if (!(si->flags & SWP_USED) || !si->swap_map)
7681 continue;
7682 if (!--l)
7683 @@ -2722,9 +2731,7 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
7684 else
7685 type = si->type + 1;
7686
7687 - for (; type < nr_swapfiles; type++) {
7688 - smp_rmb(); /* read nr_swapfiles before swap_info[type] */
7689 - si = swap_info[type];
7690 + for (; (si = swap_type_to_swap_info(type)); type++) {
7691 if (!(si->flags & SWP_USED) || !si->swap_map)
7692 continue;
7693 ++*pos;
7694 @@ -2831,14 +2838,14 @@ static struct swap_info_struct *alloc_swap_info(void)
7695 }
7696 if (type >= nr_swapfiles) {
7697 p->type = type;
7698 - swap_info[type] = p;
7699 + WRITE_ONCE(swap_info[type], p);
7700 /*
7701 * Write swap_info[type] before nr_swapfiles, in case a
7702 * racing procfs swap_start() or swap_next() is reading them.
7703 * (We never shrink nr_swapfiles, we never free this entry.)
7704 */
7705 smp_wmb();
7706 - nr_swapfiles++;
7707 + WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1);
7708 } else {
7709 kvfree(p);
7710 p = swap_info[type];
7711 @@ -3358,7 +3365,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
7712 {
7713 struct swap_info_struct *p;
7714 struct swap_cluster_info *ci;
7715 - unsigned long offset, type;
7716 + unsigned long offset;
7717 unsigned char count;
7718 unsigned char has_cache;
7719 int err = -EINVAL;
7720 @@ -3366,10 +3373,10 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage)
7721 if (non_swap_entry(entry))
7722 goto out;
7723
7724 - type = swp_type(entry);
7725 - if (type >= nr_swapfiles)
7726 + p = swp_swap_info(entry);
7727 + if (!p)
7728 goto bad_file;
7729 - p = swap_info[type];
7730 +
7731 offset = swp_offset(entry);
7732 if (unlikely(offset >= p->max))
7733 goto out;
7734 @@ -3466,7 +3473,7 @@ int swapcache_prepare(swp_entry_t entry)
7735
7736 struct swap_info_struct *swp_swap_info(swp_entry_t entry)
7737 {
7738 - return swap_info[swp_type(entry)];
7739 + return swap_type_to_swap_info(swp_type(entry));
7740 }
7741
7742 struct swap_info_struct *page_swap_info(struct page *page)
7743 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
7744 index 2cd24186ba84..583630bf247d 100644
7745 --- a/mm/vmalloc.c
7746 +++ b/mm/vmalloc.c
7747 @@ -498,7 +498,11 @@ nocache:
7748 }
7749
7750 found:
7751 - if (addr + size > vend)
7752 + /*
7753 + * Check also calculated address against the vstart,
7754 + * because it can be 0 because of big align request.
7755 + */
7756 + if (addr + size > vend || addr < vstart)
7757 goto overflow;
7758
7759 va->va_start = addr;
7760 diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c
7761 index c93c35bb73dd..40d058378b52 100644
7762 --- a/net/bridge/br_netfilter_hooks.c
7763 +++ b/net/bridge/br_netfilter_hooks.c
7764 @@ -881,11 +881,6 @@ static const struct nf_br_ops br_ops = {
7765 .br_dev_xmit_hook = br_nf_dev_xmit,
7766 };
7767
7768 -void br_netfilter_enable(void)
7769 -{
7770 -}
7771 -EXPORT_SYMBOL_GPL(br_netfilter_enable);
7772 -
7773 /* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
7774 * br_dev_queue_push_xmit is called afterwards */
7775 static const struct nf_hook_ops br_nf_ops[] = {
7776 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
7777 index db4d46332e86..9dd4c2048a2b 100644
7778 --- a/net/netfilter/nf_conntrack_core.c
7779 +++ b/net/netfilter/nf_conntrack_core.c
7780 @@ -901,10 +901,18 @@ __nf_conntrack_confirm(struct sk_buff *skb)
7781 * REJECT will give spurious warnings here.
7782 */
7783
7784 - /* No external references means no one else could have
7785 - * confirmed us.
7786 + /* Another skb with the same unconfirmed conntrack may
7787 + * win the race. This may happen for bridge(br_flood)
7788 + * or broadcast/multicast packets do skb_clone with
7789 + * unconfirmed conntrack.
7790 */
7791 - WARN_ON(nf_ct_is_confirmed(ct));
7792 + if (unlikely(nf_ct_is_confirmed(ct))) {
7793 + WARN_ON_ONCE(1);
7794 + nf_conntrack_double_unlock(hash, reply_hash);
7795 + local_bh_enable();
7796 + return NF_DROP;
7797 + }
7798 +
7799 pr_debug("Confirming conntrack %p\n", ct);
7800 /* We have to check the DYING flag after unlink to prevent
7801 * a race against nf_ct_get_next_corpse() possibly called from
7802 diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
7803 index 4dcbd51a8e97..74fb3fa34db4 100644
7804 --- a/net/netfilter/nf_conntrack_proto_tcp.c
7805 +++ b/net/netfilter/nf_conntrack_proto_tcp.c
7806 @@ -828,6 +828,12 @@ static noinline bool tcp_new(struct nf_conn *ct, const struct sk_buff *skb,
7807 return true;
7808 }
7809
7810 +static bool nf_conntrack_tcp_established(const struct nf_conn *ct)
7811 +{
7812 + return ct->proto.tcp.state == TCP_CONNTRACK_ESTABLISHED &&
7813 + test_bit(IPS_ASSURED_BIT, &ct->status);
7814 +}
7815 +
7816 /* Returns verdict for packet, or -1 for invalid. */
7817 static int tcp_packet(struct nf_conn *ct,
7818 struct sk_buff *skb,
7819 @@ -1030,16 +1036,38 @@ static int tcp_packet(struct nf_conn *ct,
7820 new_state = TCP_CONNTRACK_ESTABLISHED;
7821 break;
7822 case TCP_CONNTRACK_CLOSE:
7823 - if (index == TCP_RST_SET
7824 - && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
7825 - && before(ntohl(th->seq), ct->proto.tcp.seen[!dir].td_maxack)) {
7826 - /* Invalid RST */
7827 - spin_unlock_bh(&ct->lock);
7828 - nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
7829 - return -NF_ACCEPT;
7830 + if (index != TCP_RST_SET)
7831 + break;
7832 +
7833 + if (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET) {
7834 + u32 seq = ntohl(th->seq);
7835 +
7836 + if (before(seq, ct->proto.tcp.seen[!dir].td_maxack)) {
7837 + /* Invalid RST */
7838 + spin_unlock_bh(&ct->lock);
7839 + nf_ct_l4proto_log_invalid(skb, ct, "invalid rst");
7840 + return -NF_ACCEPT;
7841 + }
7842 +
7843 + if (!nf_conntrack_tcp_established(ct) ||
7844 + seq == ct->proto.tcp.seen[!dir].td_maxack)
7845 + break;
7846 +
7847 + /* Check if rst is part of train, such as
7848 + * foo:80 > bar:4379: P, 235946583:235946602(19) ack 42
7849 + * foo:80 > bar:4379: R, 235946602:235946602(0) ack 42
7850 + */
7851 + if (ct->proto.tcp.last_index == TCP_ACK_SET &&
7852 + ct->proto.tcp.last_dir == dir &&
7853 + seq == ct->proto.tcp.last_end)
7854 + break;
7855 +
7856 + /* ... RST sequence number doesn't match exactly, keep
7857 + * established state to allow a possible challenge ACK.
7858 + */
7859 + new_state = old_state;
7860 }
7861 - if (index == TCP_RST_SET
7862 - && ((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
7863 + if (((test_bit(IPS_SEEN_REPLY_BIT, &ct->status)
7864 && ct->proto.tcp.last_index == TCP_SYN_SET)
7865 || (!test_bit(IPS_ASSURED_BIT, &ct->status)
7866 && ct->proto.tcp.last_index == TCP_ACK_SET))
7867 @@ -1055,7 +1083,7 @@ static int tcp_packet(struct nf_conn *ct,
7868 * segments we ignored. */
7869 goto in_window;
7870 }
7871 - /* Just fall through */
7872 + break;
7873 default:
7874 /* Keep compilers happy. */
7875 break;
7876 @@ -1090,6 +1118,8 @@ static int tcp_packet(struct nf_conn *ct,
7877 if (ct->proto.tcp.retrans >= tn->tcp_max_retrans &&
7878 timeouts[new_state] > timeouts[TCP_CONNTRACK_RETRANS])
7879 timeout = timeouts[TCP_CONNTRACK_RETRANS];
7880 + else if (unlikely(index == TCP_RST_SET))
7881 + timeout = timeouts[TCP_CONNTRACK_CLOSE];
7882 else if ((ct->proto.tcp.seen[0].flags | ct->proto.tcp.seen[1].flags) &
7883 IP_CT_TCP_FLAG_DATA_UNACKNOWLEDGED &&
7884 timeouts[new_state] > timeouts[TCP_CONNTRACK_UNACK])
7885 diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
7886 index a50500232b0a..7e8dae82ca52 100644
7887 --- a/net/netfilter/nf_tables_core.c
7888 +++ b/net/netfilter/nf_tables_core.c
7889 @@ -98,21 +98,23 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain,
7890 const struct nft_pktinfo *pkt)
7891 {
7892 struct nft_base_chain *base_chain;
7893 + struct nft_stats __percpu *pstats;
7894 struct nft_stats *stats;
7895
7896 base_chain = nft_base_chain(chain);
7897 - if (!rcu_access_pointer(base_chain->stats))
7898 - return;
7899
7900 - local_bh_disable();
7901 - stats = this_cpu_ptr(rcu_dereference(base_chain->stats));
7902 - if (stats) {
7903 + rcu_read_lock();
7904 + pstats = READ_ONCE(base_chain->stats);
7905 + if (pstats) {
7906 + local_bh_disable();
7907 + stats = this_cpu_ptr(pstats);
7908 u64_stats_update_begin(&stats->syncp);
7909 stats->pkts++;
7910 stats->bytes += pkt->skb->len;
7911 u64_stats_update_end(&stats->syncp);
7912 + local_bh_enable();
7913 }
7914 - local_bh_enable();
7915 + rcu_read_unlock();
7916 }
7917
7918 struct nft_jumpstack {
7919 diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
7920 index 4034d70bff39..b2e39cb6a590 100644
7921 --- a/net/netfilter/xt_physdev.c
7922 +++ b/net/netfilter/xt_physdev.c
7923 @@ -96,8 +96,7 @@ match_outdev:
7924 static int physdev_mt_check(const struct xt_mtchk_param *par)
7925 {
7926 const struct xt_physdev_info *info = par->matchinfo;
7927 -
7928 - br_netfilter_enable();
7929 + static bool brnf_probed __read_mostly;
7930
7931 if (!(info->bitmask & XT_PHYSDEV_OP_MASK) ||
7932 info->bitmask & ~XT_PHYSDEV_OP_MASK)
7933 @@ -111,6 +110,12 @@ static int physdev_mt_check(const struct xt_mtchk_param *par)
7934 if (par->hook_mask & (1 << NF_INET_LOCAL_OUT))
7935 return -EINVAL;
7936 }
7937 +
7938 + if (!brnf_probed) {
7939 + brnf_probed = true;
7940 + request_module("br_netfilter");
7941 + }
7942 +
7943 return 0;
7944 }
7945
7946 diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
7947 index 85e4fe4f18cc..f3031c8907d9 100644
7948 --- a/net/xdp/xsk.c
7949 +++ b/net/xdp/xsk.c
7950 @@ -407,6 +407,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
7951 if (sxdp->sxdp_family != AF_XDP)
7952 return -EINVAL;
7953
7954 + flags = sxdp->sxdp_flags;
7955 + if (flags & ~(XDP_SHARED_UMEM | XDP_COPY | XDP_ZEROCOPY))
7956 + return -EINVAL;
7957 +
7958 mutex_lock(&xs->mutex);
7959 if (xs->dev) {
7960 err = -EBUSY;
7961 @@ -425,7 +429,6 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
7962 }
7963
7964 qid = sxdp->sxdp_queue_id;
7965 - flags = sxdp->sxdp_flags;
7966
7967 if (flags & XDP_SHARED_UMEM) {
7968 struct xdp_sock *umem_xs;
7969 diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
7970 index 379682e2a8d5..f6c2bcb2ab14 100644
7971 --- a/security/apparmor/policy_unpack.c
7972 +++ b/security/apparmor/policy_unpack.c
7973 @@ -579,6 +579,7 @@ fail:
7974 kfree(profile->secmark[i].label);
7975 kfree(profile->secmark);
7976 profile->secmark_count = 0;
7977 + profile->secmark = NULL;
7978 }
7979
7980 e->pos = pos;
7981 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
7982 index cf20dd36a30f..07b11b5aaf1f 100644
7983 --- a/security/selinux/hooks.c
7984 +++ b/security/selinux/hooks.c
7985 @@ -3244,12 +3244,16 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
7986 const void *value, size_t size, int flags)
7987 {
7988 struct inode_security_struct *isec = inode_security_novalidate(inode);
7989 + struct superblock_security_struct *sbsec = inode->i_sb->s_security;
7990 u32 newsid;
7991 int rc;
7992
7993 if (strcmp(name, XATTR_SELINUX_SUFFIX))
7994 return -EOPNOTSUPP;
7995
7996 + if (!(sbsec->flags & SBLABEL_MNT))
7997 + return -EOPNOTSUPP;
7998 +
7999 if (!value || !size)
8000 return -EACCES;
8001
8002 @@ -6398,7 +6402,10 @@ static void selinux_inode_invalidate_secctx(struct inode *inode)
8003 */
8004 static int selinux_inode_notifysecctx(struct inode *inode, void *ctx, u32 ctxlen)
8005 {
8006 - return selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX, ctx, ctxlen, 0);
8007 + int rc = selinux_inode_setsecurity(inode, XATTR_SELINUX_SUFFIX,
8008 + ctx, ctxlen, 0);
8009 + /* Do not return error when suppressing label (SBLABEL_MNT not set). */
8010 + return rc == -EOPNOTSUPP ? 0 : rc;
8011 }
8012
8013 /*
8014 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
8015 index b67f6fe08a1b..e08c6c6ca029 100644
8016 --- a/sound/core/pcm_native.c
8017 +++ b/sound/core/pcm_native.c
8018 @@ -1513,6 +1513,14 @@ int snd_pcm_suspend_all(struct snd_pcm *pcm)
8019 /* FIXME: the open/close code should lock this as well */
8020 if (substream->runtime == NULL)
8021 continue;
8022 +
8023 + /*
8024 + * Skip BE dai link PCM's that are internal and may
8025 + * not have their substream ops set.
8026 + */
8027 + if (!substream->ops)
8028 + continue;
8029 +
8030 err = snd_pcm_suspend(substream);
8031 if (err < 0 && err != -EBUSY)
8032 return err;
8033 diff --git a/sound/firewire/dice/dice.c b/sound/firewire/dice/dice.c
8034 index ed50b222d36e..eee184b05d93 100644
8035 --- a/sound/firewire/dice/dice.c
8036 +++ b/sound/firewire/dice/dice.c
8037 @@ -18,6 +18,7 @@ MODULE_LICENSE("GPL v2");
8038 #define OUI_ALESIS 0x000595
8039 #define OUI_MAUDIO 0x000d6c
8040 #define OUI_MYTEK 0x001ee8
8041 +#define OUI_SSL 0x0050c2 // Actually ID reserved by IEEE.
8042
8043 #define DICE_CATEGORY_ID 0x04
8044 #define WEISS_CATEGORY_ID 0x00
8045 @@ -196,7 +197,7 @@ static int dice_probe(struct fw_unit *unit,
8046 struct snd_dice *dice;
8047 int err;
8048
8049 - if (!entry->driver_data) {
8050 + if (!entry->driver_data && entry->vendor_id != OUI_SSL) {
8051 err = check_dice_category(unit);
8052 if (err < 0)
8053 return -ENODEV;
8054 @@ -361,6 +362,15 @@ static const struct ieee1394_device_id dice_id_table[] = {
8055 .model_id = 0x000002,
8056 .driver_data = (kernel_ulong_t)snd_dice_detect_mytek_formats,
8057 },
8058 + // Solid State Logic, Duende Classic and Mini.
8059 + // NOTE: each field of GUID in config ROM is not compliant to standard
8060 + // DICE scheme.
8061 + {
8062 + .match_flags = IEEE1394_MATCH_VENDOR_ID |
8063 + IEEE1394_MATCH_MODEL_ID,
8064 + .vendor_id = OUI_SSL,
8065 + .model_id = 0x000070,
8066 + },
8067 {
8068 .match_flags = IEEE1394_MATCH_VERSION,
8069 .version = DICE_INTERFACE,
8070 diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
8071 index 81f2fe2c6d23..60f87a0d99f4 100644
8072 --- a/sound/soc/fsl/fsl-asoc-card.c
8073 +++ b/sound/soc/fsl/fsl-asoc-card.c
8074 @@ -689,6 +689,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
8075 asrc_fail:
8076 of_node_put(asrc_np);
8077 of_node_put(codec_np);
8078 + put_device(&cpu_pdev->dev);
8079 fail:
8080 of_node_put(cpu_np);
8081
8082 diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
8083 index c29200cf755a..9b9a7ec52905 100644
8084 --- a/sound/soc/fsl/imx-sgtl5000.c
8085 +++ b/sound/soc/fsl/imx-sgtl5000.c
8086 @@ -108,6 +108,7 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
8087 ret = -EPROBE_DEFER;
8088 goto fail;
8089 }
8090 + put_device(&ssi_pdev->dev);
8091 codec_dev = of_find_i2c_device_by_node(codec_np);
8092 if (!codec_dev) {
8093 dev_err(&pdev->dev, "failed to find codec platform device\n");
8094 diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
8095 index b807a47515eb..336895f7fd1e 100644
8096 --- a/sound/soc/generic/simple-card-utils.c
8097 +++ b/sound/soc/generic/simple-card-utils.c
8098 @@ -283,12 +283,20 @@ static int asoc_simple_card_get_dai_id(struct device_node *ep)
8099 /* use endpoint/port reg if exist */
8100 ret = of_graph_parse_endpoint(ep, &info);
8101 if (ret == 0) {
8102 - if (info.id)
8103 + /*
8104 + * Because it will count port/endpoint if it doesn't have "reg".
8105 + * But, we can't judge whether it has "no reg", or "reg = <0>"
8106 + * only of_graph_parse_endpoint().
8107 + * We need to check "reg" property
8108 + */
8109 + if (of_get_property(ep, "reg", NULL))
8110 return info.id;
8111 - if (info.port)
8112 +
8113 + node = of_get_parent(ep);
8114 + of_node_put(node);
8115 + if (of_get_property(node, "reg", NULL))
8116 return info.port;
8117 }
8118 -
8119 node = of_graph_get_port_parent(ep);
8120
8121 /*
8122 diff --git a/sound/soc/qcom/common.c b/sound/soc/qcom/common.c
8123 index 4715527054e5..5661025e8cec 100644
8124 --- a/sound/soc/qcom/common.c
8125 +++ b/sound/soc/qcom/common.c
8126 @@ -42,6 +42,9 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
8127 link = card->dai_link;
8128 for_each_child_of_node(dev->of_node, np) {
8129 cpu = of_get_child_by_name(np, "cpu");
8130 + platform = of_get_child_by_name(np, "platform");
8131 + codec = of_get_child_by_name(np, "codec");
8132 +
8133 if (!cpu) {
8134 dev_err(dev, "Can't find cpu DT node\n");
8135 ret = -EINVAL;
8136 @@ -63,8 +66,6 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
8137 goto err;
8138 }
8139
8140 - platform = of_get_child_by_name(np, "platform");
8141 - codec = of_get_child_by_name(np, "codec");
8142 if (codec && platform) {
8143 link->platform_of_node = of_parse_phandle(platform,
8144 "sound-dai",
8145 @@ -100,10 +101,15 @@ int qcom_snd_parse_of(struct snd_soc_card *card)
8146 link->dpcm_capture = 1;
8147 link->stream_name = link->name;
8148 link++;
8149 +
8150 + of_node_put(cpu);
8151 + of_node_put(codec);
8152 + of_node_put(platform);
8153 }
8154
8155 return 0;
8156 err:
8157 + of_node_put(np);
8158 of_node_put(cpu);
8159 of_node_put(codec);
8160 of_node_put(platform);
8161 diff --git a/tools/build/Makefile.feature b/tools/build/Makefile.feature
8162 index 5467c6bf9ceb..bb9dca65eb5f 100644
8163 --- a/tools/build/Makefile.feature
8164 +++ b/tools/build/Makefile.feature
8165 @@ -70,7 +70,6 @@ FEATURE_TESTS_BASIC := \
8166 sched_getcpu \
8167 sdt \
8168 setns \
8169 - libopencsd \
8170 libaio
8171
8172 # FEATURE_TESTS_BASIC + FEATURE_TESTS_EXTRA is the complete list
8173 @@ -84,6 +83,7 @@ FEATURE_TESTS_EXTRA := \
8174 libbabeltrace \
8175 libbfd-liberty \
8176 libbfd-liberty-z \
8177 + libopencsd \
8178 libunwind-debug-frame \
8179 libunwind-debug-frame-arm \
8180 libunwind-debug-frame-aarch64 \
8181 diff --git a/tools/build/feature/test-all.c b/tools/build/feature/test-all.c
8182 index 20cdaa4fc112..e903b86b742f 100644
8183 --- a/tools/build/feature/test-all.c
8184 +++ b/tools/build/feature/test-all.c
8185 @@ -170,14 +170,14 @@
8186 # include "test-setns.c"
8187 #undef main
8188
8189 -#define main main_test_libopencsd
8190 -# include "test-libopencsd.c"
8191 -#undef main
8192 -
8193 #define main main_test_libaio
8194 # include "test-libaio.c"
8195 #undef main
8196
8197 +#define main main_test_reallocarray
8198 +# include "test-reallocarray.c"
8199 +#undef main
8200 +
8201 int main(int argc, char *argv[])
8202 {
8203 main_test_libpython();
8204 @@ -217,8 +217,8 @@ int main(int argc, char *argv[])
8205 main_test_sched_getcpu();
8206 main_test_sdt();
8207 main_test_setns();
8208 - main_test_libopencsd();
8209 main_test_libaio();
8210 + main_test_reallocarray();
8211
8212 return 0;
8213 }
8214 diff --git a/tools/build/feature/test-reallocarray.c b/tools/build/feature/test-reallocarray.c
8215 index 8170de35150d..8f6743e31da7 100644
8216 --- a/tools/build/feature/test-reallocarray.c
8217 +++ b/tools/build/feature/test-reallocarray.c
8218 @@ -6,3 +6,5 @@ int main(void)
8219 {
8220 return !!reallocarray(NULL, 1, 1);
8221 }
8222 +
8223 +#undef _GNU_SOURCE
8224 diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
8225 index 34d9c3619c96..78fd86b85087 100644
8226 --- a/tools/lib/bpf/Makefile
8227 +++ b/tools/lib/bpf/Makefile
8228 @@ -162,7 +162,8 @@ endif
8229
8230 TARGETS = $(CMD_TARGETS)
8231
8232 -all: fixdep all_cmd
8233 +all: fixdep
8234 + $(Q)$(MAKE) all_cmd
8235
8236 all_cmd: $(CMD_TARGETS) check
8237
8238 diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
8239 index c8fbd0306960..11f425662b43 100755
8240 --- a/tools/lib/lockdep/run_tests.sh
8241 +++ b/tools/lib/lockdep/run_tests.sh
8242 @@ -11,7 +11,7 @@ find tests -name '*.c' | sort | while read -r i; do
8243 testname=$(basename "$i" .c)
8244 echo -ne "$testname... "
8245 if gcc -o "tests/$testname" -pthread "$i" liblockdep.a -Iinclude -D__USE_LIBLOCKDEP &&
8246 - timeout 1 "tests/$testname" 2>&1 | "tests/${testname}.sh"; then
8247 + timeout 1 "tests/$testname" 2>&1 | /bin/bash "tests/${testname}.sh"; then
8248 echo "PASSED!"
8249 else
8250 echo "FAILED!"
8251 @@ -24,7 +24,7 @@ find tests -name '*.c' | sort | while read -r i; do
8252 echo -ne "(PRELOAD) $testname... "
8253 if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
8254 timeout 1 ./lockdep "tests/$testname" 2>&1 |
8255 - "tests/${testname}.sh"; then
8256 + /bin/bash "tests/${testname}.sh"; then
8257 echo "PASSED!"
8258 else
8259 echo "FAILED!"
8260 @@ -37,7 +37,7 @@ find tests -name '*.c' | sort | while read -r i; do
8261 echo -ne "(PRELOAD + Valgrind) $testname... "
8262 if gcc -o "tests/$testname" -pthread -Iinclude "$i" &&
8263 { timeout 10 valgrind --read-var-info=yes ./lockdep "./tests/$testname" >& "tests/${testname}.vg.out"; true; } &&
8264 - "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
8265 + /bin/bash "tests/${testname}.sh" < "tests/${testname}.vg.out" &&
8266 ! grep -Eq '(^==[0-9]*== (Invalid |Uninitialised ))|Mismatched free|Source and destination overlap| UME ' "tests/${testname}.vg.out"; then
8267 echo "PASSED!"
8268 else
8269 diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
8270 index abd4fa5d3088..87494c7c619d 100644
8271 --- a/tools/lib/traceevent/event-parse.c
8272 +++ b/tools/lib/traceevent/event-parse.c
8273 @@ -2457,7 +2457,7 @@ static int arg_num_eval(struct tep_print_arg *arg, long long *val)
8274 static char *arg_eval (struct tep_print_arg *arg)
8275 {
8276 long long val;
8277 - static char buf[20];
8278 + static char buf[24];
8279
8280 switch (arg->type) {
8281 case TEP_PRINT_ATOM:
8282 diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
8283 index b441c88cafa1..cf4a8329c4c0 100644
8284 --- a/tools/perf/Makefile.config
8285 +++ b/tools/perf/Makefile.config
8286 @@ -218,6 +218,8 @@ FEATURE_CHECK_LDFLAGS-libpython := $(PYTHON_EMBED_LDOPTS)
8287 FEATURE_CHECK_CFLAGS-libpython-version := $(PYTHON_EMBED_CCOPTS)
8288 FEATURE_CHECK_LDFLAGS-libpython-version := $(PYTHON_EMBED_LDOPTS)
8289
8290 +FEATURE_CHECK_LDFLAGS-libaio = -lrt
8291 +
8292 CFLAGS += -fno-omit-frame-pointer
8293 CFLAGS += -ggdb3
8294 CFLAGS += -funwind-tables
8295 @@ -386,7 +388,8 @@ ifeq ($(feature-setns), 1)
8296 $(call detected,CONFIG_SETNS)
8297 endif
8298
8299 -ifndef NO_CORESIGHT
8300 +ifdef CORESIGHT
8301 + $(call feature_check,libopencsd)
8302 ifeq ($(feature-libopencsd), 1)
8303 CFLAGS += -DHAVE_CSTRACE_SUPPORT $(LIBOPENCSD_CFLAGS)
8304 LDFLAGS += $(LIBOPENCSD_LDFLAGS)
8305 diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
8306 index 0ee6795d82cc..77f8f069f1e7 100644
8307 --- a/tools/perf/Makefile.perf
8308 +++ b/tools/perf/Makefile.perf
8309 @@ -102,7 +102,7 @@ include ../scripts/utilities.mak
8310 # When selected, pass LLVM_CONFIG=/path/to/llvm-config to `make' if
8311 # llvm-config is not in $PATH.
8312 #
8313 -# Define NO_CORESIGHT if you do not want support for CoreSight trace decoding.
8314 +# Define CORESIGHT if you DO WANT support for CoreSight trace decoding.
8315 #
8316 # Define NO_AIO if you do not want support of Posix AIO based trace
8317 # streaming for record mode. Currently Posix AIO trace streaming is
8318 diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
8319 index d340d2e42776..13758a0b367b 100644
8320 --- a/tools/perf/builtin-c2c.c
8321 +++ b/tools/perf/builtin-c2c.c
8322 @@ -2055,6 +2055,12 @@ static int setup_nodes(struct perf_session *session)
8323 if (!set)
8324 return -ENOMEM;
8325
8326 + nodes[node] = set;
8327 +
8328 + /* empty node, skip */
8329 + if (cpu_map__empty(map))
8330 + continue;
8331 +
8332 for (cpu = 0; cpu < map->nr; cpu++) {
8333 set_bit(map->map[cpu], set);
8334
8335 @@ -2063,8 +2069,6 @@ static int setup_nodes(struct perf_session *session)
8336
8337 cpu2node[map->map[cpu]] = node;
8338 }
8339 -
8340 - nodes[node] = set;
8341 }
8342
8343 setup_nodes_header();
8344 diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
8345 index ac221f137ed2..cff4d10daf49 100644
8346 --- a/tools/perf/builtin-script.c
8347 +++ b/tools/perf/builtin-script.c
8348 @@ -148,6 +148,7 @@ static struct {
8349 unsigned int print_ip_opts;
8350 u64 fields;
8351 u64 invalid_fields;
8352 + u64 user_set_fields;
8353 } output[OUTPUT_TYPE_MAX] = {
8354
8355 [PERF_TYPE_HARDWARE] = {
8356 @@ -344,7 +345,7 @@ static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
8357 if (attr->sample_type & sample_type)
8358 return 0;
8359
8360 - if (output[type].user_set) {
8361 + if (output[type].user_set_fields & field) {
8362 if (allow_user_set)
8363 return 0;
8364 evname = perf_evsel__name(evsel);
8365 @@ -2627,10 +2628,13 @@ parse:
8366 pr_warning("\'%s\' not valid for %s events. Ignoring.\n",
8367 all_output_options[i].str, event_type(j));
8368 } else {
8369 - if (change == REMOVE)
8370 + if (change == REMOVE) {
8371 output[j].fields &= ~all_output_options[i].field;
8372 - else
8373 + output[j].user_set_fields &= ~all_output_options[i].field;
8374 + } else {
8375 output[j].fields |= all_output_options[i].field;
8376 + output[j].user_set_fields |= all_output_options[i].field;
8377 + }
8378 output[j].user_set = true;
8379 output[j].wildcard_set = true;
8380 }
8381 diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
8382 index b36061cd1ab8..91cdbf504535 100644
8383 --- a/tools/perf/builtin-trace.c
8384 +++ b/tools/perf/builtin-trace.c
8385 @@ -1039,6 +1039,9 @@ static const size_t trace__entry_str_size = 2048;
8386
8387 static struct file *thread_trace__files_entry(struct thread_trace *ttrace, int fd)
8388 {
8389 + if (fd < 0)
8390 + return NULL;
8391 +
8392 if (fd > ttrace->files.max) {
8393 struct file *nfiles = realloc(ttrace->files.table, (fd + 1) * sizeof(struct file));
8394
8395 @@ -3865,7 +3868,8 @@ int cmd_trace(int argc, const char **argv)
8396 goto init_augmented_syscall_tp;
8397 }
8398
8399 - if (strcmp(perf_evsel__name(evsel), "raw_syscalls:sys_enter") == 0) {
8400 + if (trace.syscalls.events.augmented->priv == NULL &&
8401 + strstr(perf_evsel__name(evsel), "syscalls:sys_enter")) {
8402 struct perf_evsel *augmented = trace.syscalls.events.augmented;
8403 if (perf_evsel__init_augmented_syscall_tp(augmented, evsel) ||
8404 perf_evsel__init_augmented_syscall_tp_args(augmented))
8405 diff --git a/tools/perf/tests/evsel-tp-sched.c b/tools/perf/tests/evsel-tp-sched.c
8406 index 5cbba70bcdd0..ea7acf403727 100644
8407 --- a/tools/perf/tests/evsel-tp-sched.c
8408 +++ b/tools/perf/tests/evsel-tp-sched.c
8409 @@ -43,7 +43,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
8410 return -1;
8411 }
8412
8413 - if (perf_evsel__test_field(evsel, "prev_comm", 16, true))
8414 + if (perf_evsel__test_field(evsel, "prev_comm", 16, false))
8415 ret = -1;
8416
8417 if (perf_evsel__test_field(evsel, "prev_pid", 4, true))
8418 @@ -55,7 +55,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
8419 if (perf_evsel__test_field(evsel, "prev_state", sizeof(long), true))
8420 ret = -1;
8421
8422 - if (perf_evsel__test_field(evsel, "next_comm", 16, true))
8423 + if (perf_evsel__test_field(evsel, "next_comm", 16, false))
8424 ret = -1;
8425
8426 if (perf_evsel__test_field(evsel, "next_pid", 4, true))
8427 @@ -73,7 +73,7 @@ int test__perf_evsel__tp_sched_test(struct test *test __maybe_unused, int subtes
8428 return -1;
8429 }
8430
8431 - if (perf_evsel__test_field(evsel, "comm", 16, true))
8432 + if (perf_evsel__test_field(evsel, "comm", 16, false))
8433 ret = -1;
8434
8435 if (perf_evsel__test_field(evsel, "pid", 4, true))
8436 diff --git a/tools/perf/trace/beauty/msg_flags.c b/tools/perf/trace/beauty/msg_flags.c
8437 index d66c66315987..ea68db08b8e7 100644
8438 --- a/tools/perf/trace/beauty/msg_flags.c
8439 +++ b/tools/perf/trace/beauty/msg_flags.c
8440 @@ -29,7 +29,7 @@ static size_t syscall_arg__scnprintf_msg_flags(char *bf, size_t size,
8441 return scnprintf(bf, size, "NONE");
8442 #define P_MSG_FLAG(n) \
8443 if (flags & MSG_##n) { \
8444 - printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
8445 + printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
8446 flags &= ~MSG_##n; \
8447 }
8448
8449 diff --git a/tools/perf/trace/beauty/waitid_options.c b/tools/perf/trace/beauty/waitid_options.c
8450 index 6897fab40dcc..d4d10b33ba0e 100644
8451 --- a/tools/perf/trace/beauty/waitid_options.c
8452 +++ b/tools/perf/trace/beauty/waitid_options.c
8453 @@ -11,7 +11,7 @@ static size_t syscall_arg__scnprintf_waitid_options(char *bf, size_t size,
8454
8455 #define P_OPTION(n) \
8456 if (options & W##n) { \
8457 - printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : #n); \
8458 + printed += scnprintf(bf + printed, size - printed, "%s%s%s", printed ? "|" : "", show_prefix ? prefix : "", #n); \
8459 options &= ~W##n; \
8460 }
8461
8462 diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
8463 index 70de8f6b3aee..9142fd294e76 100644
8464 --- a/tools/perf/util/annotate.c
8465 +++ b/tools/perf/util/annotate.c
8466 @@ -1889,6 +1889,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
8467 struct annotation_options *options,
8468 struct arch **parch)
8469 {
8470 + struct annotation *notes = symbol__annotation(sym);
8471 struct annotate_args args = {
8472 .privsize = privsize,
8473 .evsel = evsel,
8474 @@ -1919,6 +1920,7 @@ int symbol__annotate(struct symbol *sym, struct map *map,
8475
8476 args.ms.map = map;
8477 args.ms.sym = sym;
8478 + notes->start = map__rip_2objdump(map, sym->start);
8479
8480 return symbol__disassemble(sym, &args);
8481 }
8482 @@ -2794,8 +2796,6 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev
8483
8484 symbol__calc_percent(sym, evsel);
8485
8486 - notes->start = map__rip_2objdump(map, sym->start);
8487 -
8488 annotation__set_offsets(notes, size);
8489 annotation__mark_jump_targets(notes, sym);
8490 annotation__compute_ipc(notes, size);
8491 diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
8492 index 68b2570304ec..08073a4d59a4 100644
8493 --- a/tools/perf/util/s390-cpumsf.c
8494 +++ b/tools/perf/util/s390-cpumsf.c
8495 @@ -301,6 +301,11 @@ static bool s390_cpumsf_validate(int machine_type,
8496 *dsdes = 85;
8497 *bsdes = 32;
8498 break;
8499 + case 2964:
8500 + case 2965:
8501 + *dsdes = 112;
8502 + *bsdes = 32;
8503 + break;
8504 default:
8505 /* Illegal trailer entry */
8506 return false;
8507 diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
8508 index 87ef16a1b17e..7059d1be2d09 100644
8509 --- a/tools/perf/util/scripting-engines/trace-event-python.c
8510 +++ b/tools/perf/util/scripting-engines/trace-event-python.c
8511 @@ -733,8 +733,7 @@ static PyObject *get_perf_sample_dict(struct perf_sample *sample,
8512 Py_FatalError("couldn't create Python dictionary");
8513
8514 pydict_set_item_string_decref(dict, "ev_name", _PyUnicode_FromString(perf_evsel__name(evsel)));
8515 - pydict_set_item_string_decref(dict, "attr", _PyUnicode_FromStringAndSize(
8516 - (const char *)&evsel->attr, sizeof(evsel->attr)));
8517 + pydict_set_item_string_decref(dict, "attr", _PyBytes_FromStringAndSize((const char *)&evsel->attr, sizeof(evsel->attr)));
8518
8519 pydict_set_item_string_decref(dict_sample, "pid",
8520 _PyLong_FromLong(sample->pid));
8521 @@ -1494,34 +1493,40 @@ static void _free_command_line(wchar_t **command_line, int num)
8522 static int python_start_script(const char *script, int argc, const char **argv)
8523 {
8524 struct tables *tables = &tables_global;
8525 + PyMODINIT_FUNC (*initfunc)(void);
8526 #if PY_MAJOR_VERSION < 3
8527 const char **command_line;
8528 #else
8529 wchar_t **command_line;
8530 #endif
8531 - char buf[PATH_MAX];
8532 + /*
8533 + * Use a non-const name variable to cope with python 2.6's
8534 + * PyImport_AppendInittab prototype
8535 + */
8536 + char buf[PATH_MAX], name[19] = "perf_trace_context";
8537 int i, err = 0;
8538 FILE *fp;
8539
8540 #if PY_MAJOR_VERSION < 3
8541 + initfunc = initperf_trace_context;
8542 command_line = malloc((argc + 1) * sizeof(const char *));
8543 command_line[0] = script;
8544 for (i = 1; i < argc + 1; i++)
8545 command_line[i] = argv[i - 1];
8546 #else
8547 + initfunc = PyInit_perf_trace_context;
8548 command_line = malloc((argc + 1) * sizeof(wchar_t *));
8549 command_line[0] = Py_DecodeLocale(script, NULL);
8550 for (i = 1; i < argc + 1; i++)
8551 command_line[i] = Py_DecodeLocale(argv[i - 1], NULL);
8552 #endif
8553
8554 + PyImport_AppendInittab(name, initfunc);
8555 Py_Initialize();
8556
8557 #if PY_MAJOR_VERSION < 3
8558 - initperf_trace_context();
8559 PySys_SetArgv(argc + 1, (char **)command_line);
8560 #else
8561 - PyInit_perf_trace_context();
8562 PySys_SetArgv(argc + 1, command_line);
8563 #endif
8564
8565 diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
8566 index 6c1a83768eb0..d0334c33da54 100644
8567 --- a/tools/perf/util/sort.c
8568 +++ b/tools/perf/util/sort.c
8569 @@ -230,8 +230,14 @@ static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
8570 if (sym_l == sym_r)
8571 return 0;
8572
8573 - if (sym_l->inlined || sym_r->inlined)
8574 - return strcmp(sym_l->name, sym_r->name);
8575 + if (sym_l->inlined || sym_r->inlined) {
8576 + int ret = strcmp(sym_l->name, sym_r->name);
8577 +
8578 + if (ret)
8579 + return ret;
8580 + if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
8581 + return 0;
8582 + }
8583
8584 if (sym_l->start != sym_r->start)
8585 return (int64_t)(sym_r->start - sym_l->start);
8586 diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c
8587 index dc86597d0cc4..ccf42c4e83f0 100644
8588 --- a/tools/perf/util/srcline.c
8589 +++ b/tools/perf/util/srcline.c
8590 @@ -104,7 +104,7 @@ static struct symbol *new_inline_sym(struct dso *dso,
8591 } else {
8592 /* create a fake symbol for the inline frame */
8593 inline_sym = symbol__new(base_sym ? base_sym->start : 0,
8594 - base_sym ? base_sym->end : 0,
8595 + base_sym ? (base_sym->end - base_sym->start) : 0,
8596 base_sym ? base_sym->binding : 0,
8597 base_sym ? base_sym->type : 0,
8598 funcname);
8599 diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
8600 index 41ab7a3668b3..936f726f7cd9 100644
8601 --- a/tools/testing/selftests/bpf/Makefile
8602 +++ b/tools/testing/selftests/bpf/Makefile
8603 @@ -96,6 +96,7 @@ $(BPFOBJ): force
8604 CLANG ?= clang
8605 LLC ?= llc
8606 LLVM_OBJCOPY ?= llvm-objcopy
8607 +LLVM_READELF ?= llvm-readelf
8608 BTF_PAHOLE ?= pahole
8609
8610 PROBE := $(shell $(LLC) -march=bpf -mcpu=probe -filetype=null /dev/null 2>&1)
8611 @@ -132,7 +133,7 @@ BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
8612 BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
8613 BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
8614 $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
8615 - readelf -S ./llvm_btf_verify.o | grep BTF; \
8616 + $(LLVM_READELF) -S ./llvm_btf_verify.o | grep BTF; \
8617 /bin/rm -f ./llvm_btf_verify.o)
8618
8619 ifneq ($(BTF_LLVM_PROBE),)
8620 diff --git a/tools/testing/selftests/bpf/test_map_in_map.c b/tools/testing/selftests/bpf/test_map_in_map.c
8621 index ce923e67e08e..2985f262846e 100644
8622 --- a/tools/testing/selftests/bpf/test_map_in_map.c
8623 +++ b/tools/testing/selftests/bpf/test_map_in_map.c
8624 @@ -27,6 +27,7 @@ SEC("xdp_mimtest")
8625 int xdp_mimtest0(struct xdp_md *ctx)
8626 {
8627 int value = 123;
8628 + int *value_p;
8629 int key = 0;
8630 void *map;
8631
8632 @@ -35,6 +36,9 @@ int xdp_mimtest0(struct xdp_md *ctx)
8633 return XDP_DROP;
8634
8635 bpf_map_update_elem(map, &key, &value, 0);
8636 + value_p = bpf_map_lookup_elem(map, &key);
8637 + if (!value_p || *value_p != 123)
8638 + return XDP_DROP;
8639
8640 map = bpf_map_lookup_elem(&mim_hash, &key);
8641 if (!map)
8642 diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
8643 index e2b9eee37187..6e05a22b346c 100644
8644 --- a/tools/testing/selftests/bpf/test_maps.c
8645 +++ b/tools/testing/selftests/bpf/test_maps.c
8646 @@ -43,7 +43,7 @@ static int map_flags;
8647 } \
8648 })
8649
8650 -static void test_hashmap(int task, void *data)
8651 +static void test_hashmap(unsigned int task, void *data)
8652 {
8653 long long key, next_key, first_key, value;
8654 int fd;
8655 @@ -133,7 +133,7 @@ static void test_hashmap(int task, void *data)
8656 close(fd);
8657 }
8658
8659 -static void test_hashmap_sizes(int task, void *data)
8660 +static void test_hashmap_sizes(unsigned int task, void *data)
8661 {
8662 int fd, i, j;
8663
8664 @@ -153,7 +153,7 @@ static void test_hashmap_sizes(int task, void *data)
8665 }
8666 }
8667
8668 -static void test_hashmap_percpu(int task, void *data)
8669 +static void test_hashmap_percpu(unsigned int task, void *data)
8670 {
8671 unsigned int nr_cpus = bpf_num_possible_cpus();
8672 BPF_DECLARE_PERCPU(long, value);
8673 @@ -280,7 +280,7 @@ static int helper_fill_hashmap(int max_entries)
8674 return fd;
8675 }
8676
8677 -static void test_hashmap_walk(int task, void *data)
8678 +static void test_hashmap_walk(unsigned int task, void *data)
8679 {
8680 int fd, i, max_entries = 1000;
8681 long long key, value, next_key;
8682 @@ -351,7 +351,7 @@ static void test_hashmap_zero_seed(void)
8683 close(second);
8684 }
8685
8686 -static void test_arraymap(int task, void *data)
8687 +static void test_arraymap(unsigned int task, void *data)
8688 {
8689 int key, next_key, fd;
8690 long long value;
8691 @@ -406,7 +406,7 @@ static void test_arraymap(int task, void *data)
8692 close(fd);
8693 }
8694
8695 -static void test_arraymap_percpu(int task, void *data)
8696 +static void test_arraymap_percpu(unsigned int task, void *data)
8697 {
8698 unsigned int nr_cpus = bpf_num_possible_cpus();
8699 BPF_DECLARE_PERCPU(long, values);
8700 @@ -502,7 +502,7 @@ static void test_arraymap_percpu_many_keys(void)
8701 close(fd);
8702 }
8703
8704 -static void test_devmap(int task, void *data)
8705 +static void test_devmap(unsigned int task, void *data)
8706 {
8707 int fd;
8708 __u32 key, value;
8709 @@ -517,7 +517,7 @@ static void test_devmap(int task, void *data)
8710 close(fd);
8711 }
8712
8713 -static void test_queuemap(int task, void *data)
8714 +static void test_queuemap(unsigned int task, void *data)
8715 {
8716 const int MAP_SIZE = 32;
8717 __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
8718 @@ -575,7 +575,7 @@ static void test_queuemap(int task, void *data)
8719 close(fd);
8720 }
8721
8722 -static void test_stackmap(int task, void *data)
8723 +static void test_stackmap(unsigned int task, void *data)
8724 {
8725 const int MAP_SIZE = 32;
8726 __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
8727 @@ -641,7 +641,7 @@ static void test_stackmap(int task, void *data)
8728 #define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
8729 #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
8730 #define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
8731 -static void test_sockmap(int tasks, void *data)
8732 +static void test_sockmap(unsigned int tasks, void *data)
8733 {
8734 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
8735 int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
8736 @@ -1258,10 +1258,11 @@ static void test_map_large(void)
8737 }
8738
8739 #define run_parallel(N, FN, DATA) \
8740 - printf("Fork %d tasks to '" #FN "'\n", N); \
8741 + printf("Fork %u tasks to '" #FN "'\n", N); \
8742 __run_parallel(N, FN, DATA)
8743
8744 -static void __run_parallel(int tasks, void (*fn)(int task, void *data),
8745 +static void __run_parallel(unsigned int tasks,
8746 + void (*fn)(unsigned int task, void *data),
8747 void *data)
8748 {
8749 pid_t pid[tasks];
8750 @@ -1302,7 +1303,7 @@ static void test_map_stress(void)
8751 #define DO_UPDATE 1
8752 #define DO_DELETE 0
8753
8754 -static void test_update_delete(int fn, void *data)
8755 +static void test_update_delete(unsigned int fn, void *data)
8756 {
8757 int do_update = ((int *)data)[1];
8758 int fd = ((int *)data)[0];
8759 diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
8760 index 2fd90d456892..9a967983abed 100644
8761 --- a/tools/testing/selftests/bpf/test_verifier.c
8762 +++ b/tools/testing/selftests/bpf/test_verifier.c
8763 @@ -34,6 +34,7 @@
8764 #include <linux/if_ether.h>
8765
8766 #include <bpf/bpf.h>
8767 +#include <bpf/libbpf.h>
8768
8769 #ifdef HAVE_GENHDR
8770 # include "autoconf.h"
8771 @@ -59,6 +60,7 @@
8772
8773 #define UNPRIV_SYSCTL "kernel/unprivileged_bpf_disabled"
8774 static bool unpriv_disabled = false;
8775 +static int skips;
8776
8777 struct bpf_test {
8778 const char *descr;
8779 @@ -15946,6 +15948,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
8780 pflags |= BPF_F_ANY_ALIGNMENT;
8781 fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
8782 "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1);
8783 + if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
8784 + printf("SKIP (unsupported program type %d)\n", prog_type);
8785 + skips++;
8786 + goto close_fds;
8787 + }
8788
8789 expected_ret = unpriv && test->result_unpriv != UNDEF ?
8790 test->result_unpriv : test->result;
8791 @@ -16099,7 +16106,7 @@ static bool test_as_unpriv(struct bpf_test *test)
8792
8793 static int do_test(bool unpriv, unsigned int from, unsigned int to)
8794 {
8795 - int i, passes = 0, errors = 0, skips = 0;
8796 + int i, passes = 0, errors = 0;
8797
8798 for (i = from; i < to; i++) {
8799 struct bpf_test *test = &tests[i];
8800 diff --git a/tools/testing/selftests/ir/ir_loopback.c b/tools/testing/selftests/ir/ir_loopback.c
8801 index 858c19caf224..8cdf1b89ac9c 100644
8802 --- a/tools/testing/selftests/ir/ir_loopback.c
8803 +++ b/tools/testing/selftests/ir/ir_loopback.c
8804 @@ -27,6 +27,8 @@
8805
8806 #define TEST_SCANCODES 10
8807 #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
8808 +#define SYSFS_PATH_MAX 256
8809 +#define DNAME_PATH_MAX 256
8810
8811 static const struct {
8812 enum rc_proto proto;
8813 @@ -56,7 +58,7 @@ static const struct {
8814 int lirc_open(const char *rc)
8815 {
8816 struct dirent *dent;
8817 - char buf[100];
8818 + char buf[SYSFS_PATH_MAX + DNAME_PATH_MAX];
8819 DIR *d;
8820 int fd;
8821
8822 diff --git a/tools/testing/selftests/seccomp/seccomp_bpf.c b/tools/testing/selftests/seccomp/seccomp_bpf.c
8823 index 7e632b465ab4..6d7a81306f8a 100644
8824 --- a/tools/testing/selftests/seccomp/seccomp_bpf.c
8825 +++ b/tools/testing/selftests/seccomp/seccomp_bpf.c
8826 @@ -2971,6 +2971,12 @@ TEST(get_metadata)
8827 struct seccomp_metadata md;
8828 long ret;
8829
8830 + /* Only real root can get metadata. */
8831 + if (geteuid()) {
8832 + XFAIL(return, "get_metadata requires real root");
8833 + return;
8834 + }
8835 +
8836 ASSERT_EQ(0, pipe(pipefd));
8837
8838 pid = fork();