Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0161-5.4.62-all-fixes.patch

Revision 3623
Mon Sep 7 06:16:06 2020 UTC by niro
File size: 273436 byte(s)
-linux-5.4.62
1 diff --git a/Makefile b/Makefile
2 index 2c21b922644d7..aece56450bd9d 100644
3 --- a/Makefile
4 +++ b/Makefile
5 @@ -1,7 +1,7 @@
6 # SPDX-License-Identifier: GPL-2.0
7 VERSION = 5
8 PATCHLEVEL = 4
9 -SUBLEVEL = 61
10 +SUBLEVEL = 62
11 EXTRAVERSION =
12 NAME = Kleptomaniac Octopus
13
14 @@ -442,6 +442,12 @@ PYTHON = python
15 PYTHON3 = python3
16 CHECK = sparse
17 BASH = bash
18 +KGZIP = gzip
19 +KBZIP2 = bzip2
20 +KLZOP = lzop
21 +LZMA = lzma
22 +LZ4 = lz4c
23 +XZ = xz
24
25 CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
26 -Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
27 @@ -490,6 +496,7 @@ CLANG_FLAGS :=
28 export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
29 export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
30 export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
31 +export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ
32 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
33
34 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
35 @@ -997,10 +1004,10 @@ export mod_strip_cmd
36 mod_compress_cmd = true
37 ifdef CONFIG_MODULE_COMPRESS
38 ifdef CONFIG_MODULE_COMPRESS_GZIP
39 - mod_compress_cmd = gzip -n -f
40 + mod_compress_cmd = $(KGZIP) -n -f
41 endif # CONFIG_MODULE_COMPRESS_GZIP
42 ifdef CONFIG_MODULE_COMPRESS_XZ
43 - mod_compress_cmd = xz -f
44 + mod_compress_cmd = $(XZ) -f
45 endif # CONFIG_MODULE_COMPRESS_XZ
46 endif # CONFIG_MODULE_COMPRESS
47 export mod_compress_cmd
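
Note: the new KGZIP, KBZIP2, KLZOP, LZMA, LZ4 and XZ variables exist so that builders can substitute command-line-compatible replacements for the compression tools, typically parallel implementations, e.g.:

    make KGZIP=pigz KBZIP2=pbzip2 bzImage modules

The mod_compress_cmd hunk above is the same indirection applied to module compression.
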
48 diff --git a/arch/arm/boot/deflate_xip_data.sh b/arch/arm/boot/deflate_xip_data.sh
49 index 40937248cebe3..304495c3c2c5d 100755
50 --- a/arch/arm/boot/deflate_xip_data.sh
51 +++ b/arch/arm/boot/deflate_xip_data.sh
52 @@ -56,7 +56,7 @@ trap 'rm -f "$XIPIMAGE.tmp"; exit 1' 1 2 3
53 # substitute the data section by a compressed version
54 $DD if="$XIPIMAGE" count=$data_start iflag=count_bytes of="$XIPIMAGE.tmp"
55 $DD if="$XIPIMAGE" skip=$data_start iflag=skip_bytes |
56 -gzip -9 >> "$XIPIMAGE.tmp"
57 +$KGZIP -9 >> "$XIPIMAGE.tmp"
58
59 # replace kernel binary
60 mv -f "$XIPIMAGE.tmp" "$XIPIMAGE"
61 diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
62 index 63d9f4a066e38..5a8e58b663420 100644
63 --- a/arch/arm/boot/dts/ls1021a.dtsi
64 +++ b/arch/arm/boot/dts/ls1021a.dtsi
65 @@ -753,7 +753,7 @@
66 fsl,tmr-prsc = <2>;
67 fsl,tmr-add = <0xaaaaaaab>;
68 fsl,tmr-fiper1 = <999999995>;
69 - fsl,tmr-fiper2 = <99990>;
70 + fsl,tmr-fiper2 = <999999995>;
71 fsl,max-adj = <499999999>;
72 fsl,extts-fifo;
73 };
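
Note: on the QorIQ 1588 timer each FIPER register holds the desired pulse period minus one timer clock period. Assuming the 5 ns timer clock this SoC uses (a sketch of the arithmetic, not authoritative):

    fiper = 1000000000 - 5 = 999999995   /* 1 s pulse, 5 ns clock */

The old fsl,tmr-fiper2 value of 99990 yielded a roughly 10 kHz pulse instead of the intended 1 Hz; the fix makes FIPER2 match FIPER1.
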
74 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
75 index 11a7d6208087f..96abe558aea8b 100644
76 --- a/arch/arm64/Makefile
77 +++ b/arch/arm64/Makefile
78 @@ -146,7 +146,8 @@ zinstall install:
79 PHONY += vdso_install
80 vdso_install:
81 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
82 - $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
83 + $(if $(CONFIG_COMPAT_VDSO), \
84 + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@)
85
86 # We use MRPROPER_FILES and CLEAN_FILES now
87 archclean:
88 diff --git a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
89 index 1235830ffd0b7..38c0d74767e3f 100644
90 --- a/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
91 +++ b/arch/arm64/boot/dts/qcom/msm8916-pins.dtsi
92 @@ -521,7 +521,7 @@
93 pins = "gpio63", "gpio64", "gpio65", "gpio66",
94 "gpio67", "gpio68";
95 drive-strength = <2>;
96 - bias-disable;
97 + bias-pull-down;
98 };
99 };
100 };
101 diff --git a/arch/arm64/include/asm/smp.h b/arch/arm64/include/asm/smp.h
102 index a0c8a0b652593..0eadbf933e359 100644
103 --- a/arch/arm64/include/asm/smp.h
104 +++ b/arch/arm64/include/asm/smp.h
105 @@ -46,7 +46,12 @@ DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
106 * Logical CPU mapping.
107 */
108 extern u64 __cpu_logical_map[NR_CPUS];
109 -#define cpu_logical_map(cpu) __cpu_logical_map[cpu]
110 +extern u64 cpu_logical_map(int cpu);
111 +
112 +static inline void set_cpu_logical_map(int cpu, u64 hwid)
113 +{
114 + __cpu_logical_map[cpu] = hwid;
115 +}
116
117 struct seq_file;
118
119 diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c
120 index 0b2830379fe03..51462c59ab5da 100644
121 --- a/arch/arm64/kernel/cpu_errata.c
122 +++ b/arch/arm64/kernel/cpu_errata.c
123 @@ -917,6 +917,8 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
124 .desc = "ARM erratum 1418040",
125 .capability = ARM64_WORKAROUND_1418040,
126 ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
127 + .type = (ARM64_CPUCAP_SCOPE_LOCAL_CPU |
128 + ARM64_CPUCAP_PERMITTED_FOR_LATE_CPU),
129 },
130 #endif
131 #ifdef CONFIG_ARM64_ERRATUM_1165522
132 diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
133 index fab013c5ee8c9..10190c4b16dc4 100644
134 --- a/arch/arm64/kernel/process.c
135 +++ b/arch/arm64/kernel/process.c
136 @@ -498,6 +498,39 @@ static void entry_task_switch(struct task_struct *next)
137 __this_cpu_write(__entry_task, next);
138 }
139
140 +/*
141 + * ARM erratum 1418040 handling, affecting the 32bit view of CNTVCT.
142 + * Assuming the virtual counter is enabled at the beginning of times:
143 + *
144 + * - disable access when switching from a 64bit task to a 32bit task
145 + * - enable access when switching from a 32bit task to a 64bit task
146 + */
147 +static void erratum_1418040_thread_switch(struct task_struct *prev,
148 + struct task_struct *next)
149 +{
150 + bool prev32, next32;
151 + u64 val;
152 +
153 + if (!(IS_ENABLED(CONFIG_ARM64_ERRATUM_1418040) &&
154 + cpus_have_const_cap(ARM64_WORKAROUND_1418040)))
155 + return;
156 +
157 + prev32 = is_compat_thread(task_thread_info(prev));
158 + next32 = is_compat_thread(task_thread_info(next));
159 +
160 + if (prev32 == next32)
161 + return;
162 +
163 + val = read_sysreg(cntkctl_el1);
164 +
165 + if (!next32)
166 + val |= ARCH_TIMER_USR_VCT_ACCESS_EN;
167 + else
168 + val &= ~ARCH_TIMER_USR_VCT_ACCESS_EN;
169 +
170 + write_sysreg(val, cntkctl_el1);
171 +}
172 +
173 /*
174 * Thread switching.
175 */
176 @@ -514,6 +547,7 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
177 uao_thread_switch(next);
178 ptrauth_thread_switch(next);
179 ssbs_thread_switch(next);
180 + erratum_1418040_thread_switch(prev, next);
181
182 /*
183 * Complete any pending TLB or cache maintenance on this CPU in case
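
Note: ARCH_TIMER_USR_VCT_ACCESS_EN is the EL0VCTEN bit of CNTKCTL_EL1. Clearing it when switching to a 32-bit task revokes the task's direct access to the virtual counter, so the erratum's corrupted 32-bit view of CNTVCT can never be observed from userspace; switching back to a 64-bit task re-enables fast counter reads.
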
184 diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
185 index 56f6645617548..d98987b82874f 100644
186 --- a/arch/arm64/kernel/setup.c
187 +++ b/arch/arm64/kernel/setup.c
188 @@ -85,7 +85,7 @@ u64 __cacheline_aligned boot_args[4];
189 void __init smp_setup_processor_id(void)
190 {
191 u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
192 - cpu_logical_map(0) = mpidr;
193 + set_cpu_logical_map(0, mpidr);
194
195 /*
196 * clear __my_cpu_offset on boot CPU to avoid hang caused by
197 @@ -276,6 +276,12 @@ arch_initcall(reserve_memblock_reserved_regions);
198
199 u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
200
201 +u64 cpu_logical_map(int cpu)
202 +{
203 + return __cpu_logical_map[cpu];
204 +}
205 +EXPORT_SYMBOL_GPL(cpu_logical_map);
206 +
207 void __init setup_arch(char **cmdline_p)
208 {
209 init_mm.start_code = (unsigned long) _text;
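
Note: turning cpu_logical_map() from a macro into an EXPORT_SYMBOL_GPL'd accessor lets modules read the logical-CPU-to-MPIDR mapping without the array itself being exported, while writes stay confined to core code via set_cpu_logical_map(). A minimal sketch of a hypothetical module-side read:

    #include <linux/smp.h>
    #include <asm/smp.h>

    static u64 current_cpu_hwid(void)
    {
            /* MPIDR-derived hardware ID of the CPU we are running on */
            return cpu_logical_map(raw_smp_processor_id());
    }
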
210 diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
211 index 993a4aedfd377..102dc3e7f2e1d 100644
212 --- a/arch/arm64/kernel/smp.c
213 +++ b/arch/arm64/kernel/smp.c
214 @@ -549,7 +549,7 @@ acpi_map_gic_cpu_interface(struct acpi_madt_generic_interrupt *processor)
215 return;
216
217 /* map the logical cpu id to cpu MPIDR */
218 - cpu_logical_map(cpu_count) = hwid;
219 + set_cpu_logical_map(cpu_count, hwid);
220
221 cpu_madt_gicc[cpu_count] = *processor;
222
223 @@ -663,7 +663,7 @@ static void __init of_parse_and_init_cpus(void)
224 goto next;
225
226 pr_debug("cpu logical map 0x%llx\n", hwid);
227 - cpu_logical_map(cpu_count) = hwid;
228 + set_cpu_logical_map(cpu_count, hwid);
229
230 early_map_cpu_to_node(cpu_count, of_node_to_nid(dn));
231 next:
232 @@ -704,7 +704,7 @@ void __init smp_init_cpus(void)
233 for (i = 1; i < nr_cpu_ids; i++) {
234 if (cpu_logical_map(i) != INVALID_HWID) {
235 if (smp_cpu_setup(i))
236 - cpu_logical_map(i) = INVALID_HWID;
237 + set_cpu_logical_map(i, INVALID_HWID);
238 }
239 }
240 }
241 diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
242 index d76a3d39b2699..6f4838b475d0d 100644
243 --- a/arch/arm64/kvm/hyp/switch.c
244 +++ b/arch/arm64/kvm/hyp/switch.c
245 @@ -754,7 +754,7 @@ static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
246 * making sure it is a kernel address and not a PC-relative
247 * reference.
248 */
249 - asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
250 + asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));
251
252 __hyp_do_panic(str_va,
253 spsr, elr,
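
Note: in the old template the reference to __hyp_panic_string lived only inside the asm string, where the compiler cannot see it, so the symbol could be dropped or its address handled incorrectly (for example under LTO). Passing it through the "S" constraint, which on arm64 means an absolute symbolic address, makes the dependency explicit while emitting the same literal-pool load.
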
254 diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
255 index 32240000dc0c8..2876a7df1b0a1 100644
256 --- a/arch/ia64/Makefile
257 +++ b/arch/ia64/Makefile
258 @@ -40,7 +40,7 @@ $(error Sorry, you need a newer version of the assember, one that is built from
259 endif
260
261 quiet_cmd_gzip = GZIP $@
262 -cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
263 +cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@
264
265 quiet_cmd_objcopy = OBJCOPY $@
266 cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
267 diff --git a/arch/m68k/Makefile b/arch/m68k/Makefile
268 index 5d92883840969..0415d28dbe4fc 100644
269 --- a/arch/m68k/Makefile
270 +++ b/arch/m68k/Makefile
271 @@ -135,10 +135,10 @@ vmlinux.gz: vmlinux
272 ifndef CONFIG_KGDB
273 cp vmlinux vmlinux.tmp
274 $(STRIP) vmlinux.tmp
275 - gzip -9c vmlinux.tmp >vmlinux.gz
276 + $(KGZIP) -9c vmlinux.tmp >vmlinux.gz
277 rm vmlinux.tmp
278 else
279 - gzip -9c vmlinux >vmlinux.gz
280 + $(KGZIP) -9c vmlinux >vmlinux.gz
281 endif
282
283 bzImage: vmlinux.bz2
284 @@ -148,10 +148,10 @@ vmlinux.bz2: vmlinux
285 ifndef CONFIG_KGDB
286 cp vmlinux vmlinux.tmp
287 $(STRIP) vmlinux.tmp
288 - bzip2 -1c vmlinux.tmp >vmlinux.bz2
289 + $(KBZIP2) -1c vmlinux.tmp >vmlinux.bz2
290 rm vmlinux.tmp
291 else
292 - bzip2 -1c vmlinux >vmlinux.bz2
293 + $(KBZIP2) -1c vmlinux >vmlinux.bz2
294 endif
295
296 archclean:
297 diff --git a/arch/mips/vdso/genvdso.c b/arch/mips/vdso/genvdso.c
298 index b66b6b1c4aeb9..8f581a2c8578b 100644
299 --- a/arch/mips/vdso/genvdso.c
300 +++ b/arch/mips/vdso/genvdso.c
301 @@ -122,6 +122,7 @@ static void *map_vdso(const char *path, size_t *_size)
302 if (fstat(fd, &stat) != 0) {
303 fprintf(stderr, "%s: Failed to stat '%s': %s\n", program_name,
304 path, strerror(errno));
305 + close(fd);
306 return NULL;
307 }
308
309 @@ -130,6 +131,7 @@ static void *map_vdso(const char *path, size_t *_size)
310 if (addr == MAP_FAILED) {
311 fprintf(stderr, "%s: Failed to map '%s': %s\n", program_name,
312 path, strerror(errno));
313 + close(fd);
314 return NULL;
315 }
316
317 @@ -139,6 +141,7 @@ static void *map_vdso(const char *path, size_t *_size)
318 if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG) != 0) {
319 fprintf(stderr, "%s: '%s' is not an ELF file\n", program_name,
320 path);
321 + close(fd);
322 return NULL;
323 }
324
325 @@ -150,6 +153,7 @@ static void *map_vdso(const char *path, size_t *_size)
326 default:
327 fprintf(stderr, "%s: '%s' has invalid ELF class\n",
328 program_name, path);
329 + close(fd);
330 return NULL;
331 }
332
333 @@ -161,6 +165,7 @@ static void *map_vdso(const char *path, size_t *_size)
334 default:
335 fprintf(stderr, "%s: '%s' has invalid ELF data order\n",
336 program_name, path);
337 + close(fd);
338 return NULL;
339 }
340
341 @@ -168,15 +173,18 @@ static void *map_vdso(const char *path, size_t *_size)
342 fprintf(stderr,
343 "%s: '%s' has invalid ELF machine (expected EM_MIPS)\n",
344 program_name, path);
345 + close(fd);
346 return NULL;
347 } else if (swap_uint16(ehdr->e_type) != ET_DYN) {
348 fprintf(stderr,
349 "%s: '%s' has invalid ELF type (expected ET_DYN)\n",
350 program_name, path);
351 + close(fd);
352 return NULL;
353 }
354
355 *_size = stat.st_size;
356 + close(fd);
357 return addr;
358 }
359
360 @@ -280,10 +288,12 @@ int main(int argc, char **argv)
361 /* Calculate and write symbol offsets to <output file> */
362 if (!get_symbols(dbg_vdso_path, dbg_vdso)) {
363 unlink(out_path);
364 + fclose(out_file);
365 return EXIT_FAILURE;
366 }
367
368 fprintf(out_file, "};\n");
369 + fclose(out_file);
370
371 return EXIT_SUCCESS;
372 }
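
Note: the hunks above plug file-descriptor leaks by closing fd (and out_file) on every early-exit path. A common alternative that avoids repeating the cleanup is a single exit label; a minimal user-space sketch (hypothetical helper, not the upstream code):

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static void *map_file_checked(const char *path, size_t *size)
    {
            void *ret = NULL;
            struct stat st;
            int fd = open(path, O_RDONLY);

            if (fd < 0)
                    return NULL;
            if (fstat(fd, &st) != 0)
                    goto out;
            ret = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
            if (ret == MAP_FAILED) {
                    ret = NULL;
                    goto out;
            }
            *size = st.st_size;
    out:
            close(fd);      /* a successful mapping stays valid after close() */
            return ret;
    }
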
373 diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
374 index 36b834f1c9330..53f974817affb 100644
375 --- a/arch/parisc/Makefile
376 +++ b/arch/parisc/Makefile
377 @@ -156,7 +156,7 @@ vmlinuz: bzImage
378 $(OBJCOPY) $(boot)/bzImage $@
379 else
380 vmlinuz: vmlinux
381 - @gzip -cf -9 $< > $@
382 + @$(KGZIP) -cf -9 $< > $@
383 endif
384
385 install:
386 diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
387 index a460298c7ddb4..f91ecb10d0ae7 100644
388 --- a/arch/powerpc/kernel/cpu_setup_power.S
389 +++ b/arch/powerpc/kernel/cpu_setup_power.S
390 @@ -184,7 +184,7 @@ __init_LPCR_ISA300:
391
392 __init_FSCR:
393 mfspr r3,SPRN_FSCR
394 - ori r3,r3,FSCR_TAR|FSCR_DSCR|FSCR_EBB
395 + ori r3,r3,FSCR_TAR|FSCR_EBB
396 mtspr SPRN_FSCR,r3
397 blr
398
399 diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
400 index ca92e01d0bd1b..f582aa2d98078 100644
401 --- a/arch/powerpc/perf/core-book3s.c
402 +++ b/arch/powerpc/perf/core-book3s.c
403 @@ -1522,9 +1522,16 @@ nocheck:
404 ret = 0;
405 out:
406 if (has_branch_stack(event)) {
407 - power_pmu_bhrb_enable(event);
408 - cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
409 - event->attr.branch_sample_type);
410 + u64 bhrb_filter = -1;
411 +
412 + if (ppmu->bhrb_filter_map)
413 + bhrb_filter = ppmu->bhrb_filter_map(
414 + event->attr.branch_sample_type);
415 +
416 + if (bhrb_filter != -1) {
417 + cpuhw->bhrb_filter = bhrb_filter;
418 + power_pmu_bhrb_enable(event);
419 + }
420 }
421
422 perf_pmu_enable(event->pmu);
423 @@ -1846,7 +1853,6 @@ static int power_pmu_event_init(struct perf_event *event)
424 int n;
425 int err;
426 struct cpu_hw_events *cpuhw;
427 - u64 bhrb_filter;
428
429 if (!ppmu)
430 return -ENOENT;
431 @@ -1952,7 +1958,10 @@ static int power_pmu_event_init(struct perf_event *event)
432 err = power_check_constraints(cpuhw, events, cflags, n + 1);
433
434 if (has_branch_stack(event)) {
435 - bhrb_filter = ppmu->bhrb_filter_map(
436 + u64 bhrb_filter = -1;
437 +
438 + if (ppmu->bhrb_filter_map)
439 + bhrb_filter = ppmu->bhrb_filter_map(
440 event->attr.branch_sample_type);
441
442 if (bhrb_filter == -1) {
443 @@ -2106,6 +2115,10 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
444
445 if (perf_event_overflow(event, &data, regs))
446 power_pmu_stop(event, 0);
447 + } else if (period) {
448 + /* Account for interrupt in case of invalid SIAR */
449 + if (perf_event_account_interrupt(event))
450 + power_pmu_stop(event, 0);
451 }
452 }
453
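
Note: not every powerpc PMU backend provides the bhrb_filter_map hook (generic/compat PMUs leave it NULL), so opening a branch-stack sampling event could call through a NULL pointer; both call sites now check the hook and reject the event cleanly. The record_and_restart() hunk is a separate fix: when SIAR is invalid the overflow interrupt is still accounted via perf_event_account_interrupt(), so an unhandled-interrupt storm cannot turn into a soft lockup.
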
454 diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
455 index 0f7c8241912b9..f2ff359041eec 100644
456 --- a/arch/powerpc/platforms/cell/Kconfig
457 +++ b/arch/powerpc/platforms/cell/Kconfig
458 @@ -44,6 +44,7 @@ config SPU_FS
459 tristate "SPU file system"
460 default m
461 depends on PPC_CELL
462 + depends on COREDUMP
463 select SPU_BASE
464 help
465 The SPU file system is used to access Synergistic Processing
466 diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
467 index 50e1a8e02497d..3fd086533dcfc 100644
468 --- a/arch/powerpc/sysdev/xive/native.c
469 +++ b/arch/powerpc/sysdev/xive/native.c
470 @@ -18,6 +18,7 @@
471 #include <linux/delay.h>
472 #include <linux/cpumask.h>
473 #include <linux/mm.h>
474 +#include <linux/kmemleak.h>
475
476 #include <asm/prom.h>
477 #include <asm/io.h>
478 @@ -646,6 +647,7 @@ static bool xive_native_provision_pages(void)
479 pr_err("Failed to allocate provisioning page\n");
480 return false;
481 }
482 + kmemleak_ignore(p);
483 opal_xive_donate_page(chip, __pa(p));
484 }
485 return true;
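
Note: pages donated to OPAL via opal_xive_donate_page() are handed over to firmware and intentionally never freed by the kernel, so kmemleak would report them as leaks on every scan; kmemleak_ignore() just tells the scanner to skip these allocations.
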
486 diff --git a/arch/s390/include/asm/numa.h b/arch/s390/include/asm/numa.h
487 index 35f8cbe7e5bb0..c759dcffa9eaf 100644
488 --- a/arch/s390/include/asm/numa.h
489 +++ b/arch/s390/include/asm/numa.h
490 @@ -17,7 +17,6 @@
491
492 void numa_setup(void);
493 int numa_pfn_to_nid(unsigned long pfn);
494 -int __node_distance(int a, int b);
495 void numa_update_cpu_topology(void);
496
497 extern cpumask_t node_to_cpumask_map[MAX_NUMNODES];
498 diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
499 index cca406fdbe51f..ef9dd253dfad0 100644
500 --- a/arch/s390/include/asm/topology.h
501 +++ b/arch/s390/include/asm/topology.h
502 @@ -83,8 +83,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
503
504 #define pcibus_to_node(bus) __pcibus_to_node(bus)
505
506 -#define node_distance(a, b) __node_distance(a, b)
507 -
508 #else /* !CONFIG_NUMA */
509
510 #define numa_node_id numa_node_id
511 diff --git a/arch/s390/numa/numa.c b/arch/s390/numa/numa.c
512 index d2910fa834c8a..8386c58fdb3a0 100644
513 --- a/arch/s390/numa/numa.c
514 +++ b/arch/s390/numa/numa.c
515 @@ -49,12 +49,6 @@ void numa_update_cpu_topology(void)
516 mode->update_cpu_topology();
517 }
518
519 -int __node_distance(int a, int b)
520 -{
521 - return mode->distance ? mode->distance(a, b) : 0;
522 -}
523 -EXPORT_SYMBOL(__node_distance);
524 -
525 int numa_debug_enabled;
526
527 /*
528 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
529 index 9674321ce3a3b..8367bd7a9a810 100644
530 --- a/arch/x86/kernel/smpboot.c
531 +++ b/arch/x86/kernel/smpboot.c
532 @@ -1599,14 +1599,28 @@ int native_cpu_disable(void)
533 if (ret)
534 return ret;
535
536 - /*
537 - * Disable the local APIC. Otherwise IPI broadcasts will reach
538 - * it. It still responds normally to INIT, NMI, SMI, and SIPI
539 - * messages.
540 - */
541 - apic_soft_disable();
542 cpu_disable_common();
543
544 + /*
545 + * Disable the local APIC. Otherwise IPI broadcasts will reach
546 + * it. It still responds normally to INIT, NMI, SMI, and SIPI
547 + * messages.
548 + *
549 + * Disabling the APIC must happen after cpu_disable_common()
550 + * which invokes fixup_irqs().
551 + *
552 + * Disabling the APIC preserves already set bits in IRR, but
553 + * an interrupt arriving after disabling the local APIC does not
554 + * set the corresponding IRR bit.
555 + *
556 + * fixup_irqs() scans IRR for set bits so it can raise a not
557 + * yet handled interrupt on the new destination CPU via an IPI
558 + * but obviously it can't do so for IRR bits which are not set.
559 + * IOW, interrupts arriving after disabling the local APIC will
560 + * be lost.
561 + */
562 + apic_soft_disable();
563 +
564 return 0;
565 }
566
567 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
568 index 12b707a4e52fd..342a1cfa48c57 100644
569 --- a/block/bfq-cgroup.c
570 +++ b/block/bfq-cgroup.c
571 @@ -332,7 +332,7 @@ static void bfqg_put(struct bfq_group *bfqg)
572 kfree(bfqg);
573 }
574
575 -void bfqg_and_blkg_get(struct bfq_group *bfqg)
576 +static void bfqg_and_blkg_get(struct bfq_group *bfqg)
577 {
578 /* see comments in bfq_bic_update_cgroup for why refcounting bfqg */
579 bfqg_get(bfqg);
580 diff --git a/block/bfq-iosched.h b/block/bfq-iosched.h
581 index c0232975075d0..de98fdfe9ea17 100644
582 --- a/block/bfq-iosched.h
583 +++ b/block/bfq-iosched.h
584 @@ -980,7 +980,6 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
585 struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
586 struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
587 struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
588 -void bfqg_and_blkg_get(struct bfq_group *bfqg);
589 void bfqg_and_blkg_put(struct bfq_group *bfqg);
590
591 #ifdef CONFIG_BFQ_GROUP_IOSCHED
592 diff --git a/block/bfq-wf2q.c b/block/bfq-wf2q.c
593 index 44079147e396e..05f0bf4a1144d 100644
594 --- a/block/bfq-wf2q.c
595 +++ b/block/bfq-wf2q.c
596 @@ -536,9 +536,7 @@ static void bfq_get_entity(struct bfq_entity *entity)
597 bfqq->ref++;
598 bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
599 bfqq, bfqq->ref);
600 - } else
601 - bfqg_and_blkg_get(container_of(entity, struct bfq_group,
602 - entity));
603 + }
604 }
605
606 /**
607 @@ -652,14 +650,8 @@ static void bfq_forget_entity(struct bfq_service_tree *st,
608
609 entity->on_st = false;
610 st->wsum -= entity->weight;
611 - if (is_in_service)
612 - return;
613 -
614 - if (bfqq)
615 + if (bfqq && !is_in_service)
616 bfq_put_queue(bfqq);
617 - else
618 - bfqg_and_blkg_put(container_of(entity, struct bfq_group,
619 - entity));
620 }
621
622 /**
623 diff --git a/block/bio.c b/block/bio.c
624 index 94d697217887a..87505a93bcff6 100644
625 --- a/block/bio.c
626 +++ b/block/bio.c
627 @@ -683,8 +683,8 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
628 struct page *page, unsigned int len, unsigned int off,
629 bool *same_page)
630 {
631 - phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) +
632 - bv->bv_offset + bv->bv_len - 1;
633 + size_t bv_end = bv->bv_offset + bv->bv_len;
634 + phys_addr_t vec_end_addr = page_to_phys(bv->bv_page) + bv_end - 1;
635 phys_addr_t page_addr = page_to_phys(page);
636
637 if (vec_end_addr + 1 != page_addr + off)
638 @@ -693,9 +693,9 @@ static inline bool page_is_mergeable(const struct bio_vec *bv,
639 return false;
640
641 *same_page = ((vec_end_addr & PAGE_MASK) == page_addr);
642 - if (!*same_page && pfn_to_page(PFN_DOWN(vec_end_addr)) + 1 != page)
643 - return false;
644 - return true;
645 + if (*same_page)
646 + return true;
647 + return (bv->bv_page + bv_end / PAGE_SIZE) == (page + off / PAGE_SIZE);
648 }
649
650 static bool bio_try_merge_pc_page(struct request_queue *q, struct bio *bio,
651 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
652 index 1eb8895be4c6b..0c7addcd19859 100644
653 --- a/block/blk-cgroup.c
654 +++ b/block/blk-cgroup.c
655 @@ -1219,13 +1219,15 @@ int blkcg_init_queue(struct request_queue *q)
656 if (preloaded)
657 radix_tree_preload_end();
658
659 - ret = blk_iolatency_init(q);
660 + ret = blk_throtl_init(q);
661 if (ret)
662 goto err_destroy_all;
663
664 - ret = blk_throtl_init(q);
665 - if (ret)
666 + ret = blk_iolatency_init(q);
667 + if (ret) {
668 + blk_throtl_exit(q);
669 goto err_destroy_all;
670 + }
671 return 0;
672
673 err_destroy_all:
674 diff --git a/block/blk-merge.c b/block/blk-merge.c
675 index 93cff719b0661..86c4c1ef87429 100644
676 --- a/block/blk-merge.c
677 +++ b/block/blk-merge.c
678 @@ -154,7 +154,7 @@ static inline unsigned get_max_io_size(struct request_queue *q,
679 if (max_sectors > start_offset)
680 return max_sectors - start_offset;
681
682 - return sectors & (lbs - 1);
683 + return sectors & ~(lbs - 1);
684 }
685
686 static inline unsigned get_max_segment_size(const struct request_queue *q,
687 @@ -553,10 +553,17 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
688 }
689 EXPORT_SYMBOL(blk_rq_map_sg);
690
691 +static inline unsigned int blk_rq_get_max_segments(struct request *rq)
692 +{
693 + if (req_op(rq) == REQ_OP_DISCARD)
694 + return queue_max_discard_segments(rq->q);
695 + return queue_max_segments(rq->q);
696 +}
697 +
698 static inline int ll_new_hw_segment(struct request *req, struct bio *bio,
699 unsigned int nr_phys_segs)
700 {
701 - if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(req->q))
702 + if (req->nr_phys_segments + nr_phys_segs > blk_rq_get_max_segments(req))
703 goto no_merge;
704
705 if (blk_integrity_merge_bio(req->q, req, bio) == false)
706 @@ -640,7 +647,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
707 return 0;
708
709 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
710 - if (total_phys_segments > queue_max_segments(q))
711 + if (total_phys_segments > blk_rq_get_max_segments(req))
712 return 0;
713
714 if (blk_integrity_merge_rq(q, req, next) == false)
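
Note: the get_max_io_size() change fixes an inverted mask. A quick worked example, assuming 4 KiB logical blocks (lbs = 8 sectors) and sectors = 255:

    255 &  (8 - 1) ==   7    /* old: only the misaligned remainder */
    255 & ~(8 - 1) == 248    /* new: rounded down to block alignment */

The helper is supposed to return the largest block-aligned I/O size, which only the second form does.
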
715 diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
716 index 74cedea560348..7620734d55429 100644
717 --- a/block/blk-mq-sched.c
718 +++ b/block/blk-mq-sched.c
719 @@ -77,6 +77,15 @@ void blk_mq_sched_restart(struct blk_mq_hw_ctx *hctx)
720 return;
721 clear_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
722
723 + /*
724 + * Order clearing SCHED_RESTART and list_empty_careful(&hctx->dispatch)
725 + * in blk_mq_run_hw_queue(). Its pair is the barrier in
726 + * blk_mq_dispatch_rq_list(). So dispatch code won't see SCHED_RESTART,
727 + * meantime new request added to hctx->dispatch is missed to check in
728 + * blk_mq_run_hw_queue().
729 + */
730 + smp_mb();
731 +
732 blk_mq_run_hw_queue(hctx, true);
733 }
734
735 diff --git a/block/blk-mq.c b/block/blk-mq.c
736 index ae7d31cb5a4e1..b748d1e63f9c8 100644
737 --- a/block/blk-mq.c
738 +++ b/block/blk-mq.c
739 @@ -1318,6 +1318,15 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
740 list_splice_tail_init(list, &hctx->dispatch);
741 spin_unlock(&hctx->lock);
742
743 + /*
744 + * Order adding requests to hctx->dispatch and checking
745 + * SCHED_RESTART flag. The pair of this smp_mb() is the one
746 + * in blk_mq_sched_restart(). Avoid restart code path to
747 + * miss the new added requests to hctx->dispatch, meantime
748 + * SCHED_RESTART is observed here.
749 + */
750 + smp_mb();
751 +
752 /*
753 * If SCHED_RESTART was set by the caller of this function and
754 * it is no longer set that means that it was cleared by another
755 @@ -1869,7 +1878,8 @@ insert:
756 if (bypass_insert)
757 return BLK_STS_RESOURCE;
758
759 - blk_mq_request_bypass_insert(rq, false, run_queue);
760 + blk_mq_sched_insert_request(rq, false, run_queue, false);
761 +
762 return BLK_STS_OK;
763 }
764
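
Note: the smp_mb() added here and the one in blk-mq-sched.c above form a classic store/load barrier pairing. The generic shape, as a sketch:

    /* CPU A (dispatch path)            CPU B (restart path)
     *
     * add request to ->dispatch        clear SCHED_RESTART bit
     * smp_mb();                        smp_mb();
     * re-check SCHED_RESTART bit       re-check ->dispatch list
     */

With both barriers in place at least one side must observe the other's update, so a request parked on ->dispatch cannot be missed by the dispatcher and the restart path simultaneously.
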
765 diff --git a/crypto/af_alg.c b/crypto/af_alg.c
766 index 35e026ba2c7ed..1d4b0157ee5dc 100644
767 --- a/crypto/af_alg.c
768 +++ b/crypto/af_alg.c
769 @@ -16,6 +16,7 @@
770 #include <linux/module.h>
771 #include <linux/net.h>
772 #include <linux/rwsem.h>
773 +#include <linux/sched.h>
774 #include <linux/sched/signal.h>
775 #include <linux/security.h>
776
777 @@ -847,9 +848,15 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
778 }
779
780 lock_sock(sk);
781 - if (ctx->init && (init || !ctx->more)) {
782 - err = -EINVAL;
783 - goto unlock;
784 + if (ctx->init && !ctx->more) {
785 + if (ctx->used) {
786 + err = -EINVAL;
787 + goto unlock;
788 + }
789 +
790 + pr_info_once(
791 + "%s sent an empty control message without MSG_MORE.\n",
792 + current->comm);
793 }
794 ctx->init = true;
795
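
Note: an earlier hardening change made af_alg_sendmsg() return -EINVAL whenever the previous message on an initialized socket lacked MSG_MORE. Some applications legitimately send an empty control message without MSG_MORE, so that case is now tolerated with a one-time diagnostic naming the program; only the variant with unprocessed data still queued keeps failing.
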
796 diff --git a/drivers/base/core.c b/drivers/base/core.c
797 index 7bd9cd366d411..94df2ba1bbed7 100644
798 --- a/drivers/base/core.c
799 +++ b/drivers/base/core.c
800 @@ -3400,9 +3400,9 @@ static inline bool fwnode_is_primary(struct fwnode_handle *fwnode)
801 */
802 void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
803 {
804 - if (fwnode) {
805 - struct fwnode_handle *fn = dev->fwnode;
806 + struct fwnode_handle *fn = dev->fwnode;
807
808 + if (fwnode) {
809 if (fwnode_is_primary(fn))
810 fn = fn->secondary;
811
812 @@ -3412,8 +3412,12 @@ void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode)
813 }
814 dev->fwnode = fwnode;
815 } else {
816 - dev->fwnode = fwnode_is_primary(dev->fwnode) ?
817 - dev->fwnode->secondary : NULL;
818 + if (fwnode_is_primary(fn)) {
819 + dev->fwnode = fn->secondary;
820 + fn->secondary = NULL;
821 + } else {
822 + dev->fwnode = NULL;
823 + }
824 }
825 }
826 EXPORT_SYMBOL_GPL(set_primary_fwnode);
827 diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
828 index 8646147dc1946..23af545120534 100644
829 --- a/drivers/base/power/main.c
830 +++ b/drivers/base/power/main.c
831 @@ -1728,13 +1728,17 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
832 }
833
834 /*
835 - * If a device configured to wake up the system from sleep states
836 - * has been suspended at run time and there's a resume request pending
837 - * for it, this is equivalent to the device signaling wakeup, so the
838 - * system suspend operation should be aborted.
839 + * Wait for possible runtime PM transitions of the device in progress
840 + * to complete and if there's a runtime resume request pending for it,
841 + * resume it before proceeding with invoking the system-wide suspend
842 + * callbacks for it.
843 + *
844 + * If the system-wide suspend callbacks below change the configuration
845 + * of the device, they must disable runtime PM for it or otherwise
846 + * ensure that its runtime-resume callbacks will not be confused by that
847 + * change in case they are invoked going forward.
848 */
849 - if (pm_runtime_barrier(dev) && device_may_wakeup(dev))
850 - pm_wakeup_event(dev, 0);
851 + pm_runtime_barrier(dev);
852
853 if (pm_wakeup_pending()) {
854 dev->power.direct_complete = false;
855 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
856 index bddbbf5b3dda2..ffbe792410d1c 100644
857 --- a/drivers/block/loop.c
858 +++ b/drivers/block/loop.c
859 @@ -863,6 +863,7 @@ static void loop_config_discard(struct loop_device *lo)
860 struct file *file = lo->lo_backing_file;
861 struct inode *inode = file->f_mapping->host;
862 struct request_queue *q = lo->lo_queue;
863 + u32 granularity, max_discard_sectors;
864
865 /*
866 * If the backing device is a block device, mirror its zeroing
867 @@ -875,11 +876,10 @@ static void loop_config_discard(struct loop_device *lo)
868 struct request_queue *backingq;
869
870 backingq = bdev_get_queue(inode->i_bdev);
871 - blk_queue_max_discard_sectors(q,
872 - backingq->limits.max_write_zeroes_sectors);
873
874 - blk_queue_max_write_zeroes_sectors(q,
875 - backingq->limits.max_write_zeroes_sectors);
876 + max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
877 + granularity = backingq->limits.discard_granularity ?:
878 + queue_physical_block_size(backingq);
879
880 /*
881 * We use punch hole to reclaim the free space used by the
882 @@ -888,23 +888,26 @@ static void loop_config_discard(struct loop_device *lo)
883 * useful information.
884 */
885 } else if (!file->f_op->fallocate || lo->lo_encrypt_key_size) {
886 - q->limits.discard_granularity = 0;
887 - q->limits.discard_alignment = 0;
888 - blk_queue_max_discard_sectors(q, 0);
889 - blk_queue_max_write_zeroes_sectors(q, 0);
890 + max_discard_sectors = 0;
891 + granularity = 0;
892
893 } else {
894 - q->limits.discard_granularity = inode->i_sb->s_blocksize;
895 - q->limits.discard_alignment = 0;
896 -
897 - blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
898 - blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
899 + max_discard_sectors = UINT_MAX >> 9;
900 + granularity = inode->i_sb->s_blocksize;
901 }
902
903 - if (q->limits.max_write_zeroes_sectors)
904 + if (max_discard_sectors) {
905 + q->limits.discard_granularity = granularity;
906 + blk_queue_max_discard_sectors(q, max_discard_sectors);
907 + blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
908 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
909 - else
910 + } else {
911 + q->limits.discard_granularity = 0;
912 + blk_queue_max_discard_sectors(q, 0);
913 + blk_queue_max_write_zeroes_sectors(q, 0);
914 blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
915 + }
916 + q->limits.discard_alignment = 0;
917 }
918
919 static void loop_unprepare_queue(struct loop_device *lo)
920 diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
921 index c4454cfc6d530..13eae973eaea4 100644
922 --- a/drivers/block/null_blk_main.c
923 +++ b/drivers/block/null_blk_main.c
924 @@ -1072,7 +1072,7 @@ static int null_handle_rq(struct nullb_cmd *cmd)
925 len = bvec.bv_len;
926 err = null_transfer(nullb, bvec.bv_page, len, bvec.bv_offset,
927 op_is_write(req_op(rq)), sector,
928 - req_op(rq) & REQ_FUA);
929 + rq->cmd_flags & REQ_FUA);
930 if (err) {
931 spin_unlock_irq(&nullb->lock);
932 return err;
933 diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
934 index c1de270046bfe..2eeb2bcb488d4 100644
935 --- a/drivers/block/virtio_blk.c
936 +++ b/drivers/block/virtio_blk.c
937 @@ -205,16 +205,31 @@ static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
938 if (!range)
939 return -ENOMEM;
940
941 - __rq_for_each_bio(bio, req) {
942 - u64 sector = bio->bi_iter.bi_sector;
943 - u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
944 -
945 - range[n].flags = cpu_to_le32(flags);
946 - range[n].num_sectors = cpu_to_le32(num_sectors);
947 - range[n].sector = cpu_to_le64(sector);
948 - n++;
949 + /*
950 + * Single max discard segment means multi-range discard isn't
951 + * supported, and block layer only runs contiguity merge like
952 + * normal RW request. So we can't reply on bio for retrieving
953 + * each range info.
954 + */
955 + if (queue_max_discard_segments(req->q) == 1) {
956 + range[0].flags = cpu_to_le32(flags);
957 + range[0].num_sectors = cpu_to_le32(blk_rq_sectors(req));
958 + range[0].sector = cpu_to_le64(blk_rq_pos(req));
959 + n = 1;
960 + } else {
961 + __rq_for_each_bio(bio, req) {
962 + u64 sector = bio->bi_iter.bi_sector;
963 + u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;
964 +
965 + range[n].flags = cpu_to_le32(flags);
966 + range[n].num_sectors = cpu_to_le32(num_sectors);
967 + range[n].sector = cpu_to_le64(sector);
968 + n++;
969 + }
970 }
971
972 + WARN_ON_ONCE(n != segments);
973 +
974 req->special_vec.bv_page = virt_to_page(range);
975 req->special_vec.bv_offset = offset_in_page(range);
976 req->special_vec.bv_len = sizeof(*range) * segments;
977 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
978 index 53dc0fd6f6d3c..927eb3fd23660 100644
979 --- a/drivers/cpufreq/intel_pstate.c
980 +++ b/drivers/cpufreq/intel_pstate.c
981 @@ -649,11 +649,12 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
982 mutex_lock(&intel_pstate_limits_lock);
983
984 if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
985 - u64 value;
986 -
987 - ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, &value);
988 - if (ret)
989 - goto return_pref;
990 + /*
991 + * Use the cached HWP Request MSR value, because the register
992 + * itself may be updated by intel_pstate_hwp_boost_up() or
993 + * intel_pstate_hwp_boost_down() at any time.
994 + */
995 + u64 value = READ_ONCE(cpu_data->hwp_req_cached);
996
997 value &= ~GENMASK_ULL(31, 24);
998
999 @@ -661,13 +662,18 @@ static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
1000 epp = epp_values[pref_index - 1];
1001
1002 value |= (u64)epp << 24;
1003 + /*
1004 + * The only other updater of hwp_req_cached in the active mode,
1005 + * intel_pstate_hwp_set(), is called under the same lock as this
1006 + * function, so it cannot run in parallel with the update below.
1007 + */
1008 + WRITE_ONCE(cpu_data->hwp_req_cached, value);
1009 ret = wrmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST, value);
1010 } else {
1011 if (epp == -EINVAL)
1012 epp = (pref_index - 1) << 2;
1013 ret = intel_pstate_set_epb(cpu_data->cpu, epp);
1014 }
1015 -return_pref:
1016 mutex_unlock(&intel_pstate_limits_lock);
1017
1018 return ret;
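
Note: hwp_req_cached is intel_pstate's software copy of MSR_HWP_REQUEST, which the dynamic-boost paths may rewrite at any moment; basing the EPP update on the cache (READ_ONCE) and publishing the new value (WRITE_ONCE) before writing the MSR keeps the copy and the hardware consistent without re-reading the register.
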
1019 diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c
1020 index 2e65d7279d79e..027769e39f9b8 100644
1021 --- a/drivers/devfreq/rk3399_dmc.c
1022 +++ b/drivers/devfreq/rk3399_dmc.c
1023 @@ -95,18 +95,20 @@ static int rk3399_dmcfreq_target(struct device *dev, unsigned long *freq,
1024
1025 mutex_lock(&dmcfreq->lock);
1026
1027 - if (target_rate >= dmcfreq->odt_dis_freq)
1028 - odt_enable = true;
1029 -
1030 - /*
1031 - * This makes a SMC call to the TF-A to set the DDR PD (power-down)
1032 - * timings and to enable or disable the ODT (on-die termination)
1033 - * resistors.
1034 - */
1035 - arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
1036 - dmcfreq->odt_pd_arg1,
1037 - ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
1038 - odt_enable, 0, 0, 0, &res);
1039 + if (dmcfreq->regmap_pmu) {
1040 + if (target_rate >= dmcfreq->odt_dis_freq)
1041 + odt_enable = true;
1042 +
1043 + /*
1044 + * This makes a SMC call to the TF-A to set the DDR PD
1045 + * (power-down) timings and to enable or disable the
1046 + * ODT (on-die termination) resistors.
1047 + */
1048 + arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, dmcfreq->odt_pd_arg0,
1049 + dmcfreq->odt_pd_arg1,
1050 + ROCKCHIP_SIP_CONFIG_DRAM_SET_ODT_PD,
1051 + odt_enable, 0, 0, 0, &res);
1052 + }
1053
1054 /*
1055 * If frequency scaling from low to high, adjust voltage first.
1056 @@ -364,16 +366,21 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
1057 if (res.a0) {
1058 dev_err(dev, "Failed to set dram param: %ld\n",
1059 res.a0);
1060 - return -EINVAL;
1061 + ret = -EINVAL;
1062 + goto err_edev;
1063 }
1064 }
1065 }
1066
1067 node = of_parse_phandle(np, "rockchip,pmu", 0);
1068 - if (node) {
1069 - data->regmap_pmu = syscon_node_to_regmap(node);
1070 - if (IS_ERR(data->regmap_pmu))
1071 - return PTR_ERR(data->regmap_pmu);
1072 + if (!node)
1073 + goto no_pmu;
1074 +
1075 + data->regmap_pmu = syscon_node_to_regmap(node);
1076 + of_node_put(node);
1077 + if (IS_ERR(data->regmap_pmu)) {
1078 + ret = PTR_ERR(data->regmap_pmu);
1079 + goto err_edev;
1080 }
1081
1082 regmap_read(data->regmap_pmu, RK3399_PMUGRF_OS_REG2, &val);
1083 @@ -391,9 +398,11 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
1084 data->odt_dis_freq = data->timing.lpddr4_odt_dis_freq;
1085 break;
1086 default:
1087 - return -EINVAL;
1088 + ret = -EINVAL;
1089 + goto err_edev;
1090 };
1091
1092 +no_pmu:
1093 arm_smccc_smc(ROCKCHIP_SIP_DRAM_FREQ, 0, 0,
1094 ROCKCHIP_SIP_CONFIG_DRAM_INIT,
1095 0, 0, 0, 0, &res);
1096 @@ -425,7 +434,8 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
1097 */
1098 if (dev_pm_opp_of_add_table(dev)) {
1099 dev_err(dev, "Invalid operating-points in device tree.\n");
1100 - return -EINVAL;
1101 + ret = -EINVAL;
1102 + goto err_edev;
1103 }
1104
1105 of_property_read_u32(np, "upthreshold",
1106 @@ -465,6 +475,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev)
1107
1108 err_free_opp:
1109 dev_pm_opp_of_remove_table(&pdev->dev);
1110 +err_edev:
1111 + devfreq_event_disable_edev(data->edev);
1112 +
1113 return ret;
1114 }
1115
1116 diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
1117 index a71cca6eeb333..6be7e65f7389d 100644
1118 --- a/drivers/edac/i7core_edac.c
1119 +++ b/drivers/edac/i7core_edac.c
1120 @@ -1711,9 +1711,9 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1121 if (uncorrected_error) {
1122 core_err_cnt = 1;
1123 if (ripv)
1124 - tp_event = HW_EVENT_ERR_FATAL;
1125 - else
1126 tp_event = HW_EVENT_ERR_UNCORRECTED;
1127 + else
1128 + tp_event = HW_EVENT_ERR_FATAL;
1129 } else {
1130 tp_event = HW_EVENT_ERR_CORRECTED;
1131 }
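
Note: this hunk, and the matching pnd2_edac/sb_edac/skx_common hunks below, fix an inverted severity mapping. In the x86 machine-check architecture MCG_STATUS.RIPV set means the saved instruction pointer is valid and execution can resume, so the error is uncorrected but recoverable; RIPV clear means execution cannot continue, which is the fatal case. The old code reported exactly the opposite.
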
1132 diff --git a/drivers/edac/ie31200_edac.c b/drivers/edac/ie31200_edac.c
1133 index d26300f9cb07d..9be43b4f9c506 100644
1134 --- a/drivers/edac/ie31200_edac.c
1135 +++ b/drivers/edac/ie31200_edac.c
1136 @@ -170,6 +170,8 @@
1137 (n << (28 + (2 * skl) - PAGE_SHIFT))
1138
1139 static int nr_channels;
1140 +static struct pci_dev *mci_pdev;
1141 +static int ie31200_registered = 1;
1142
1143 struct ie31200_priv {
1144 void __iomem *window;
1145 @@ -541,12 +543,16 @@ fail_free:
1146 static int ie31200_init_one(struct pci_dev *pdev,
1147 const struct pci_device_id *ent)
1148 {
1149 - edac_dbg(0, "MC:\n");
1150 + int rc;
1151
1152 + edac_dbg(0, "MC:\n");
1153 if (pci_enable_device(pdev) < 0)
1154 return -EIO;
1155 + rc = ie31200_probe1(pdev, ent->driver_data);
1156 + if (rc == 0 && !mci_pdev)
1157 + mci_pdev = pci_dev_get(pdev);
1158
1159 - return ie31200_probe1(pdev, ent->driver_data);
1160 + return rc;
1161 }
1162
1163 static void ie31200_remove_one(struct pci_dev *pdev)
1164 @@ -555,6 +561,8 @@ static void ie31200_remove_one(struct pci_dev *pdev)
1165 struct ie31200_priv *priv;
1166
1167 edac_dbg(0, "\n");
1168 + pci_dev_put(mci_pdev);
1169 + mci_pdev = NULL;
1170 mci = edac_mc_del_mc(&pdev->dev);
1171 if (!mci)
1172 return;
1173 @@ -596,17 +604,53 @@ static struct pci_driver ie31200_driver = {
1174
1175 static int __init ie31200_init(void)
1176 {
1177 + int pci_rc, i;
1178 +
1179 edac_dbg(3, "MC:\n");
1180 /* Ensure that the OPSTATE is set correctly for POLL or NMI */
1181 opstate_init();
1182
1183 - return pci_register_driver(&ie31200_driver);
1184 + pci_rc = pci_register_driver(&ie31200_driver);
1185 + if (pci_rc < 0)
1186 + goto fail0;
1187 +
1188 + if (!mci_pdev) {
1189 + ie31200_registered = 0;
1190 + for (i = 0; ie31200_pci_tbl[i].vendor != 0; i++) {
1191 + mci_pdev = pci_get_device(ie31200_pci_tbl[i].vendor,
1192 + ie31200_pci_tbl[i].device,
1193 + NULL);
1194 + if (mci_pdev)
1195 + break;
1196 + }
1197 + if (!mci_pdev) {
1198 + edac_dbg(0, "ie31200 pci_get_device fail\n");
1199 + pci_rc = -ENODEV;
1200 + goto fail1;
1201 + }
1202 + pci_rc = ie31200_init_one(mci_pdev, &ie31200_pci_tbl[i]);
1203 + if (pci_rc < 0) {
1204 + edac_dbg(0, "ie31200 init fail\n");
1205 + pci_rc = -ENODEV;
1206 + goto fail1;
1207 + }
1208 + }
1209 + return 0;
1210 +
1211 +fail1:
1212 + pci_unregister_driver(&ie31200_driver);
1213 +fail0:
1214 + pci_dev_put(mci_pdev);
1215 +
1216 + return pci_rc;
1217 }
1218
1219 static void __exit ie31200_exit(void)
1220 {
1221 edac_dbg(3, "MC:\n");
1222 pci_unregister_driver(&ie31200_driver);
1223 + if (!ie31200_registered)
1224 + ie31200_remove_one(mci_pdev);
1225 }
1226
1227 module_init(ie31200_init);
1228 diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c
1229 index b1193be1ef1d8..dac45e2071b3f 100644
1230 --- a/drivers/edac/pnd2_edac.c
1231 +++ b/drivers/edac/pnd2_edac.c
1232 @@ -1155,7 +1155,7 @@ static void pnd2_mce_output_error(struct mem_ctl_info *mci, const struct mce *m,
1233 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1234 int rc;
1235
1236 - tp_event = uc_err ? (ripv ? HW_EVENT_ERR_FATAL : HW_EVENT_ERR_UNCORRECTED) :
1237 + tp_event = uc_err ? (ripv ? HW_EVENT_ERR_UNCORRECTED : HW_EVENT_ERR_FATAL) :
1238 HW_EVENT_ERR_CORRECTED;
1239
1240 /*
1241 diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
1242 index f743502ca9b72..b557a53c75c46 100644
1243 --- a/drivers/edac/sb_edac.c
1244 +++ b/drivers/edac/sb_edac.c
1245 @@ -254,18 +254,20 @@ static const u32 rir_offset[MAX_RIR_RANGES][MAX_RIR_WAY] = {
1246 * FIXME: Implement the error count reads directly
1247 */
1248
1249 -static const u32 correrrcnt[] = {
1250 - 0x104, 0x108, 0x10c, 0x110,
1251 -};
1252 -
1253 #define RANK_ODD_OV(reg) GET_BITFIELD(reg, 31, 31)
1254 #define RANK_ODD_ERR_CNT(reg) GET_BITFIELD(reg, 16, 30)
1255 #define RANK_EVEN_OV(reg) GET_BITFIELD(reg, 15, 15)
1256 #define RANK_EVEN_ERR_CNT(reg) GET_BITFIELD(reg, 0, 14)
1257
1258 +#if 0 /* Currently unused*/
1259 +static const u32 correrrcnt[] = {
1260 + 0x104, 0x108, 0x10c, 0x110,
1261 +};
1262 +
1263 static const u32 correrrthrsld[] = {
1264 0x11c, 0x120, 0x124, 0x128,
1265 };
1266 +#endif
1267
1268 #define RANK_ODD_ERR_THRSLD(reg) GET_BITFIELD(reg, 16, 30)
1269 #define RANK_EVEN_ERR_THRSLD(reg) GET_BITFIELD(reg, 0, 14)
1270 @@ -1340,7 +1342,7 @@ static void knl_show_mc_route(u32 reg, char *s)
1271 */
1272 static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1273 {
1274 - u64 sad_base, sad_size, sad_limit = 0;
1275 + u64 sad_base, sad_limit = 0;
1276 u64 tad_base, tad_size, tad_limit, tad_deadspace, tad_livespace;
1277 int sad_rule = 0;
1278 int tad_rule = 0;
1279 @@ -1427,7 +1429,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes)
1280 edram_only = KNL_EDRAM_ONLY(dram_rule);
1281
1282 sad_limit = pvt->info.sad_limit(dram_rule)+1;
1283 - sad_size = sad_limit - sad_base;
1284
1285 pci_read_config_dword(pvt->pci_sad0,
1286 pvt->info.interleave_list[sad_rule], &interleave_reg);
1287 @@ -2952,7 +2953,7 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1288 struct mem_ctl_info *new_mci;
1289 struct sbridge_pvt *pvt = mci->pvt_info;
1290 enum hw_event_mc_err_type tp_event;
1291 - char *type, *optype, msg[256];
1292 + char *optype, msg[256];
1293 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1294 bool overflow = GET_BITFIELD(m->status, 62, 62);
1295 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
1296 @@ -2981,14 +2982,11 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1297 if (uncorrected_error) {
1298 core_err_cnt = 1;
1299 if (ripv) {
1300 - type = "FATAL";
1301 - tp_event = HW_EVENT_ERR_FATAL;
1302 - } else {
1303 - type = "NON_FATAL";
1304 tp_event = HW_EVENT_ERR_UNCORRECTED;
1305 + } else {
1306 + tp_event = HW_EVENT_ERR_FATAL;
1307 }
1308 } else {
1309 - type = "CORRECTED";
1310 tp_event = HW_EVENT_ERR_CORRECTED;
1311 }
1312
1313 @@ -3200,7 +3198,6 @@ static struct notifier_block sbridge_mce_dec = {
1314 static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1315 {
1316 struct mem_ctl_info *mci = sbridge_dev->mci;
1317 - struct sbridge_pvt *pvt;
1318
1319 if (unlikely(!mci || !mci->pvt_info)) {
1320 edac_dbg(0, "MC: dev = %p\n", &sbridge_dev->pdev[0]->dev);
1321 @@ -3209,8 +3206,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1322 return;
1323 }
1324
1325 - pvt = mci->pvt_info;
1326 -
1327 edac_dbg(0, "MC: mci = %p, dev = %p\n",
1328 mci, &sbridge_dev->pdev[0]->dev);
1329
1330 diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c
1331 index 2177ad765bd16..99dea4f66b5e9 100644
1332 --- a/drivers/edac/skx_common.c
1333 +++ b/drivers/edac/skx_common.c
1334 @@ -475,7 +475,7 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
1335 struct decoded_addr *res)
1336 {
1337 enum hw_event_mc_err_type tp_event;
1338 - char *type, *optype;
1339 + char *optype;
1340 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1341 bool overflow = GET_BITFIELD(m->status, 62, 62);
1342 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
1343 @@ -490,14 +490,11 @@ static void skx_mce_output_error(struct mem_ctl_info *mci,
1344 if (uncorrected_error) {
1345 core_err_cnt = 1;
1346 if (ripv) {
1347 - type = "FATAL";
1348 - tp_event = HW_EVENT_ERR_FATAL;
1349 - } else {
1350 - type = "NON_FATAL";
1351 tp_event = HW_EVENT_ERR_UNCORRECTED;
1352 + } else {
1353 + tp_event = HW_EVENT_ERR_FATAL;
1354 }
1355 } else {
1356 - type = "CORRECTED";
1357 tp_event = HW_EVENT_ERR_CORRECTED;
1358 }
1359
1360 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1361 index ece55c8fa6733..cda0a76a733d3 100644
1362 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1363 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
1364 @@ -719,8 +719,10 @@ amdgpu_connector_lvds_detect(struct drm_connector *connector, bool force)
1365
1366 if (!drm_kms_helper_is_poll_worker()) {
1367 r = pm_runtime_get_sync(connector->dev->dev);
1368 - if (r < 0)
1369 + if (r < 0) {
1370 + pm_runtime_put_autosuspend(connector->dev->dev);
1371 return connector_status_disconnected;
1372 + }
1373 }
1374
1375 if (encoder) {
1376 @@ -857,8 +859,10 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
1377
1378 if (!drm_kms_helper_is_poll_worker()) {
1379 r = pm_runtime_get_sync(connector->dev->dev);
1380 - if (r < 0)
1381 + if (r < 0) {
1382 + pm_runtime_put_autosuspend(connector->dev->dev);
1383 return connector_status_disconnected;
1384 + }
1385 }
1386
1387 encoder = amdgpu_connector_best_single_encoder(connector);
1388 @@ -980,8 +984,10 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
1389
1390 if (!drm_kms_helper_is_poll_worker()) {
1391 r = pm_runtime_get_sync(connector->dev->dev);
1392 - if (r < 0)
1393 + if (r < 0) {
1394 + pm_runtime_put_autosuspend(connector->dev->dev);
1395 return connector_status_disconnected;
1396 + }
1397 }
1398
1399 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
1400 @@ -1330,8 +1336,10 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
1401
1402 if (!drm_kms_helper_is_poll_worker()) {
1403 r = pm_runtime_get_sync(connector->dev->dev);
1404 - if (r < 0)
1405 + if (r < 0) {
1406 + pm_runtime_put_autosuspend(connector->dev->dev);
1407 return connector_status_disconnected;
1408 + }
1409 }
1410
1411 if (!force && amdgpu_connector_check_hpd_status_unchanged(connector)) {
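
Note: pm_runtime_get_sync() bumps the device usage counter even when it fails, so every error path must still drop the reference, which is what the added pm_runtime_put_autosuspend() calls (and the goto-based unwinds in the amdgpu hunks below) do. Later kernels add pm_runtime_resume_and_get(), which releases the reference on failure itself and avoids this recurring pitfall.
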
1412 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1413 index 82efc1e22e611..e0aed42d9cbda 100644
1414 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1415 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
1416 @@ -282,7 +282,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
1417
1418 ret = pm_runtime_get_sync(dev->dev);
1419 if (ret < 0)
1420 - return ret;
1421 + goto out;
1422
1423 ret = drm_crtc_helper_set_config(set, ctx);
1424
1425 @@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
1426 take the current one */
1427 if (active && !adev->have_disp_power_ref) {
1428 adev->have_disp_power_ref = true;
1429 - return ret;
1430 + goto out;
1431 }
1432 /* if we have no active crtcs, then drop the power ref
1433 we got before */
1434 @@ -306,6 +306,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set,
1435 adev->have_disp_power_ref = false;
1436 }
1437
1438 +out:
1439 /* drop the power reference we got coming in here */
1440 pm_runtime_put_autosuspend(dev->dev);
1441 return ret;
1442 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1443 index 05d114a72ca1e..fa2c0f29ad4de 100644
1444 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1445 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
1446 @@ -1286,11 +1286,12 @@ long amdgpu_drm_ioctl(struct file *filp,
1447 dev = file_priv->minor->dev;
1448 ret = pm_runtime_get_sync(dev->dev);
1449 if (ret < 0)
1450 - return ret;
1451 + goto out;
1452
1453 ret = drm_ioctl(filp, cmd, arg);
1454
1455 pm_runtime_mark_last_busy(dev->dev);
1456 +out:
1457 pm_runtime_put_autosuspend(dev->dev);
1458 return ret;
1459 }
1460 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1461 index 2a7da26008a27..59fd9ebf3a58b 100644
1462 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1463 +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
1464 @@ -638,8 +638,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
1465 * in the bitfields */
1466 if (se_num == AMDGPU_INFO_MMR_SE_INDEX_MASK)
1467 se_num = 0xffffffff;
1468 + else if (se_num >= AMDGPU_GFX_MAX_SE)
1469 + return -EINVAL;
1470 if (sh_num == AMDGPU_INFO_MMR_SH_INDEX_MASK)
1471 sh_num = 0xffffffff;
1472 + else if (sh_num >= AMDGPU_GFX_MAX_SH_PER_SE)
1473 + return -EINVAL;
1474
1475 if (info->read_mmr_reg.count > 128)
1476 return -EINVAL;
1477 @@ -976,7 +980,7 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
1478
1479 r = pm_runtime_get_sync(dev->dev);
1480 if (r < 0)
1481 - return r;
1482 + goto pm_put;
1483
1484 fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
1485 if (unlikely(!fpriv)) {
1486 @@ -1027,6 +1031,7 @@ error_pasid:
1487
1488 out_suspend:
1489 pm_runtime_mark_last_busy(dev->dev);
1490 +pm_put:
1491 pm_runtime_put_autosuspend(dev->dev);
1492
1493 return r;
1494 diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1495 index 64d96eb0a2337..19876c90be0e1 100644
1496 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1497 +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
1498 @@ -4094,10 +4094,8 @@ static void gfx_v10_0_update_medium_grain_clock_gating(struct amdgpu_device *ade
1499 def = data = RREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE);
1500 data &= ~(RLC_CGTT_MGCG_OVERRIDE__GRBM_CGTT_SCLK_OVERRIDE_MASK |
1501 RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGCG_OVERRIDE_MASK |
1502 - RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK);
1503 -
1504 - /* only for Vega10 & Raven1 */
1505 - data |= RLC_CGTT_MGCG_OVERRIDE__RLC_CGTT_SCLK_OVERRIDE_MASK;
1506 + RLC_CGTT_MGCG_OVERRIDE__GFXIP_MGLS_OVERRIDE_MASK |
1507 + RLC_CGTT_MGCG_OVERRIDE__ENABLE_CGTS_LEGACY_MASK);
1508
1509 if (def != data)
1510 WREG32_SOC15(GC, 0, mmRLC_CGTT_MGCG_OVERRIDE, data);
1511 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1512 index 7551761f2aa97..a49e2ab071d68 100644
1513 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1514 +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
1515 @@ -612,8 +612,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
1516
1517 ret = kobject_init_and_add(dev->kobj_node, &node_type,
1518 sys_props.kobj_nodes, "%d", id);
1519 - if (ret < 0)
1520 + if (ret < 0) {
1521 + kobject_put(dev->kobj_node);
1522 return ret;
1523 + }
1524
1525 dev->kobj_mem = kobject_create_and_add("mem_banks", dev->kobj_node);
1526 if (!dev->kobj_mem)
1527 @@ -660,8 +662,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
1528 return -ENOMEM;
1529 ret = kobject_init_and_add(mem->kobj, &mem_type,
1530 dev->kobj_mem, "%d", i);
1531 - if (ret < 0)
1532 + if (ret < 0) {
1533 + kobject_put(mem->kobj);
1534 return ret;
1535 + }
1536
1537 mem->attr.name = "properties";
1538 mem->attr.mode = KFD_SYSFS_FILE_MODE;
1539 @@ -679,8 +683,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
1540 return -ENOMEM;
1541 ret = kobject_init_and_add(cache->kobj, &cache_type,
1542 dev->kobj_cache, "%d", i);
1543 - if (ret < 0)
1544 + if (ret < 0) {
1545 + kobject_put(cache->kobj);
1546 return ret;
1547 + }
1548
1549 cache->attr.name = "properties";
1550 cache->attr.mode = KFD_SYSFS_FILE_MODE;
1551 @@ -698,8 +704,10 @@ static int kfd_build_sysfs_node_entry(struct kfd_topology_device *dev,
1552 return -ENOMEM;
1553 ret = kobject_init_and_add(iolink->kobj, &iolink_type,
1554 dev->kobj_iolink, "%d", i);
1555 - if (ret < 0)
1556 + if (ret < 0) {
1557 + kobject_put(iolink->kobj);
1558 return ret;
1559 + }
1560
1561 iolink->attr.name = "properties";
1562 iolink->attr.mode = KFD_SYSFS_FILE_MODE;
1563 @@ -779,8 +787,10 @@ static int kfd_topology_update_sysfs(void)
1564 ret = kobject_init_and_add(sys_props.kobj_topology,
1565 &sysprops_type, &kfd_device->kobj,
1566 "topology");
1567 - if (ret < 0)
1568 + if (ret < 0) {
1569 + kobject_put(sys_props.kobj_topology);
1570 return ret;
1571 + }
1572
1573 sys_props.kobj_nodes = kobject_create_and_add("nodes",
1574 sys_props.kobj_topology);
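
Note: once kobject_init_and_add() has run, the kobject holds a reference even on failure, and bailing out without kobject_put() leaks it (its ktype release never runs). The canonical error shape the hunks above adopt, as a sketch with hypothetical names:

    #include <linux/kobject.h>

    static int add_child_kobject(struct kobject *kobj, struct kobj_type *ktype,
                                 struct kobject *parent, int id)
    {
            int ret = kobject_init_and_add(kobj, ktype, parent, "%d", id);

            if (ret)
                    kobject_put(kobj);      /* drops the ref; ->release frees it */
            return ret;
    }
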
1575 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1576 index 2c0eb7140ca0e..247f53d41993d 100644
1577 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1578 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
1579 @@ -7229,6 +7229,30 @@ cleanup:
1580 *out_type = update_type;
1581 return ret;
1582 }
1583 +#if defined(CONFIG_DRM_AMD_DC_DCN)
1584 +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc)
1585 +{
1586 + struct drm_connector *connector;
1587 + struct drm_connector_state *conn_state;
1588 + struct amdgpu_dm_connector *aconnector = NULL;
1589 + int i;
1590 + for_each_new_connector_in_state(state, connector, conn_state, i) {
1591 + if (conn_state->crtc != crtc)
1592 + continue;
1593 +
1594 + aconnector = to_amdgpu_dm_connector(connector);
1595 + if (!aconnector->port || !aconnector->mst_port)
1596 + aconnector = NULL;
1597 + else
1598 + break;
1599 + }
1600 +
1601 + if (!aconnector)
1602 + return 0;
1603 +
1604 + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_port->mst_mgr);
1605 +}
1606 +#endif
1607
1608 /**
1609 * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM.
1610 @@ -7282,6 +7306,40 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
1611 if (ret)
1612 goto fail;
1613
1614 + /* Check connector changes */
1615 + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
1616 + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
1617 + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
1618 +
1619 + /* Skip connectors that are disabled or part of modeset already. */
1620 + if (!old_con_state->crtc && !new_con_state->crtc)
1621 + continue;
1622 +
1623 + if (!new_con_state->crtc)
1624 + continue;
1625 +
1626 + new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc);
1627 + if (IS_ERR(new_crtc_state)) {
1628 + ret = PTR_ERR(new_crtc_state);
1629 + goto fail;
1630 + }
1631 +
1632 + if (dm_old_con_state->abm_level !=
1633 + dm_new_con_state->abm_level)
1634 + new_crtc_state->connectors_changed = true;
1635 + }
1636 +
1637 +#if defined(CONFIG_DRM_AMD_DC_DCN)
1638 + if (adev->asic_type >= CHIP_NAVI10) {
1639 + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1640 + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
1641 + ret = add_affected_mst_dsc_crtcs(state, crtc);
1642 + if (ret)
1643 + goto fail;
1644 + }
1645 + }
1646 + }
1647 +#endif
1648 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
1649 if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
1650 !new_crtc_state->color_mgmt_changed &&
1651 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
1652 index 9aa258f3550b6..ddf66046616d6 100644
1653 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
1654 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
1655 @@ -121,35 +121,35 @@ void enc1_update_generic_info_packet(
1656 switch (packet_index) {
1657 case 0:
1658 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1659 - AFMT_GENERIC0_FRAME_UPDATE, 1);
1660 + AFMT_GENERIC0_IMMEDIATE_UPDATE, 1);
1661 break;
1662 case 1:
1663 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1664 - AFMT_GENERIC1_FRAME_UPDATE, 1);
1665 + AFMT_GENERIC1_IMMEDIATE_UPDATE, 1);
1666 break;
1667 case 2:
1668 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1669 - AFMT_GENERIC2_FRAME_UPDATE, 1);
1670 + AFMT_GENERIC2_IMMEDIATE_UPDATE, 1);
1671 break;
1672 case 3:
1673 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1674 - AFMT_GENERIC3_FRAME_UPDATE, 1);
1675 + AFMT_GENERIC3_IMMEDIATE_UPDATE, 1);
1676 break;
1677 case 4:
1678 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1679 - AFMT_GENERIC4_FRAME_UPDATE, 1);
1680 + AFMT_GENERIC4_IMMEDIATE_UPDATE, 1);
1681 break;
1682 case 5:
1683 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1684 - AFMT_GENERIC5_FRAME_UPDATE, 1);
1685 + AFMT_GENERIC5_IMMEDIATE_UPDATE, 1);
1686 break;
1687 case 6:
1688 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1689 - AFMT_GENERIC6_FRAME_UPDATE, 1);
1690 + AFMT_GENERIC6_IMMEDIATE_UPDATE, 1);
1691 break;
1692 case 7:
1693 REG_UPDATE(AFMT_VBI_PACKET_CONTROL1,
1694 - AFMT_GENERIC7_FRAME_UPDATE, 1);
1695 + AFMT_GENERIC7_IMMEDIATE_UPDATE, 1);
1696 break;
1697 default:
1698 break;
1699 diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
1700 index a512cbea00d17..b9656614950e3 100644
1701 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
1702 +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h
1703 @@ -275,7 +275,14 @@ struct dcn10_stream_enc_registers {
1704 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_FRAME_UPDATE, mask_sh),\
1705 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_FRAME_UPDATE, mask_sh),\
1706 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_FRAME_UPDATE, mask_sh),\
1707 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC0_IMMEDIATE_UPDATE, mask_sh),\
1708 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC1_IMMEDIATE_UPDATE, mask_sh),\
1709 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC2_IMMEDIATE_UPDATE, mask_sh),\
1710 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC3_IMMEDIATE_UPDATE, mask_sh),\
1711 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC4_IMMEDIATE_UPDATE, mask_sh),\
1712 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_IMMEDIATE_UPDATE, mask_sh),\
1713 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_IMMEDIATE_UPDATE, mask_sh),\
1714 + SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_IMMEDIATE_UPDATE, mask_sh),\
1715 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC5_FRAME_UPDATE, mask_sh),\
1716 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC6_FRAME_UPDATE, mask_sh),\
1717 SE_SF(DIG0_AFMT_VBI_PACKET_CONTROL1, AFMT_GENERIC7_FRAME_UPDATE, mask_sh),\
1718 @@ -339,7 +346,14 @@ struct dcn10_stream_enc_registers {
1719 type AFMT_GENERIC2_FRAME_UPDATE;\
1720 type AFMT_GENERIC3_FRAME_UPDATE;\
1721 type AFMT_GENERIC4_FRAME_UPDATE;\
1722 + type AFMT_GENERIC0_IMMEDIATE_UPDATE;\
1723 + type AFMT_GENERIC1_IMMEDIATE_UPDATE;\
1724 + type AFMT_GENERIC2_IMMEDIATE_UPDATE;\
1725 + type AFMT_GENERIC3_IMMEDIATE_UPDATE;\
1726 type AFMT_GENERIC4_IMMEDIATE_UPDATE;\
1727 + type AFMT_GENERIC5_IMMEDIATE_UPDATE;\
1728 + type AFMT_GENERIC6_IMMEDIATE_UPDATE;\
1729 + type AFMT_GENERIC7_IMMEDIATE_UPDATE;\
1730 type AFMT_GENERIC5_FRAME_UPDATE;\
1731 type AFMT_GENERIC6_FRAME_UPDATE;\
1732 type AFMT_GENERIC7_FRAME_UPDATE;\
1733 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
1734 index fed3fc4bb57a9..6322e57893db2 100644
1735 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
1736 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c
1737 @@ -209,8 +209,7 @@ static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clo
1738 {
1739 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1740
1741 - if (smu10_data->need_min_deep_sleep_dcefclk &&
1742 - smu10_data->deep_sleep_dcefclk != clock) {
1743 + if (clock && smu10_data->deep_sleep_dcefclk != clock) {
1744 smu10_data->deep_sleep_dcefclk = clock;
1745 smum_send_msg_to_smc_with_parameter(hwmgr,
1746 PPSMC_MSG_SetMinDeepSleepDcefclk,
1747 @@ -223,8 +222,7 @@ static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t c
1748 {
1749 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1750
1751 - if (smu10_data->dcf_actual_hard_min_freq &&
1752 - smu10_data->dcf_actual_hard_min_freq != clock) {
1753 + if (clock && smu10_data->dcf_actual_hard_min_freq != clock) {
1754 smu10_data->dcf_actual_hard_min_freq = clock;
1755 smum_send_msg_to_smc_with_parameter(hwmgr,
1756 PPSMC_MSG_SetHardMinDcefclkByFreq,
1757 @@ -237,8 +235,7 @@ static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t cloc
1758 {
1759 struct smu10_hwmgr *smu10_data = (struct smu10_hwmgr *)(hwmgr->backend);
1760
1761 - if (smu10_data->f_actual_hard_min_freq &&
1762 - smu10_data->f_actual_hard_min_freq != clock) {
1763 + if (clock && smu10_data->f_actual_hard_min_freq != clock) {
1764 smu10_data->f_actual_hard_min_freq = clock;
1765 smum_send_msg_to_smc_with_parameter(hwmgr,
1766 PPSMC_MSG_SetHardMinFclkByFreq,
1767 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
1768 index ba8763daa3808..36a17caa3761d 100644
1769 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
1770 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_thermal.c
1771 @@ -364,17 +364,19 @@ int vega10_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1772 static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
1773 struct PP_TemperatureRange *range)
1774 {
1775 + struct phm_ppt_v2_information *pp_table_info =
1776 + (struct phm_ppt_v2_information *)(hwmgr->pptable);
1777 + struct phm_tdp_table *tdp_table = pp_table_info->tdp_table;
1778 struct amdgpu_device *adev = hwmgr->adev;
1779 - int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP *
1780 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1781 - int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP *
1782 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1783 + int low = VEGA10_THERMAL_MINIMUM_ALERT_TEMP;
1784 + int high = VEGA10_THERMAL_MAXIMUM_ALERT_TEMP;
1785 uint32_t val;
1786
1787 - if (low < range->min)
1788 - low = range->min;
1789 - if (high > range->max)
1790 - high = range->max;
1791 + /* compare them in degrees Celsius */
1792 + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
1793 + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1794 + if (high > tdp_table->usSoftwareShutdownTemp)
1795 + high = tdp_table->usSoftwareShutdownTemp;
1796
1797 if (low > high)
1798 return -EINVAL;
1799 @@ -383,8 +385,8 @@ static int vega10_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
1800
1801 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1802 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1803 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1804 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1805 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
1806 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
1807 val &= (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK) &
1808 (~THM_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK) &
1809 (~THM_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
1810 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
1811 index 904eb2c9155b4..40e7c72eeae00 100644
1812 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
1813 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_thermal.c
1814 @@ -170,17 +170,18 @@ int vega12_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1815 static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
1816 struct PP_TemperatureRange *range)
1817 {
1818 + struct phm_ppt_v3_information *pptable_information =
1819 + (struct phm_ppt_v3_information *)hwmgr->pptable;
1820 struct amdgpu_device *adev = hwmgr->adev;
1821 - int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP *
1822 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1823 - int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP *
1824 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1825 + int low = VEGA12_THERMAL_MINIMUM_ALERT_TEMP;
1826 + int high = VEGA12_THERMAL_MAXIMUM_ALERT_TEMP;
1827 uint32_t val;
1828
1829 - if (low < range->min)
1830 - low = range->min;
1831 - if (high > range->max)
1832 - high = range->max;
1833 + /* compare them in degrees Celsius */
1834 + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
1835 + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1836 + if (high > pptable_information->us_software_shutdown_temp)
1837 + high = pptable_information->us_software_shutdown_temp;
1838
1839 if (low > high)
1840 return -EINVAL;
1841 @@ -189,8 +190,8 @@ static int vega12_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
1842
1843 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1844 val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1845 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1846 - val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1847 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
1848 + val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
1849 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1850
1851 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1852 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1853 index f5915308e643a..947e4fa3c5e68 100644
1854 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1855 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
1856 @@ -981,27 +981,15 @@ static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
1857 {
1858 struct vega20_hwmgr *data =
1859 (struct vega20_hwmgr *)(hwmgr->backend);
1860 - uint64_t features_enabled;
1861 - int i;
1862 - bool enabled;
1863 - int ret = 0;
1864 + int i, ret = 0;
1865
1866 PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
1867 PPSMC_MSG_DisableAllSmuFeatures)) == 0,
1868 "[DisableAllSMUFeatures] Failed to disable all smu features!",
1869 return ret);
1870
1871 - ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
1872 - PP_ASSERT_WITH_CODE(!ret,
1873 - "[DisableAllSMUFeatures] Failed to get enabled smc features!",
1874 - return ret);
1875 -
1876 - for (i = 0; i < GNLD_FEATURES_MAX; i++) {
1877 - enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
1878 - true : false;
1879 - data->smu_features[i].enabled = enabled;
1880 - data->smu_features[i].supported = enabled;
1881 - }
1882 + for (i = 0; i < GNLD_FEATURES_MAX; i++)
1883 + data->smu_features[i].enabled = 0;
1884
1885 return 0;
1886 }
1887 @@ -1652,12 +1640,6 @@ static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
1888
1889 data->uvd_power_gated = true;
1890 data->vce_power_gated = true;
1891 -
1892 - if (data->smu_features[GNLD_DPM_UVD].enabled)
1893 - data->uvd_power_gated = false;
1894 -
1895 - if (data->smu_features[GNLD_DPM_VCE].enabled)
1896 - data->vce_power_gated = false;
1897 }
1898
1899 static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
1900 @@ -3211,10 +3193,11 @@ static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
1901
1902 static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
1903 {
1904 - uint64_t features_enabled;
1905 - uint64_t features_to_enable;
1906 - uint64_t features_to_disable;
1907 - int ret = 0;
1908 + struct vega20_hwmgr *data =
1909 + (struct vega20_hwmgr *)(hwmgr->backend);
1910 + uint64_t features_enabled, features_to_enable, features_to_disable;
1911 + int i, ret = 0;
1912 + bool enabled;
1913
1914 if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
1915 return -EINVAL;
1916 @@ -3243,6 +3226,17 @@ static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfe
1917 return ret;
1918 }
1919
1920 + /* Update the cached feature enablement state */
1921 + ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
1922 + if (ret)
1923 + return ret;
1924 +
1925 + for (i = 0; i < GNLD_FEATURES_MAX; i++) {
1926 + enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
1927 + true : false;
1928 + data->smu_features[i].enabled = enabled;
1929 + }
1930 +
1931 return 0;
1932 }
1933
1934 diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
1935 index ede54e87e287b..ce56b93871e8f 100644
1936 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
1937 +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_thermal.c
1938 @@ -240,17 +240,18 @@ int vega20_thermal_get_temperature(struct pp_hwmgr *hwmgr)
1939 static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
1940 struct PP_TemperatureRange *range)
1941 {
1942 + struct phm_ppt_v3_information *pptable_information =
1943 + (struct phm_ppt_v3_information *)hwmgr->pptable;
1944 struct amdgpu_device *adev = hwmgr->adev;
1945 - int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP *
1946 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1947 - int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP *
1948 - PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1949 + int low = VEGA20_THERMAL_MINIMUM_ALERT_TEMP;
1950 + int high = VEGA20_THERMAL_MAXIMUM_ALERT_TEMP;
1951 uint32_t val;
1952
1953 - if (low < range->min)
1954 - low = range->min;
1955 - if (high > range->max)
1956 - high = range->max;
1957 + /* compare them in degrees Celsius */
1958 + if (low < range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES)
1959 + low = range->min / PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
1960 + if (high > pptable_information->us_software_shutdown_temp)
1961 + high = pptable_information->us_software_shutdown_temp;
1962
1963 if (low > high)
1964 return -EINVAL;
1965 @@ -259,8 +260,8 @@ static int vega20_thermal_set_temperature_range(struct pp_hwmgr *hwmgr,
1966
1967 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
1968 val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
1969 - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1970 - val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low / PP_TEMPERATURE_UNITS_PER_CENTIGRADES));
1971 + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, high);
1972 + val = CGS_REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, low);
1973 val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);
1974
1975 WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);
1976 diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
1977 index 21417ac8e878e..a9a69760c18d0 100644
1978 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c
1979 +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
1980 @@ -1207,6 +1207,12 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
1981 return dst;
1982 }
1983
1984 +static inline bool cmd_desc_is(const struct drm_i915_cmd_descriptor * const desc,
1985 + const u32 cmd)
1986 +{
1987 + return desc->cmd.value == (cmd & desc->cmd.mask);
1988 +}
1989 +
1990 static bool check_cmd(const struct intel_engine_cs *engine,
1991 const struct drm_i915_cmd_descriptor *desc,
1992 const u32 *cmd, u32 length)
1993 @@ -1245,19 +1251,19 @@ static bool check_cmd(const struct intel_engine_cs *engine,
1994 * allowed mask/value pair given in the whitelist entry.
1995 */
1996 if (reg->mask) {
1997 - if (desc->cmd.value == MI_LOAD_REGISTER_MEM) {
1998 + if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
1999 DRM_DEBUG_DRIVER("CMD: Rejected LRM to masked register 0x%08X\n",
2000 reg_addr);
2001 return false;
2002 }
2003
2004 - if (desc->cmd.value == MI_LOAD_REGISTER_REG) {
2005 + if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
2006 DRM_DEBUG_DRIVER("CMD: Rejected LRR to masked register 0x%08X\n",
2007 reg_addr);
2008 return false;
2009 }
2010
2011 - if (desc->cmd.value == MI_LOAD_REGISTER_IMM(1) &&
2012 + if (cmd_desc_is(desc, MI_LOAD_REGISTER_IMM(1)) &&
2013 (offset + 2 > length ||
2014 (cmd[offset + 1] & reg->mask) != reg->value)) {
2015 DRM_DEBUG_DRIVER("CMD: Rejected LRI to masked register 0x%08X\n",
2016 @@ -1488,7 +1494,7 @@ int intel_engine_cmd_parser(struct i915_gem_context *ctx,
2017 goto err;
2018 }
2019
2020 - if (desc->cmd.value == MI_BATCH_BUFFER_START) {
2021 + if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
2022 ret = check_bbstart(ctx, cmd, offset, length,
2023 batch_len, batch_start,
2024 shadow_batch_start);
2025 diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
2026 index 376fca6ca9f47..7e6179fe63f86 100644
2027 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c
2028 +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
2029 @@ -375,9 +375,9 @@ static void ingenic_drm_plane_atomic_update(struct drm_plane *plane,
2030
2031 if (state && state->fb) {
2032 addr = drm_fb_cma_get_gem_addr(state->fb, state, 0);
2033 - width = state->crtc->state->adjusted_mode.hdisplay;
2034 - height = state->crtc->state->adjusted_mode.vdisplay;
2035 - cpp = state->fb->format->cpp[plane->index];
2036 + width = state->src_w >> 16;
2037 + height = state->src_h >> 16;
2038 + cpp = state->fb->format->cpp[0];
2039
2040 priv->dma_hwdesc->addr = addr;
2041 priv->dma_hwdesc->cmd = width * height * cpp / 4;
2042 diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
2043 index 048c8be426f32..053da39da1cc0 100644
2044 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
2045 +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
2046 @@ -350,7 +350,7 @@ int adreno_hw_init(struct msm_gpu *gpu)
2047 ring->next = ring->start;
2048
2049 /* reset completed fence seqno: */
2050 - ring->memptrs->fence = ring->seqno;
2051 + ring->memptrs->fence = ring->fctx->completed_fence;
2052 ring->memptrs->rptr = 0;
2053 }
2054
2055 diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2056 index d735ea7e2d886..419a02260bfa7 100644
2057 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
2058 +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
2059 @@ -2032,8 +2032,10 @@ nv50_disp_atomic_commit(struct drm_device *dev,
2060 int ret, i;
2061
2062 ret = pm_runtime_get_sync(dev->dev);
2063 - if (ret < 0 && ret != -EACCES)
2064 + if (ret < 0 && ret != -EACCES) {
2065 + pm_runtime_put_autosuspend(dev->dev);
2066 return ret;
2067 + }
2068
2069 ret = drm_atomic_helper_setup_commit(state, nonblock);
2070 if (ret)
2071 diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
2072 index eb31c5b6c8e93..0994aee7671ad 100644
2073 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c
2074 +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
2075 @@ -568,8 +568,10 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
2076 pm_runtime_get_noresume(dev->dev);
2077 } else {
2078 ret = pm_runtime_get_sync(dev->dev);
2079 - if (ret < 0 && ret != -EACCES)
2080 + if (ret < 0 && ret != -EACCES) {
2081 + pm_runtime_put_autosuspend(dev->dev);
2082 return conn_status;
2083 + }
2084 }
2085
2086 nv_encoder = nouveau_connector_ddc_detect(connector);
2087 diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
2088 index 5cf2381f667e2..c09ea357e88f0 100644
2089 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
2090 +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
2091 @@ -189,8 +189,10 @@ nouveau_fbcon_open(struct fb_info *info, int user)
2092 struct nouveau_fbdev *fbcon = info->par;
2093 struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
2094 int ret = pm_runtime_get_sync(drm->dev->dev);
2095 - if (ret < 0 && ret != -EACCES)
2096 + if (ret < 0 && ret != -EACCES) {
2097 + pm_runtime_put(drm->dev->dev);
2098 return ret;
2099 + }
2100 return 0;
2101 }
2102
2103 diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
2104 index b684cd719612b..bc63f4cecf5d5 100644
2105 --- a/drivers/gpu/drm/radeon/radeon_connectors.c
2106 +++ b/drivers/gpu/drm/radeon/radeon_connectors.c
2107 @@ -883,8 +883,10 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
2108
2109 if (!drm_kms_helper_is_poll_worker()) {
2110 r = pm_runtime_get_sync(connector->dev->dev);
2111 - if (r < 0)
2112 + if (r < 0) {
2113 + pm_runtime_put_autosuspend(connector->dev->dev);
2114 return connector_status_disconnected;
2115 + }
2116 }
2117
2118 if (encoder) {
2119 @@ -1029,8 +1031,10 @@ radeon_vga_detect(struct drm_connector *connector, bool force)
2120
2121 if (!drm_kms_helper_is_poll_worker()) {
2122 r = pm_runtime_get_sync(connector->dev->dev);
2123 - if (r < 0)
2124 + if (r < 0) {
2125 + pm_runtime_put_autosuspend(connector->dev->dev);
2126 return connector_status_disconnected;
2127 + }
2128 }
2129
2130 encoder = radeon_best_single_encoder(connector);
2131 @@ -1167,8 +1171,10 @@ radeon_tv_detect(struct drm_connector *connector, bool force)
2132
2133 if (!drm_kms_helper_is_poll_worker()) {
2134 r = pm_runtime_get_sync(connector->dev->dev);
2135 - if (r < 0)
2136 + if (r < 0) {
2137 + pm_runtime_put_autosuspend(connector->dev->dev);
2138 return connector_status_disconnected;
2139 + }
2140 }
2141
2142 encoder = radeon_best_single_encoder(connector);
2143 @@ -1251,8 +1257,10 @@ radeon_dvi_detect(struct drm_connector *connector, bool force)
2144
2145 if (!drm_kms_helper_is_poll_worker()) {
2146 r = pm_runtime_get_sync(connector->dev->dev);
2147 - if (r < 0)
2148 + if (r < 0) {
2149 + pm_runtime_put_autosuspend(connector->dev->dev);
2150 return connector_status_disconnected;
2151 + }
2152 }
2153
2154 if (radeon_connector->detected_hpd_without_ddc) {
2155 @@ -1666,8 +1674,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
2156
2157 if (!drm_kms_helper_is_poll_worker()) {
2158 r = pm_runtime_get_sync(connector->dev->dev);
2159 - if (r < 0)
2160 + if (r < 0) {
2161 + pm_runtime_put_autosuspend(connector->dev->dev);
2162 return connector_status_disconnected;
2163 + }
2164 }
2165
2166 if (!force && radeon_check_hpd_status_unchanged(connector)) {
2167 diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
2168 index 4be49c1aef518..09894a1d343f3 100644
2169 --- a/drivers/gpu/drm/xen/xen_drm_front.c
2170 +++ b/drivers/gpu/drm/xen/xen_drm_front.c
2171 @@ -400,7 +400,7 @@ static int xen_drm_drv_dumb_create(struct drm_file *filp,
2172 args->size = args->pitch * args->height;
2173
2174 obj = xen_drm_front_gem_create(dev, args->size);
2175 - if (IS_ERR_OR_NULL(obj)) {
2176 + if (IS_ERR(obj)) {
2177 ret = PTR_ERR(obj);
2178 goto fail;
2179 }
2180 diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
2181 index f0b85e0941114..4ec8a49241e17 100644
2182 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
2183 +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
2184 @@ -83,7 +83,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
2185
2186 size = round_up(size, PAGE_SIZE);
2187 xen_obj = gem_create_obj(dev, size);
2188 - if (IS_ERR_OR_NULL(xen_obj))
2189 + if (IS_ERR(xen_obj))
2190 return xen_obj;
2191
2192 if (drm_info->front_info->cfg.be_alloc) {
2193 @@ -117,7 +117,7 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
2194 */
2195 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
2196 xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
2197 - if (IS_ERR_OR_NULL(xen_obj->pages)) {
2198 + if (IS_ERR(xen_obj->pages)) {
2199 ret = PTR_ERR(xen_obj->pages);
2200 xen_obj->pages = NULL;
2201 goto fail;
2202 @@ -136,7 +136,7 @@ struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
2203 struct xen_gem_object *xen_obj;
2204
2205 xen_obj = gem_create(dev, size);
2206 - if (IS_ERR_OR_NULL(xen_obj))
2207 + if (IS_ERR(xen_obj))
2208 return ERR_CAST(xen_obj);
2209
2210 return &xen_obj->base;
2211 @@ -194,7 +194,7 @@ xen_drm_front_gem_import_sg_table(struct drm_device *dev,
2212
2213 size = attach->dmabuf->size;
2214 xen_obj = gem_create_obj(dev, size);
2215 - if (IS_ERR_OR_NULL(xen_obj))
2216 + if (IS_ERR(xen_obj))
2217 return ERR_CAST(xen_obj);
2218
2219 ret = gem_alloc_pages_array(xen_obj, size);
2220 diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
2221 index 21ad1c359b613..e4dedbb184ab7 100644
2222 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
2223 +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
2224 @@ -60,7 +60,7 @@ fb_create(struct drm_device *dev, struct drm_file *filp,
2225 int ret;
2226
2227 fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
2228 - if (IS_ERR_OR_NULL(fb))
2229 + if (IS_ERR(fb))
2230 return fb;
2231
2232 gem_obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
2233 diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
2234 index 73e4590ea9c94..09df5ecc2c79b 100644
2235 --- a/drivers/hid/hid-ids.h
2236 +++ b/drivers/hid/hid-ids.h
2237 @@ -771,6 +771,7 @@
2238 #define USB_DEVICE_ID_LOGITECH_G27_WHEEL 0xc29b
2239 #define USB_DEVICE_ID_LOGITECH_WII_WHEEL 0xc29c
2240 #define USB_DEVICE_ID_LOGITECH_ELITE_KBD 0xc30a
2241 +#define USB_DEVICE_ID_LOGITECH_GROUP_AUDIO 0x0882
2242 #define USB_DEVICE_ID_S510_RECEIVER 0xc50c
2243 #define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
2244 #define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
2245 diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c
2246 index a49fa2b047cba..b3dd60897ffda 100644
2247 --- a/drivers/hid/hid-quirks.c
2248 +++ b/drivers/hid/hid-quirks.c
2249 @@ -179,6 +179,7 @@ static const struct hid_device_id hid_quirks[] = {
2250 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD2, USB_DEVICE_ID_SMARTJOY_DUAL_PLUS), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
2251 { HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD), HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
2252 { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE), HID_QUIRK_MULTI_INPUT },
2253 + { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_GROUP_AUDIO), HID_QUIRK_NOGET },
2254
2255 { 0 }
2256 };
2257 diff --git a/drivers/hid/i2c-hid/i2c-hid-core.c b/drivers/hid/i2c-hid/i2c-hid-core.c
2258 index b525b2715e074..592176aff0270 100644
2259 --- a/drivers/hid/i2c-hid/i2c-hid-core.c
2260 +++ b/drivers/hid/i2c-hid/i2c-hid-core.c
2261 @@ -422,6 +422,19 @@ static int i2c_hid_set_power(struct i2c_client *client, int power_state)
2262 dev_err(&client->dev, "failed to change power setting.\n");
2263
2264 set_pwr_exit:
2265 +
2266 + /*
2267 + * The HID over I2C specification states that if a DEVICE needs time
2268 + * after the PWR_ON request, it should utilise CLOCK stretching.
2269 + * However, it has been observed that the Windows driver provides a
2270 + * 1ms sleep between the PWR_ON and RESET requests.
2271 + * According to Goodix, Windows even waits 60 ms after (other?)
2272 + * PWR_ON requests. Testing has confirmed that several devices
2273 + * will not work properly without a delay after a PWR_ON request.
2274 + */
2275 + if (!ret && power_state == I2C_HID_PWR_ON)
2276 + msleep(60);
2277 +
2278 return ret;
2279 }
2280
2281 @@ -443,15 +456,6 @@ static int i2c_hid_hwreset(struct i2c_client *client)
2282 if (ret)
2283 goto out_unlock;
2284
2285 - /*
2286 - * The HID over I2C specification states that if a DEVICE needs time
2287 - * after the PWR_ON request, it should utilise CLOCK stretching.
2288 - * However, it has been observered that the Windows driver provides a
2289 - * 1ms sleep between the PWR_ON and RESET requests and that some devices
2290 - * rely on this.
2291 - */
2292 - usleep_range(1000, 5000);
2293 -
2294 i2c_hid_dbg(ihid, "resetting...\n");
2295
2296 ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
2297 diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
2298 index 35b1fa6d962ec..4711fb191a072 100644
2299 --- a/drivers/hid/usbhid/hiddev.c
2300 +++ b/drivers/hid/usbhid/hiddev.c
2301 @@ -519,12 +519,16 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
2302
2303 switch (cmd) {
2304 case HIDIOCGUSAGE:
2305 + if (uref->usage_index >= field->report_count)
2306 + goto inval;
2307 uref->value = field->value[uref->usage_index];
2308 if (copy_to_user(user_arg, uref, sizeof(*uref)))
2309 goto fault;
2310 goto goodreturn;
2311
2312 case HIDIOCSUSAGE:
2313 + if (uref->usage_index >= field->report_count)
2314 + goto inval;
2315 field->value[uref->usage_index] = uref->value;
2316 goto goodreturn;
2317
2318 diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
2319 index dfb122b5e1b76..b812b199e5e5b 100644
2320 --- a/drivers/hwmon/nct7904.c
2321 +++ b/drivers/hwmon/nct7904.c
2322 @@ -197,7 +197,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
2323 if (ret < 0)
2324 return ret;
2325 cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
2326 - if (cnt == 0x1fff)
2327 + if (cnt == 0 || cnt == 0x1fff)
2328 rpm = 0;
2329 else
2330 rpm = 1350000 / cnt;
2331 @@ -209,7 +209,7 @@ static int nct7904_read_fan(struct device *dev, u32 attr, int channel,
2332 if (ret < 0)
2333 return ret;
2334 cnt = ((ret & 0xff00) >> 3) | (ret & 0x1f);
2335 - if (cnt == 0x1fff)
2336 + if (cnt == 0 || cnt == 0x1fff)
2337 rpm = 0;
2338 else
2339 rpm = 1350000 / cnt;
2340 diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
2341 index 0b90aa0318df3..9c162a01a5849 100644
2342 --- a/drivers/i2c/busses/i2c-rcar.c
2343 +++ b/drivers/i2c/busses/i2c-rcar.c
2344 @@ -587,6 +587,7 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
2345 /* master sent stop */
2346 if (ssr_filtered & SSR) {
2347 i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
2348 + rcar_i2c_write(priv, ICSCR, SIE | SDBS); /* clear our NACK */
2349 rcar_i2c_write(priv, ICSIER, SAR);
2350 rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
2351 }
2352 diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
2353 index cc193f2ba5d37..def62d5b42ca7 100644
2354 --- a/drivers/i2c/i2c-core-base.c
2355 +++ b/drivers/i2c/i2c-core-base.c
2356 @@ -354,7 +354,7 @@ static int i2c_device_probe(struct device *dev)
2357 * or ACPI ID table is supplied for the probing device.
2358 */
2359 if (!driver->id_table &&
2360 - !i2c_acpi_match_device(dev->driver->acpi_match_table, client) &&
2361 + !acpi_driver_match_device(dev, dev->driver) &&
2362 !i2c_of_match_device(dev->driver->of_match_table, client)) {
2363 status = -ENODEV;
2364 goto put_sync_adapter;
2365 diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
2366 index 0e6a9536eca62..612cbf668adf8 100644
2367 --- a/drivers/iommu/iova.c
2368 +++ b/drivers/iommu/iova.c
2369 @@ -811,7 +811,9 @@ iova_magazine_free_pfns(struct iova_magazine *mag, struct iova_domain *iovad)
2370 for (i = 0 ; i < mag->size; ++i) {
2371 struct iova *iova = private_find_iova(iovad, mag->pfns[i]);
2372
2373 - BUG_ON(!iova);
2374 + if (WARN_ON(!iova))
2375 + continue;
2376 +
2377 private_free_iova(iovad, iova);
2378 }
2379
2380 diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
2381 index e00f2fa27f00e..a8322a4e18d36 100644
2382 --- a/drivers/irqchip/irq-stm32-exti.c
2383 +++ b/drivers/irqchip/irq-stm32-exti.c
2384 @@ -431,6 +431,16 @@ static void stm32_irq_ack(struct irq_data *d)
2385 irq_gc_unlock(gc);
2386 }
2387
2388 +/* directly set the target bit without reading first. */
2389 +static inline void stm32_exti_write_bit(struct irq_data *d, u32 reg)
2390 +{
2391 + struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
2392 + void __iomem *base = chip_data->host_data->base;
2393 + u32 val = BIT(d->hwirq % IRQS_PER_BANK);
2394 +
2395 + writel_relaxed(val, base + reg);
2396 +}
2397 +
2398 static inline u32 stm32_exti_set_bit(struct irq_data *d, u32 reg)
2399 {
2400 struct stm32_exti_chip_data *chip_data = irq_data_get_irq_chip_data(d);
2401 @@ -464,9 +474,9 @@ static void stm32_exti_h_eoi(struct irq_data *d)
2402
2403 raw_spin_lock(&chip_data->rlock);
2404
2405 - stm32_exti_set_bit(d, stm32_bank->rpr_ofst);
2406 + stm32_exti_write_bit(d, stm32_bank->rpr_ofst);
2407 if (stm32_bank->fpr_ofst != UNDEF_REG)
2408 - stm32_exti_set_bit(d, stm32_bank->fpr_ofst);
2409 + stm32_exti_write_bit(d, stm32_bank->fpr_ofst);
2410
2411 raw_spin_unlock(&chip_data->rlock);
2412
2413 diff --git a/drivers/media/cec/cec-api.c b/drivers/media/cec/cec-api.c
2414 index 12d6764844724..ed75636a6fb34 100644
2415 --- a/drivers/media/cec/cec-api.c
2416 +++ b/drivers/media/cec/cec-api.c
2417 @@ -147,7 +147,13 @@ static long cec_adap_g_log_addrs(struct cec_adapter *adap,
2418 struct cec_log_addrs log_addrs;
2419
2420 mutex_lock(&adap->lock);
2421 - log_addrs = adap->log_addrs;
2422 + /*
2423 + * We use memcpy here instead of assignment since there is a
2424 + * hole at the end of struct cec_log_addrs that an assignment
2425 + * might ignore. So when we do copy_to_user() we could leak
2426 + * one byte of memory.
2427 + */
2428 + memcpy(&log_addrs, &adap->log_addrs, sizeof(log_addrs));
2429 if (!adap->is_configured)
2430 memset(log_addrs.log_addr, CEC_LOG_ADDR_INVALID,
2431 sizeof(log_addrs.log_addr));
2432 diff --git a/drivers/media/pci/ttpci/av7110.c b/drivers/media/pci/ttpci/av7110.c
2433 index d0cdee1c6eb0b..bf36b1e22b635 100644
2434 --- a/drivers/media/pci/ttpci/av7110.c
2435 +++ b/drivers/media/pci/ttpci/av7110.c
2436 @@ -406,14 +406,15 @@ static void debiirq(unsigned long cookie)
2437 case DATA_CI_GET:
2438 {
2439 u8 *data = av7110->debi_virt;
2440 + u8 data_0 = data[0];
2441
2442 - if ((data[0] < 2) && data[2] == 0xff) {
2443 + if (data_0 < 2 && data[2] == 0xff) {
2444 int flags = 0;
2445 if (data[5] > 0)
2446 flags |= CA_CI_MODULE_PRESENT;
2447 if (data[5] > 5)
2448 flags |= CA_CI_MODULE_READY;
2449 - av7110->ci_slot[data[0]].flags = flags;
2450 + av7110->ci_slot[data_0].flags = flags;
2451 } else
2452 ci_get_data(&av7110->ci_rbuffer,
2453 av7110->debi_virt,
2454 diff --git a/drivers/media/platform/davinci/vpif_capture.c b/drivers/media/platform/davinci/vpif_capture.c
2455 index 71f4fe882d138..74f68ac3c9a75 100644
2456 --- a/drivers/media/platform/davinci/vpif_capture.c
2457 +++ b/drivers/media/platform/davinci/vpif_capture.c
2458 @@ -1482,8 +1482,6 @@ probe_out:
2459 /* Unregister video device */
2460 video_unregister_device(&ch->video_dev);
2461 }
2462 - kfree(vpif_obj.sd);
2463 - v4l2_device_unregister(&vpif_obj.v4l2_dev);
2464
2465 return err;
2466 }
2467 diff --git a/drivers/media/rc/gpio-ir-tx.c b/drivers/media/rc/gpio-ir-tx.c
2468 index 18ca12d78314c..66703989ae185 100644
2469 --- a/drivers/media/rc/gpio-ir-tx.c
2470 +++ b/drivers/media/rc/gpio-ir-tx.c
2471 @@ -79,13 +79,8 @@ static int gpio_ir_tx(struct rc_dev *dev, unsigned int *txbuf,
2472 // space
2473 edge = ktime_add_us(edge, txbuf[i]);
2474 delta = ktime_us_delta(edge, ktime_get());
2475 - if (delta > 10) {
2476 - spin_unlock_irqrestore(&gpio_ir->lock, flags);
2477 - usleep_range(delta, delta + 10);
2478 - spin_lock_irqsave(&gpio_ir->lock, flags);
2479 - } else if (delta > 0) {
2480 + if (delta > 0)
2481 udelay(delta);
2482 - }
2483 } else {
2484 // pulse
2485 ktime_t last = ktime_add_us(edge, txbuf[i]);
2486 diff --git a/drivers/mfd/intel-lpss-pci.c b/drivers/mfd/intel-lpss-pci.c
2487 index b33030e3385c7..da91965b8f7b2 100644
2488 --- a/drivers/mfd/intel-lpss-pci.c
2489 +++ b/drivers/mfd/intel-lpss-pci.c
2490 @@ -196,6 +196,9 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
2491 { PCI_VDEVICE(INTEL, 0x1ac4), (kernel_ulong_t)&bxt_info },
2492 { PCI_VDEVICE(INTEL, 0x1ac6), (kernel_ulong_t)&bxt_info },
2493 { PCI_VDEVICE(INTEL, 0x1aee), (kernel_ulong_t)&bxt_uart_info },
2494 + /* EBG */
2495 + { PCI_VDEVICE(INTEL, 0x1bad), (kernel_ulong_t)&bxt_uart_info },
2496 + { PCI_VDEVICE(INTEL, 0x1bae), (kernel_ulong_t)&bxt_uart_info },
2497 /* GLK */
2498 { PCI_VDEVICE(INTEL, 0x31ac), (kernel_ulong_t)&glk_i2c_info },
2499 { PCI_VDEVICE(INTEL, 0x31ae), (kernel_ulong_t)&glk_i2c_info },
2500 @@ -225,6 +228,22 @@ static const struct pci_device_id intel_lpss_pci_ids[] = {
2501 { PCI_VDEVICE(INTEL, 0x34ea), (kernel_ulong_t)&bxt_i2c_info },
2502 { PCI_VDEVICE(INTEL, 0x34eb), (kernel_ulong_t)&bxt_i2c_info },
2503 { PCI_VDEVICE(INTEL, 0x34fb), (kernel_ulong_t)&spt_info },
2504 + /* TGL-H */
2505 + { PCI_VDEVICE(INTEL, 0x43a7), (kernel_ulong_t)&bxt_uart_info },
2506 + { PCI_VDEVICE(INTEL, 0x43a8), (kernel_ulong_t)&bxt_uart_info },
2507 + { PCI_VDEVICE(INTEL, 0x43a9), (kernel_ulong_t)&bxt_uart_info },
2508 + { PCI_VDEVICE(INTEL, 0x43aa), (kernel_ulong_t)&bxt_info },
2509 + { PCI_VDEVICE(INTEL, 0x43ab), (kernel_ulong_t)&bxt_info },
2510 + { PCI_VDEVICE(INTEL, 0x43ad), (kernel_ulong_t)&bxt_i2c_info },
2511 + { PCI_VDEVICE(INTEL, 0x43ae), (kernel_ulong_t)&bxt_i2c_info },
2512 + { PCI_VDEVICE(INTEL, 0x43d8), (kernel_ulong_t)&bxt_i2c_info },
2513 + { PCI_VDEVICE(INTEL, 0x43da), (kernel_ulong_t)&bxt_uart_info },
2514 + { PCI_VDEVICE(INTEL, 0x43e8), (kernel_ulong_t)&bxt_i2c_info },
2515 + { PCI_VDEVICE(INTEL, 0x43e9), (kernel_ulong_t)&bxt_i2c_info },
2516 + { PCI_VDEVICE(INTEL, 0x43ea), (kernel_ulong_t)&bxt_i2c_info },
2517 + { PCI_VDEVICE(INTEL, 0x43eb), (kernel_ulong_t)&bxt_i2c_info },
2518 + { PCI_VDEVICE(INTEL, 0x43fb), (kernel_ulong_t)&bxt_info },
2519 + { PCI_VDEVICE(INTEL, 0x43fd), (kernel_ulong_t)&bxt_info },
2520 /* EHL */
2521 { PCI_VDEVICE(INTEL, 0x4b28), (kernel_ulong_t)&bxt_uart_info },
2522 { PCI_VDEVICE(INTEL, 0x4b29), (kernel_ulong_t)&bxt_uart_info },
2523 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2524 index 4d0d13d5d0998..635345bced313 100644
2525 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
2526 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
2527 @@ -2924,7 +2924,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
2528 }
2529
2530 u64_stats_update_begin(&tx_ring->syncp);
2531 - tx_ring->tx_stats.missed_tx = missed_tx;
2532 + tx_ring->tx_stats.missed_tx += missed_tx;
2533 u64_stats_update_end(&tx_ring->syncp);
2534
2535 return rc;
2536 @@ -3848,6 +3848,9 @@ static void ena_keep_alive_wd(void *adapter_data,
2537 rx_drops = ((u64)desc->rx_drops_high << 32) | desc->rx_drops_low;
2538
2539 u64_stats_update_begin(&adapter->syncp);
2540 + /* These stats are accumulated by the device, so the counters indicate
2541 + * all drops since last reset.
2542 + */
2543 adapter->dev_stats.rx_drops = rx_drops;
2544 u64_stats_update_end(&adapter->syncp);
2545 }
2546 diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
2547 index 2580bcd850253..3978d82c95989 100644
2548 --- a/drivers/net/ethernet/freescale/gianfar.c
2549 +++ b/drivers/net/ethernet/freescale/gianfar.c
2550 @@ -751,8 +751,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
2551 continue;
2552
2553 err = gfar_parse_group(child, priv, model);
2554 - if (err)
2555 + if (err) {
2556 + of_node_put(child);
2557 goto err_grp_init;
2558 + }
2559 }
2560 } else { /* SQ_SG_MODE */
2561 err = gfar_parse_group(np, priv, model);
2562 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
2563 index ccd852ad62a4b..d50c5b55da180 100644
2564 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
2565 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
2566 @@ -192,7 +192,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
2567 }
2568
2569 /* alloc the udl from per cpu ddp pool */
2570 - ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_KERNEL, &ddp->udp);
2571 + ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
2572 if (!ddp->udl) {
2573 e_err(drv, "failed allocated ddp context\n");
2574 goto out_noddp_unmap;
2575 diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
2576 index b805abc9ec3b4..5fbabae2909ee 100644
2577 --- a/drivers/net/ipvlan/ipvlan_main.c
2578 +++ b/drivers/net/ipvlan/ipvlan_main.c
2579 @@ -106,12 +106,21 @@ static void ipvlan_port_destroy(struct net_device *dev)
2580 kfree(port);
2581 }
2582
2583 +#define IPVLAN_ALWAYS_ON_OFLOADS \
2584 + (NETIF_F_SG | NETIF_F_HW_CSUM | \
2585 + NETIF_F_GSO_ROBUST | NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL)
2586 +
2587 +#define IPVLAN_ALWAYS_ON \
2588 + (IPVLAN_ALWAYS_ON_OFLOADS | NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED)
2589 +
2590 #define IPVLAN_FEATURES \
2591 - (NETIF_F_SG | NETIF_F_CSUM_MASK | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
2592 + (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
2593 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_GSO_ROBUST | \
2594 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
2595 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)
2596
2597 + /* NETIF_F_GSO_ENCAP_ALL and NETIF_F_GSO_SOFTWARE are newly added */
2598 +
2599 #define IPVLAN_STATE_MASK \
2600 ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))
2601
2602 @@ -125,7 +134,9 @@ static int ipvlan_init(struct net_device *dev)
2603 dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
2604 (phy_dev->state & IPVLAN_STATE_MASK);
2605 dev->features = phy_dev->features & IPVLAN_FEATURES;
2606 - dev->features |= NETIF_F_LLTX | NETIF_F_VLAN_CHALLENGED;
2607 + dev->features |= IPVLAN_ALWAYS_ON;
2608 + dev->vlan_features = phy_dev->vlan_features & IPVLAN_FEATURES;
2609 + dev->vlan_features |= IPVLAN_ALWAYS_ON_OFLOADS;
2610 dev->hw_enc_features |= dev->features;
2611 dev->gso_max_size = phy_dev->gso_max_size;
2612 dev->gso_max_segs = phy_dev->gso_max_segs;
2613 @@ -225,7 +236,14 @@ static netdev_features_t ipvlan_fix_features(struct net_device *dev,
2614 {
2615 struct ipvl_dev *ipvlan = netdev_priv(dev);
2616
2617 - return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
2618 + features |= NETIF_F_ALL_FOR_ALL;
2619 + features &= (ipvlan->sfeatures | ~IPVLAN_FEATURES);
2620 + features = netdev_increment_features(ipvlan->phy_dev->features,
2621 + features, features);
2622 + features |= IPVLAN_ALWAYS_ON;
2623 + features &= (IPVLAN_FEATURES | IPVLAN_ALWAYS_ON);
2624 +
2625 + return features;
2626 }
2627
2628 static void ipvlan_change_rx_flags(struct net_device *dev, int change)
2629 @@ -732,10 +750,9 @@ static int ipvlan_device_event(struct notifier_block *unused,
2630
2631 case NETDEV_FEAT_CHANGE:
2632 list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
2633 - ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
2634 ipvlan->dev->gso_max_size = dev->gso_max_size;
2635 ipvlan->dev->gso_max_segs = dev->gso_max_segs;
2636 - netdev_features_change(ipvlan->dev);
2637 + netdev_update_features(ipvlan->dev);
2638 }
2639 break;
2640
2641 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
2642 index 9d3209ae41cfb..07622cf8765ae 100644
2643 --- a/drivers/net/macvlan.c
2644 +++ b/drivers/net/macvlan.c
2645 @@ -1259,6 +1259,9 @@ static void macvlan_port_destroy(struct net_device *dev)
2646 static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
2647 struct netlink_ext_ack *extack)
2648 {
2649 + struct nlattr *nla, *head;
2650 + int rem, len;
2651 +
2652 if (tb[IFLA_ADDRESS]) {
2653 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2654 return -EINVAL;
2655 @@ -1306,6 +1309,20 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[],
2656 return -EADDRNOTAVAIL;
2657 }
2658
2659 + if (data[IFLA_MACVLAN_MACADDR_DATA]) {
2660 + head = nla_data(data[IFLA_MACVLAN_MACADDR_DATA]);
2661 + len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
2662 +
2663 + nla_for_each_attr(nla, head, len, rem) {
2664 + if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
2665 + nla_len(nla) != ETH_ALEN)
2666 + return -EINVAL;
2667 +
2668 + if (!is_valid_ether_addr(nla_data(nla)))
2669 + return -EADDRNOTAVAIL;
2670 + }
2671 + }
2672 +
2673 if (data[IFLA_MACVLAN_MACADDR_COUNT])
2674 return -EINVAL;
2675
2676 @@ -1362,10 +1379,6 @@ static int macvlan_changelink_sources(struct macvlan_dev *vlan, u32 mode,
2677 len = nla_len(data[IFLA_MACVLAN_MACADDR_DATA]);
2678
2679 nla_for_each_attr(nla, head, len, rem) {
2680 - if (nla_type(nla) != IFLA_MACVLAN_MACADDR ||
2681 - nla_len(nla) != ETH_ALEN)
2682 - continue;
2683 -
2684 addr = nla_data(nla);
2685 ret = macvlan_hash_add_source(vlan, addr);
2686 if (ret)
2687 diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
2688 index 2ae57c1de7b55..ae4c9edc445c3 100644
2689 --- a/drivers/net/wireless/ath/ath10k/hw.h
2690 +++ b/drivers/net/wireless/ath/ath10k/hw.h
2691 @@ -810,7 +810,7 @@ ath10k_is_rssi_enable(struct ath10k_hw_params *hw,
2692
2693 #define TARGET_10_4_TX_DBG_LOG_SIZE 1024
2694 #define TARGET_10_4_NUM_WDS_ENTRIES 32
2695 -#define TARGET_10_4_DMA_BURST_SIZE 0
2696 +#define TARGET_10_4_DMA_BURST_SIZE 1
2697 #define TARGET_10_4_MAC_AGGR_DELIM 0
2698 #define TARGET_10_4_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
2699 #define TARGET_10_4_VOW_CONFIG 0
2700 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2701 index e3ebb7abbdaed..4ca50353538ef 100644
2702 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2703 +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c
2704 @@ -82,6 +82,8 @@
2705
2706 #define BRCMF_ND_INFO_TIMEOUT msecs_to_jiffies(2000)
2707
2708 +#define BRCMF_PS_MAX_TIMEOUT_MS 2000
2709 +
2710 #define BRCMF_ASSOC_PARAMS_FIXED_SIZE \
2711 (sizeof(struct brcmf_assoc_params_le) - sizeof(u16))
2712
2713 @@ -2789,6 +2791,12 @@ brcmf_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *ndev,
2714 else
2715 bphy_err(drvr, "error (%d)\n", err);
2716 }
2717 +
2718 + err = brcmf_fil_iovar_int_set(ifp, "pm2_sleep_ret",
2719 + min_t(u32, timeout, BRCMF_PS_MAX_TIMEOUT_MS));
2720 + if (err)
2721 + bphy_err(drvr, "Unable to set pm timeout, (%d)\n", err);
2722 +
2723 done:
2724 brcmf_dbg(TRACE, "Exit\n");
2725 return err;
2726 diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
2727 index c66c6dc003783..bad06939a247c 100644
2728 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c
2729 +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
2730 @@ -718,8 +718,11 @@ static int _rtl_usb_receive(struct ieee80211_hw *hw)
2731
2732 usb_anchor_urb(urb, &rtlusb->rx_submitted);
2733 err = usb_submit_urb(urb, GFP_KERNEL);
2734 - if (err)
2735 + if (err) {
2736 + usb_unanchor_urb(urb);
2737 + usb_free_urb(urb);
2738 goto err_out;
2739 + }
2740 usb_free_urb(urb);
2741 }
2742 return 0;
2743 diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
2744 index 83ac88924f253..dce4d6782ceb1 100644
2745 --- a/drivers/nvme/host/fc.c
2746 +++ b/drivers/nvme/host/fc.c
2747 @@ -1740,7 +1740,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2748 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.cmddma)) {
2749 dev_err(ctrl->dev,
2750 "FCP Op failed - cmdiu dma mapping failed.\n");
2751 - ret = EFAULT;
2752 + ret = -EFAULT;
2753 goto out_on_error;
2754 }
2755
2756 @@ -1750,7 +1750,7 @@ __nvme_fc_init_request(struct nvme_fc_ctrl *ctrl,
2757 if (fc_dma_mapping_error(ctrl->lport->dev, op->fcp_req.rspdma)) {
2758 dev_err(ctrl->dev,
2759 "FCP Op failed - rspiu dma mapping failed.\n");
2760 - ret = EFAULT;
2761 + ret = -EFAULT;
2762 }
2763
2764 atomic_set(&op->state, FCPOP_STATE_IDLE);
2765 diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
2766 index 484aad0d0c9c6..0a458f7880887 100644
2767 --- a/drivers/nvme/host/multipath.c
2768 +++ b/drivers/nvme/host/multipath.c
2769 @@ -249,12 +249,17 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head,
2770 fallback = ns;
2771 }
2772
2773 - /* No optimized path found, re-check the current path */
2774 + /*
2775 + * The loop above skips the current path for round-robin semantics.
2776 + * Fall back to the current path if either:
2777 + * - no other optimized path found and current is optimized,
2778 + * - no other usable path found and current is usable.
2779 + */
2780 if (!nvme_path_is_disabled(old) &&
2781 - old->ana_state == NVME_ANA_OPTIMIZED) {
2782 - found = old;
2783 - goto out;
2784 - }
2785 + (old->ana_state == NVME_ANA_OPTIMIZED ||
2786 + (!fallback && old->ana_state == NVME_ANA_NONOPTIMIZED)))
2787 + return old;
2788 +
2789 if (!fallback)
2790 return NULL;
2791 found = fallback;
2792 diff --git a/drivers/pci/controller/dwc/pcie-qcom.c b/drivers/pci/controller/dwc/pcie-qcom.c
2793 index 270d502b8cd50..374db5d59cf87 100644
2794 --- a/drivers/pci/controller/dwc/pcie-qcom.c
2795 +++ b/drivers/pci/controller/dwc/pcie-qcom.c
2796 @@ -103,11 +103,14 @@ struct qcom_pcie_resources_2_1_0 {
2797 struct clk *iface_clk;
2798 struct clk *core_clk;
2799 struct clk *phy_clk;
2800 + struct clk *aux_clk;
2801 + struct clk *ref_clk;
2802 struct reset_control *pci_reset;
2803 struct reset_control *axi_reset;
2804 struct reset_control *ahb_reset;
2805 struct reset_control *por_reset;
2806 struct reset_control *phy_reset;
2807 + struct reset_control *ext_reset;
2808 struct regulator_bulk_data supplies[QCOM_PCIE_2_1_0_MAX_SUPPLY];
2809 };
2810
2811 @@ -253,6 +256,14 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
2812 if (IS_ERR(res->phy_clk))
2813 return PTR_ERR(res->phy_clk);
2814
2815 + res->aux_clk = devm_clk_get_optional(dev, "aux");
2816 + if (IS_ERR(res->aux_clk))
2817 + return PTR_ERR(res->aux_clk);
2818 +
2819 + res->ref_clk = devm_clk_get_optional(dev, "ref");
2820 + if (IS_ERR(res->ref_clk))
2821 + return PTR_ERR(res->ref_clk);
2822 +
2823 res->pci_reset = devm_reset_control_get_exclusive(dev, "pci");
2824 if (IS_ERR(res->pci_reset))
2825 return PTR_ERR(res->pci_reset);
2826 @@ -269,6 +280,10 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
2827 if (IS_ERR(res->por_reset))
2828 return PTR_ERR(res->por_reset);
2829
2830 + res->ext_reset = devm_reset_control_get_optional_exclusive(dev, "ext");
2831 + if (IS_ERR(res->ext_reset))
2832 + return PTR_ERR(res->ext_reset);
2833 +
2834 res->phy_reset = devm_reset_control_get_exclusive(dev, "phy");
2835 return PTR_ERR_OR_ZERO(res->phy_reset);
2836 }
2837 @@ -277,14 +292,17 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
2838 {
2839 struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
2840
2841 + clk_disable_unprepare(res->phy_clk);
2842 reset_control_assert(res->pci_reset);
2843 reset_control_assert(res->axi_reset);
2844 reset_control_assert(res->ahb_reset);
2845 reset_control_assert(res->por_reset);
2846 - reset_control_assert(res->pci_reset);
2847 + reset_control_assert(res->ext_reset);
2848 + reset_control_assert(res->phy_reset);
2849 clk_disable_unprepare(res->iface_clk);
2850 clk_disable_unprepare(res->core_clk);
2851 - clk_disable_unprepare(res->phy_clk);
2852 + clk_disable_unprepare(res->aux_clk);
2853 + clk_disable_unprepare(res->ref_clk);
2854 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
2855 }
2856
2857 @@ -315,24 +333,36 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2858 goto err_assert_ahb;
2859 }
2860
2861 - ret = clk_prepare_enable(res->phy_clk);
2862 - if (ret) {
2863 - dev_err(dev, "cannot prepare/enable phy clock\n");
2864 - goto err_clk_phy;
2865 - }
2866 -
2867 ret = clk_prepare_enable(res->core_clk);
2868 if (ret) {
2869 dev_err(dev, "cannot prepare/enable core clock\n");
2870 goto err_clk_core;
2871 }
2872
2873 + ret = clk_prepare_enable(res->aux_clk);
2874 + if (ret) {
2875 + dev_err(dev, "cannot prepare/enable aux clock\n");
2876 + goto err_clk_aux;
2877 + }
2878 +
2879 + ret = clk_prepare_enable(res->ref_clk);
2880 + if (ret) {
2881 + dev_err(dev, "cannot prepare/enable ref clock\n");
2882 + goto err_clk_ref;
2883 + }
2884 +
2885 ret = reset_control_deassert(res->ahb_reset);
2886 if (ret) {
2887 dev_err(dev, "cannot deassert ahb reset\n");
2888 goto err_deassert_ahb;
2889 }
2890
2891 + ret = reset_control_deassert(res->ext_reset);
2892 + if (ret) {
2893 + dev_err(dev, "cannot deassert ext reset\n");
2894 + goto err_deassert_ahb;
2895 + }
2896 +
2897 /* enable PCIe clocks and resets */
2898 val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
2899 val &= ~BIT(0);
2900 @@ -387,6 +417,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2901 return ret;
2902 }
2903
2904 + ret = clk_prepare_enable(res->phy_clk);
2905 + if (ret) {
2906 + dev_err(dev, "cannot prepare/enable phy clock\n");
2907 + goto err_deassert_ahb;
2908 + }
2909 +
2910 /* wait for clock acquisition */
2911 usleep_range(1000, 1500);
2912
2913 @@ -400,10 +436,12 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
2914 return 0;
2915
2916 err_deassert_ahb:
2917 + clk_disable_unprepare(res->ref_clk);
2918 +err_clk_ref:
2919 + clk_disable_unprepare(res->aux_clk);
2920 +err_clk_aux:
2921 clk_disable_unprepare(res->core_clk);
2922 err_clk_core:
2923 - clk_disable_unprepare(res->phy_clk);
2924 -err_clk_phy:
2925 clk_disable_unprepare(res->iface_clk);
2926 err_assert_ahb:
2927 regulator_bulk_disable(ARRAY_SIZE(res->supplies), res->supplies);
2928 diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
2929 index ae4aa0e1f2f42..1f087746b7bb0 100644
2930 --- a/drivers/pci/slot.c
2931 +++ b/drivers/pci/slot.c
2932 @@ -304,13 +304,16 @@ placeholder:
2933 slot_name = make_slot_name(name);
2934 if (!slot_name) {
2935 err = -ENOMEM;
2936 + kfree(slot);
2937 goto err;
2938 }
2939
2940 err = kobject_init_and_add(&slot->kobj, &pci_slot_ktype, NULL,
2941 "%s", slot_name);
2942 - if (err)
2943 + if (err) {
2944 + kobject_put(&slot->kobj);
2945 goto err;
2946 + }
2947
2948 INIT_LIST_HEAD(&slot->list);
2949 list_add(&slot->list, &parent->slots);
2950 @@ -329,7 +332,6 @@ out:
2951 mutex_unlock(&pci_slot_mutex);
2952 return slot;
2953 err:
2954 - kfree(slot);
2955 slot = ERR_PTR(err);
2956 goto out;
2957 }
2958 diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
2959 index 831850435c23b..5734a78dbb8e6 100644
2960 --- a/drivers/s390/cio/css.c
2961 +++ b/drivers/s390/cio/css.c
2962 @@ -677,6 +677,11 @@ static int slow_eval_known_fn(struct subchannel *sch, void *data)
2963 rc = css_evaluate_known_subchannel(sch, 1);
2964 if (rc == -EAGAIN)
2965 css_schedule_eval(sch->schid);
2966 + /*
2967 + * The loop might take a long time for platforms with lots of
2968 + * known devices. Allow scheduling here.
2969 + */
2970 + cond_resched();
2971 }
2972 return 0;
2973 }
2974 diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
2975 index 1791a393795da..07a0dadc75bf5 100644
2976 --- a/drivers/scsi/fcoe/fcoe_ctlr.c
2977 +++ b/drivers/scsi/fcoe/fcoe_ctlr.c
2978 @@ -255,9 +255,9 @@ static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
2979 WARN_ON(!fcf_dev);
2980 new->fcf_dev = NULL;
2981 fcoe_fcf_device_delete(fcf_dev);
2982 - kfree(new);
2983 mutex_unlock(&cdev->lock);
2984 }
2985 + kfree(new);
2986 }
2987
2988 /**
2989 diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
2990 index b766463579800..d0296f7cf45fc 100644
2991 --- a/drivers/scsi/lpfc/lpfc_vport.c
2992 +++ b/drivers/scsi/lpfc/lpfc_vport.c
2993 @@ -642,27 +642,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
2994 vport->port_state < LPFC_VPORT_READY)
2995 return -EAGAIN;
2996 }
2997 +
2998 /*
2999 - * This is a bit of a mess. We want to ensure the shost doesn't get
3000 - * torn down until we're done with the embedded lpfc_vport structure.
3001 - *
3002 - * Beyond holding a reference for this function, we also need a
3003 - * reference for outstanding I/O requests we schedule during delete
3004 - * processing. But once we scsi_remove_host() we can no longer obtain
3005 - * a reference through scsi_host_get().
3006 - *
3007 - * So we take two references here. We release one reference at the
3008 - * bottom of the function -- after delinking the vport. And we
3009 - * release the other at the completion of the unreg_vpi that get's
3010 - * initiated after we've disposed of all other resources associated
3011 - * with the port.
3012 + * Take early refcount for outstanding I/O requests we schedule during
3013 + * delete processing for unreg_vpi. Always keep this before
3014 + * scsi_remove_host() as we can no longer obtain a reference through
3015 + * scsi_host_get() after scsi_remove_host() as shost is set to SHOST_DEL.
3016 */
3017 if (!scsi_host_get(shost))
3018 return VPORT_INVAL;
3019 - if (!scsi_host_get(shost)) {
3020 - scsi_host_put(shost);
3021 - return VPORT_INVAL;
3022 - }
3023 +
3024 lpfc_free_sysfs_attr(vport);
3025
3026 lpfc_debugfs_terminate(vport);
3027 @@ -809,8 +798,9 @@ skip_logo:
3028 if (!(vport->vpi_state & LPFC_VPI_REGISTERED) ||
3029 lpfc_mbx_unreg_vpi(vport))
3030 scsi_host_put(shost);
3031 - } else
3032 + } else {
3033 scsi_host_put(shost);
3034 + }
3035
3036 lpfc_free_vpi(phba, vport->vpi);
3037 vport->work_port_events = 0;
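
Note: the rewritten comment captures an ordering constraint worth spelling out: scsi_host_get() fails once the host has entered SHOST_DEL, i.e. after scsi_remove_host(), so any reference needed for teardown I/O must be taken while the host is still alive. A sketch of the constraint, not lpfc's actual teardown path:

#include <linux/errno.h>
#include <scsi/scsi_host.h>

static int demo_delete_port(struct Scsi_Host *shost)
{
	/* take the teardown reference before the host is removed */
	if (!scsi_host_get(shost))
		return -ENODEV;

	scsi_remove_host(shost);  /* scsi_host_get() would fail from here on */

	/* ... issue logout / unreg I/O that still needs shost ... */

	scsi_host_put(shost);     /* drop the early reference when done */
	return 0;
}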
3038 diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
3039 index a44de4c5dcf6c..fc6e12fb7d77b 100644
3040 --- a/drivers/scsi/qla2xxx/qla_gs.c
3041 +++ b/drivers/scsi/qla2xxx/qla_gs.c
3042 @@ -3673,10 +3673,22 @@ void qla24xx_async_gnnft_done(scsi_qla_host_t *vha, srb_t *sp)
3043 }
3044
3045 if (fcport->scan_state != QLA_FCPORT_FOUND) {
3046 + bool do_delete = false;
3047 +
3048 + if (fcport->scan_needed &&
3049 + fcport->disc_state == DSC_LOGIN_PEND) {
3050 + /* The cable got disconnected after we sent
3051 + * a login. Delete the port to prevent a timeout.
3052 + */
3053 + fcport->logout_on_delete = 1;
3054 + do_delete = true;
3055 + }
3056 +
3057 fcport->scan_needed = 0;
3058 - if ((qla_dual_mode_enabled(vha) ||
3059 - qla_ini_mode_enabled(vha)) &&
3060 - atomic_read(&fcport->state) == FCS_ONLINE) {
3061 + if (((qla_dual_mode_enabled(vha) ||
3062 + qla_ini_mode_enabled(vha)) &&
3063 + atomic_read(&fcport->state) == FCS_ONLINE) ||
3064 + do_delete) {
3065 if (fcport->loop_id != FC_NO_LOOP_ID) {
3066 if (fcport->flags & FCF_FCP2_DEVICE)
3067 fcport->logout_on_delete = 0;
3068 diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
3069 index 62a16463f0254..c1631e42d35d1 100644
3070 --- a/drivers/scsi/qla2xxx/qla_mbx.c
3071 +++ b/drivers/scsi/qla2xxx/qla_mbx.c
3072 @@ -335,14 +335,6 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
3073 if (time_after(jiffies, wait_time))
3074 break;
3075
3076 - /*
3077 - * Check if it's UNLOADING, cause we cannot poll in
3078 - * this case, or else a NULL pointer dereference
3079 - * is triggered.
3080 - */
3081 - if (unlikely(test_bit(UNLOADING, &base_vha->dpc_flags)))
3082 - return QLA_FUNCTION_TIMEOUT;
3083 -
3084 /* Check for pending interrupts. */
3085 qla2x00_poll(ha->rsp_q_map[0]);
3086
3087 diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
3088 index 941aa53363f56..f4815a4084d8c 100644
3089 --- a/drivers/scsi/qla2xxx/qla_nvme.c
3090 +++ b/drivers/scsi/qla2xxx/qla_nvme.c
3091 @@ -535,6 +535,11 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
3092 struct nvme_private *priv = fd->private;
3093 struct qla_nvme_rport *qla_rport = rport->private;
3094
3095 + if (!priv) {
3096 + /* nvme association has been torn down */
3097 + return rval;
3098 + }
3099 +
3100 fcport = qla_rport->fcport;
3101
3102 if (!qpair || !fcport || (qpair && !qpair->fw_started) ||
3103 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
3104 index d91c95d9981ac..67b1e74fcd1e6 100644
3105 --- a/drivers/scsi/qla2xxx/qla_os.c
3106 +++ b/drivers/scsi/qla2xxx/qla_os.c
3107 @@ -1993,6 +1993,11 @@ skip_pio:
3108 /* Determine queue resources */
3109 ha->max_req_queues = ha->max_rsp_queues = 1;
3110 ha->msix_count = QLA_BASE_VECTORS;
3111 +
3112 + /* Check if FW supports MQ or not */
3113 + if (!(ha->fw_attributes & BIT_6))
3114 + goto mqiobase_exit;
3115 +
3116 if (!ql2xmqsupport || !ql2xnvmeenable ||
3117 (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
3118 goto mqiobase_exit;
3119 diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
3120 index cb8a892e2d393..b75e6e4d58c06 100644
3121 --- a/drivers/scsi/qla2xxx/qla_target.c
3122 +++ b/drivers/scsi/qla2xxx/qla_target.c
3123 @@ -1262,7 +1262,7 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
3124
3125 qla24xx_chk_fcp_state(sess);
3126
3127 - ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
3128 + ql_dbg(ql_dbg_disc, sess->vha, 0xe001,
3129 "Scheduling sess %p for deletion %8phC\n",
3130 sess, sess->port_name);
3131
3132 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
3133 index a5c78b38d3022..dbad926e8f87f 100644
3134 --- a/drivers/scsi/scsi_transport_iscsi.c
3135 +++ b/drivers/scsi/scsi_transport_iscsi.c
3136 @@ -3174,7 +3174,7 @@ static int iscsi_set_flashnode_param(struct iscsi_transport *transport,
3137 pr_err("%s could not find host no %u\n",
3138 __func__, ev->u.set_flashnode.host_no);
3139 err = -ENODEV;
3140 - goto put_host;
3141 + goto exit_set_fnode;
3142 }
3143
3144 idx = ev->u.set_flashnode.flashnode_idx;
3145 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
3146 index b41b88bcab3d9..5e502e1605549 100644
3147 --- a/drivers/scsi/ufs/ufshcd.c
3148 +++ b/drivers/scsi/ufs/ufshcd.c
3149 @@ -1516,6 +1516,7 @@ unblock_reqs:
3150 int ufshcd_hold(struct ufs_hba *hba, bool async)
3151 {
3152 int rc = 0;
3153 + bool flush_result;
3154 unsigned long flags;
3155
3156 if (!ufshcd_is_clkgating_allowed(hba))
3157 @@ -1547,7 +1548,9 @@ start:
3158 break;
3159 }
3160 spin_unlock_irqrestore(hba->host->host_lock, flags);
3161 - flush_work(&hba->clk_gating.ungate_work);
3162 + flush_result = flush_work(&hba->clk_gating.ungate_work);
3163 + if (hba->clk_gating.is_suspended && !flush_result)
3164 + goto out;
3165 spin_lock_irqsave(hba->host->host_lock, flags);
3166 goto start;
3167 }
3168 @@ -5609,7 +5612,7 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status)
3169 */
3170 static irqreturn_t ufshcd_intr(int irq, void *__hba)
3171 {
3172 - u32 intr_status, enabled_intr_status;
3173 + u32 intr_status, enabled_intr_status = 0;
3174 irqreturn_t retval = IRQ_NONE;
3175 struct ufs_hba *hba = __hba;
3176 int retries = hba->nutrs;
3177 @@ -5623,7 +5626,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
3178 * read, make sure we handle them by checking the interrupt status
3179 * again in a loop until we process all of the reqs before returning.
3180 */
3181 - do {
3182 + while (intr_status && retries--) {
3183 enabled_intr_status =
3184 intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
3185 if (intr_status)
3186 @@ -5634,7 +5637,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
3187 }
3188
3189 intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
3190 - } while (intr_status && --retries);
3191 + }
3192
3193 spin_unlock(hba->host->host_lock);
3194 return retval;
3195 @@ -6137,7 +6140,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
3196 /* command completed already */
3197 dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
3198 __func__, tag);
3199 - goto out;
3200 + goto cleanup;
3201 } else {
3202 dev_err(hba->dev,
3203 "%s: no response from device. tag = %d, err %d\n",
3204 @@ -6171,6 +6174,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
3205 goto out;
3206 }
3207
3208 +cleanup:
3209 scsi_dma_unmap(cmd);
3210
3211 spin_lock_irqsave(host->host_lock, flags);
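
Note: the interrupt-handler hunks above harden the IRQ path in two ways: enabled_intr_status gets a defined initial value, and the do/while becomes a while so an invocation with no pending status (possible on a shared interrupt line) never executes the body with stale values, while retries still bounds the re-read loop. A sketch of the resulting shape, with hypothetical register offsets and helpers:

#include <linux/interrupt.h>
#include <linux/io.h>

#define DEMO_REG_IRQ_STATUS	0x20
#define DEMO_REG_IRQ_ENABLE	0x24

struct demo_hba { void __iomem *mmio; int nutrs; };

static irqreturn_t demo_handle(struct demo_hba *hba, u32 events)
{
	/* ... service completions signalled by 'events' ... */
	return IRQ_HANDLED;
}

static irqreturn_t demo_intr(int irq, void *__hba)
{
	struct demo_hba *hba = __hba;
	u32 status = readl(hba->mmio + DEMO_REG_IRQ_STATUS);
	u32 enabled = 0;
	int retries = hba->nutrs;
	irqreturn_t ret = IRQ_NONE;

	/* loop only while something is pending, and only so many times */
	while (status && retries--) {
		enabled = status & readl(hba->mmio + DEMO_REG_IRQ_ENABLE);
		if (enabled)
			ret |= demo_handle(hba, enabled);

		status = readl(hba->mmio + DEMO_REG_IRQ_STATUS);
	}

	return ret;
}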
3212 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
3213 index 7e92ab0cc9920..8146c2d91d307 100644
3214 --- a/drivers/spi/spi-stm32.c
3215 +++ b/drivers/spi/spi-stm32.c
3216 @@ -443,7 +443,8 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
3217 {
3218 u32 div, mbrdiv;
3219
3220 - div = DIV_ROUND_UP(spi->clk_rate, speed_hz);
3221 + /* Ensure spi->clk_rate is even */
3222 + div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz);
3223
3224 /*
3225 * SPI framework set xfer->speed_hz to master->max_speed_hz if
3226 @@ -469,20 +470,27 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz,
3227 /**
3228 * stm32h7_spi_prepare_fthlv - Determine FIFO threshold level
3229 * @spi: pointer to the spi controller data structure
3230 + * @xfer_len: length of the message to be transferred
3231 */
3232 -static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
3233 +static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
3234 {
3235 - u32 fthlv, half_fifo;
3236 + u32 fthlv, half_fifo, packet;
3237
3238 /* data packet should not exceed 1/2 of fifo space */
3239 half_fifo = (spi->fifo_size / 2);
3240
3241 + /* data_packet should not exceed transfer length */
3242 + if (half_fifo > xfer_len)
3243 + packet = xfer_len;
3244 + else
3245 + packet = half_fifo;
3246 +
3247 if (spi->cur_bpw <= 8)
3248 - fthlv = half_fifo;
3249 + fthlv = packet;
3250 else if (spi->cur_bpw <= 16)
3251 - fthlv = half_fifo / 2;
3252 + fthlv = packet / 2;
3253 else
3254 - fthlv = half_fifo / 4;
3255 + fthlv = packet / 4;
3256
3257 /* align packet size with data registers access */
3258 if (spi->cur_bpw > 8)
3259 @@ -490,6 +498,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi)
3260 else
3261 fthlv -= (fthlv % 4); /* multiple of 4 */
3262
3263 + if (!fthlv)
3264 + fthlv = 1;
3265 +
3266 return fthlv;
3267 }
3268
3269 @@ -962,13 +973,13 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
3270 if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0)))
3271 stm32h7_spi_read_rxfifo(spi, false);
3272
3273 - writel_relaxed(mask, spi->base + STM32H7_SPI_IFCR);
3274 + writel_relaxed(sr & mask, spi->base + STM32H7_SPI_IFCR);
3275
3276 spin_unlock_irqrestore(&spi->lock, flags);
3277
3278 if (end) {
3279 - spi_finalize_current_transfer(master);
3280 stm32h7_spi_disable(spi);
3281 + spi_finalize_current_transfer(master);
3282 }
3283
3284 return IRQ_HANDLED;
3285 @@ -1396,7 +1407,7 @@ static void stm32h7_spi_set_bpw(struct stm32_spi *spi)
3286 cfg1_setb |= (bpw << STM32H7_SPI_CFG1_DSIZE_SHIFT) &
3287 STM32H7_SPI_CFG1_DSIZE;
3288
3289 - spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi);
3290 + spi->cur_fthlv = stm32h7_spi_prepare_fthlv(spi, spi->cur_xferlen);
3291 fthlv = spi->cur_fthlv - 1;
3292
3293 cfg1_clrb |= STM32H7_SPI_CFG1_FTHLV;
3294 @@ -1579,39 +1590,33 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
3295 unsigned long flags;
3296 unsigned int comm_type;
3297 int nb_words, ret = 0;
3298 + int mbr;
3299
3300 spin_lock_irqsave(&spi->lock, flags);
3301
3302 - if (spi->cur_bpw != transfer->bits_per_word) {
3303 - spi->cur_bpw = transfer->bits_per_word;
3304 - spi->cfg->set_bpw(spi);
3305 - }
3306 + spi->cur_xferlen = transfer->len;
3307
3308 - if (spi->cur_speed != transfer->speed_hz) {
3309 - int mbr;
3310 + spi->cur_bpw = transfer->bits_per_word;
3311 + spi->cfg->set_bpw(spi);
3312
3313 - /* Update spi->cur_speed with real clock speed */
3314 - mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
3315 - spi->cfg->baud_rate_div_min,
3316 - spi->cfg->baud_rate_div_max);
3317 - if (mbr < 0) {
3318 - ret = mbr;
3319 - goto out;
3320 - }
3321 -
3322 - transfer->speed_hz = spi->cur_speed;
3323 - stm32_spi_set_mbr(spi, mbr);
3324 + /* Update spi->cur_speed with real clock speed */
3325 + mbr = stm32_spi_prepare_mbr(spi, transfer->speed_hz,
3326 + spi->cfg->baud_rate_div_min,
3327 + spi->cfg->baud_rate_div_max);
3328 + if (mbr < 0) {
3329 + ret = mbr;
3330 + goto out;
3331 }
3332
3333 - comm_type = stm32_spi_communication_type(spi_dev, transfer);
3334 - if (spi->cur_comm != comm_type) {
3335 - ret = spi->cfg->set_mode(spi, comm_type);
3336 + transfer->speed_hz = spi->cur_speed;
3337 + stm32_spi_set_mbr(spi, mbr);
3338
3339 - if (ret < 0)
3340 - goto out;
3341 + comm_type = stm32_spi_communication_type(spi_dev, transfer);
3342 + ret = spi->cfg->set_mode(spi, comm_type);
3343 + if (ret < 0)
3344 + goto out;
3345
3346 - spi->cur_comm = comm_type;
3347 - }
3348 + spi->cur_comm = comm_type;
3349
3350 if (spi->cfg->set_data_idleness)
3351 spi->cfg->set_data_idleness(spi, transfer->len);
3352 @@ -1629,8 +1634,6 @@ static int stm32_spi_transfer_one_setup(struct stm32_spi *spi,
3353 goto out;
3354 }
3355
3356 - spi->cur_xferlen = transfer->len;
3357 -
3358 dev_dbg(spi->dev, "transfer communication mode set to %d\n",
3359 spi->cur_comm);
3360 dev_dbg(spi->dev,
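
Note: the reworked stm32h7_spi_prepare_fthlv() clamps the data packet to the smaller of half the FIFO and the transfer length, scales it by word width, aligns it to the register access size, and finally refuses to return 0 (a zero threshold would never raise the FIFO event). For example, with a 16-byte FIFO, 8-bit words and a 3-byte transfer: packet = min(8, 3) = 3, alignment to a multiple of 4 drops it to 0, and the clamp returns 1. A standalone sketch of the same arithmetic:

static unsigned int demo_fthlv(unsigned int fifo_size, unsigned int bpw,
			       unsigned int xfer_len)
{
	unsigned int half_fifo = fifo_size / 2;
	unsigned int packet = (half_fifo > xfer_len) ? xfer_len : half_fifo;
	unsigned int fthlv;

	if (bpw <= 8)
		fthlv = packet;
	else if (bpw <= 16)
		fthlv = packet / 2;
	else
		fthlv = packet / 4;

	/* align with 16-bit vs 8-bit data register accesses */
	if (bpw > 8)
		fthlv -= (fthlv % 2);	/* multiple of 2 */
	else
		fthlv -= (fthlv % 4);	/* multiple of 4 */

	return fthlv ? fthlv : 1;	/* 0 would stall the FIFO events */
}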
3361 diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
3362 index 8533444159635..e7b3c6e5d5744 100644
3363 --- a/drivers/target/target_core_internal.h
3364 +++ b/drivers/target/target_core_internal.h
3365 @@ -138,6 +138,7 @@ int init_se_kmem_caches(void);
3366 void release_se_kmem_caches(void);
3367 u32 scsi_get_new_index(scsi_index_t);
3368 void transport_subsystem_check_init(void);
3369 +void transport_uninit_session(struct se_session *);
3370 unsigned char *transport_dump_cmd_direction(struct se_cmd *);
3371 void transport_dump_dev_state(struct se_device *, char *, int *);
3372 void transport_dump_dev_info(struct se_device *, struct se_lun *,
3373 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
3374 index 7c78a5d02c083..b1f4be055f838 100644
3375 --- a/drivers/target/target_core_transport.c
3376 +++ b/drivers/target/target_core_transport.c
3377 @@ -236,6 +236,11 @@ int transport_init_session(struct se_session *se_sess)
3378 }
3379 EXPORT_SYMBOL(transport_init_session);
3380
3381 +void transport_uninit_session(struct se_session *se_sess)
3382 +{
3383 + percpu_ref_exit(&se_sess->cmd_count);
3384 +}
3385 +
3386 /**
3387 * transport_alloc_session - allocate a session object and initialize it
3388 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
3389 @@ -579,7 +584,7 @@ void transport_free_session(struct se_session *se_sess)
3390 sbitmap_queue_free(&se_sess->sess_tag_pool);
3391 kvfree(se_sess->sess_cmd_map);
3392 }
3393 - percpu_ref_exit(&se_sess->cmd_count);
3394 + transport_uninit_session(se_sess);
3395 kmem_cache_free(se_sess_cache, se_sess);
3396 }
3397 EXPORT_SYMBOL(transport_free_session);
3398 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
3399 index d766fb14942b3..8888cdf3eead9 100644
3400 --- a/drivers/target/target_core_user.c
3401 +++ b/drivers/target/target_core_user.c
3402 @@ -1220,7 +1220,14 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
3403
3404 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
3405
3406 - tcmu_flush_dcache_range(entry, sizeof(*entry));
3407 + /*
3408 + * Flush at most up to the end of the cmd ring, since the current
3409 + * entry might be a padding entry shorter than sizeof(*entry)
3410 + */
3411 + size_t ring_left = head_to_end(udev->cmdr_last_cleaned,
3412 + udev->cmdr_size);
3413 + tcmu_flush_dcache_range(entry, ring_left < sizeof(*entry) ?
3414 + ring_left : sizeof(*entry));
3415
3416 if (tcmu_hdr_get_op(entry->hdr.len_op) == TCMU_OP_PAD) {
3417 UPDATE_HEAD(udev->cmdr_last_cleaned,
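
Note: the clamp above exists because the last record before the ring wrap can be a short padding entry, so flushing a full sizeof(*entry) would touch bytes past the ring. Generalized, the distance from the cursor to the physical end of the ring bounds any per-entry operation; a small sketch (head_to_end() here is a stand-in mirroring the driver's helper):

#include <linux/types.h>

/* stand-in helper: bytes from the cursor to the physical ring end */
static inline size_t head_to_end(size_t cursor, size_t ring_size)
{
	return ring_size - cursor;
}

static size_t demo_entry_span(size_t cursor, size_t ring_size,
			      size_t entry_size)
{
	size_t left = head_to_end(cursor, ring_size);

	/* a tail record shorter than entry_size must clamp the span */
	return left < entry_size ? left : entry_size;
}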
3418 diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
3419 index b9b1e92c6f8db..9d24e85b08631 100644
3420 --- a/drivers/target/target_core_xcopy.c
3421 +++ b/drivers/target/target_core_xcopy.c
3422 @@ -479,7 +479,7 @@ int target_xcopy_setup_pt(void)
3423 memset(&xcopy_pt_sess, 0, sizeof(struct se_session));
3424 ret = transport_init_session(&xcopy_pt_sess);
3425 if (ret < 0)
3426 - return ret;
3427 + goto destroy_wq;
3428
3429 xcopy_pt_nacl.se_tpg = &xcopy_pt_tpg;
3430 xcopy_pt_nacl.nacl_sess = &xcopy_pt_sess;
3431 @@ -488,12 +488,19 @@ int target_xcopy_setup_pt(void)
3432 xcopy_pt_sess.se_node_acl = &xcopy_pt_nacl;
3433
3434 return 0;
3435 +
3436 +destroy_wq:
3437 + destroy_workqueue(xcopy_wq);
3438 + xcopy_wq = NULL;
3439 + return ret;
3440 }
3441
3442 void target_xcopy_release_pt(void)
3443 {
3444 - if (xcopy_wq)
3445 + if (xcopy_wq) {
3446 destroy_workqueue(xcopy_wq);
3447 + transport_uninit_session(&xcopy_pt_sess);
3448 + }
3449 }
3450
3451 /*
3452 diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
3453 index 9e2dbe43667ae..93367dea4d8a5 100644
3454 --- a/drivers/tty/serial/8250/8250_exar.c
3455 +++ b/drivers/tty/serial/8250/8250_exar.c
3456 @@ -725,6 +725,24 @@ static const struct exar8250_board pbn_exar_XR17V35x = {
3457 .exit = pci_xr17v35x_exit,
3458 };
3459
3460 +static const struct exar8250_board pbn_fastcom35x_2 = {
3461 + .num_ports = 2,
3462 + .setup = pci_xr17v35x_setup,
3463 + .exit = pci_xr17v35x_exit,
3464 +};
3465 +
3466 +static const struct exar8250_board pbn_fastcom35x_4 = {
3467 + .num_ports = 4,
3468 + .setup = pci_xr17v35x_setup,
3469 + .exit = pci_xr17v35x_exit,
3470 +};
3471 +
3472 +static const struct exar8250_board pbn_fastcom35x_8 = {
3473 + .num_ports = 8,
3474 + .setup = pci_xr17v35x_setup,
3475 + .exit = pci_xr17v35x_exit,
3476 +};
3477 +
3478 static const struct exar8250_board pbn_exar_XR17V4358 = {
3479 .num_ports = 12,
3480 .setup = pci_xr17v35x_setup,
3481 @@ -795,9 +813,9 @@ static const struct pci_device_id exar_pci_tbl[] = {
3482 EXAR_DEVICE(EXAR, EXAR_XR17V358, pbn_exar_XR17V35x),
3483 EXAR_DEVICE(EXAR, EXAR_XR17V4358, pbn_exar_XR17V4358),
3484 EXAR_DEVICE(EXAR, EXAR_XR17V8358, pbn_exar_XR17V8358),
3485 - EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_exar_XR17V35x),
3486 - EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_exar_XR17V35x),
3487 - EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_exar_XR17V35x),
3488 + EXAR_DEVICE(COMMTECH, COMMTECH_4222PCIE, pbn_fastcom35x_2),
3489 + EXAR_DEVICE(COMMTECH, COMMTECH_4224PCIE, pbn_fastcom35x_4),
3490 + EXAR_DEVICE(COMMTECH, COMMTECH_4228PCIE, pbn_fastcom35x_8),
3491
3492 EXAR_DEVICE(COMMTECH, COMMTECH_4222PCI335, pbn_fastcom335_2),
3493 EXAR_DEVICE(COMMTECH, COMMTECH_4224PCI335, pbn_fastcom335_4),
3494 diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
3495 index dbb27303a6b49..90f09ed6e5ad3 100644
3496 --- a/drivers/tty/serial/8250/8250_port.c
3497 +++ b/drivers/tty/serial/8250/8250_port.c
3498 @@ -2198,6 +2198,10 @@ int serial8250_do_startup(struct uart_port *port)
3499
3500 if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
3501 unsigned char iir1;
3502 +
3503 + if (port->irqflags & IRQF_SHARED)
3504 + disable_irq_nosync(port->irq);
3505 +
3506 /*
3507 * Test for UARTs that do not reassert THRE when the
3508 * transmitter is idle and the interrupt has already
3509 @@ -2207,8 +2211,6 @@ int serial8250_do_startup(struct uart_port *port)
3510 * allow register changes to become visible.
3511 */
3512 spin_lock_irqsave(&port->lock, flags);
3513 - if (up->port.irqflags & IRQF_SHARED)
3514 - disable_irq_nosync(port->irq);
3515
3516 wait_for_xmitr(up, UART_LSR_THRE);
3517 serial_port_out_sync(port, UART_IER, UART_IER_THRI);
3518 @@ -2220,9 +2222,10 @@ int serial8250_do_startup(struct uart_port *port)
3519 iir = serial_port_in(port, UART_IIR);
3520 serial_port_out(port, UART_IER, 0);
3521
3522 + spin_unlock_irqrestore(&port->lock, flags);
3523 +
3524 if (port->irqflags & IRQF_SHARED)
3525 enable_irq(port->irq);
3526 - spin_unlock_irqrestore(&port->lock, flags);
3527
3528 /*
3529 * If the interrupt is not reasserted, or we otherwise
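
Note: the reordering above keeps the shared-IRQ manipulation outside the port spinlock: disable_irq_nosync()/enable_irq() take the irq descriptor lock, and nesting that inside port->lock while the interrupt handler acquires the locks the other way round is an AB-BA inversion. A sketch of the corrected shape (the THRE probe body is elided, the port type is hypothetical):

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_uart {
	spinlock_t lock;
	int irq;
	unsigned long irqflags;
};

static void demo_thre_probe(struct demo_uart *port)
{
	unsigned long flags;

	if (port->irqflags & IRQF_SHARED)
		disable_irq_nosync(port->irq);	/* outside port->lock */

	spin_lock_irqsave(&port->lock, flags);
	/* ... probe THRE behaviour with the line quiesced ... */
	spin_unlock_irqrestore(&port->lock, flags);

	if (port->irqflags & IRQF_SHARED)
		enable_irq(port->irq);		/* also outside port->lock */
}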
3530 diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
3531 index de3e8c24c03e7..a8a538b34b535 100644
3532 --- a/drivers/tty/serial/amba-pl011.c
3533 +++ b/drivers/tty/serial/amba-pl011.c
3534 @@ -2252,9 +2252,8 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
3535 clk_disable(uap->clk);
3536 }
3537
3538 -static void __init
3539 -pl011_console_get_options(struct uart_amba_port *uap, int *baud,
3540 - int *parity, int *bits)
3541 +static void pl011_console_get_options(struct uart_amba_port *uap, int *baud,
3542 + int *parity, int *bits)
3543 {
3544 if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
3545 unsigned int lcr_h, ibrd, fbrd;
3546 @@ -2287,7 +2286,7 @@ pl011_console_get_options(struct uart_amba_port *uap, int *baud,
3547 }
3548 }
3549
3550 -static int __init pl011_console_setup(struct console *co, char *options)
3551 +static int pl011_console_setup(struct console *co, char *options)
3552 {
3553 struct uart_amba_port *uap;
3554 int baud = 38400;
3555 @@ -2355,8 +2354,8 @@ static int __init pl011_console_setup(struct console *co, char *options)
3556 *
3557 * Returns 0 if console matches; otherwise non-zero to use default matching
3558 */
3559 -static int __init pl011_console_match(struct console *co, char *name, int idx,
3560 - char *options)
3561 +static int pl011_console_match(struct console *co, char *name, int idx,
3562 + char *options)
3563 {
3564 unsigned char iotype;
3565 resource_size_t addr;
3566 @@ -2594,7 +2593,7 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
3567
3568 static int pl011_register_port(struct uart_amba_port *uap)
3569 {
3570 - int ret;
3571 + int ret, i;
3572
3573 /* Ensure interrupts from this UART are masked and cleared */
3574 pl011_write(0, uap, REG_IMSC);
3575 @@ -2605,6 +2604,9 @@ static int pl011_register_port(struct uart_amba_port *uap)
3576 if (ret < 0) {
3577 dev_err(uap->port.dev,
3578 "Failed to register AMBA-PL011 driver\n");
3579 + for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
3580 + if (amba_ports[i] == uap)
3581 + amba_ports[i] = NULL;
3582 return ret;
3583 }
3584 }
3585 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
3586 index 83fd51607741b..71f99e9217592 100644
3587 --- a/drivers/tty/serial/samsung.c
3588 +++ b/drivers/tty/serial/samsung.c
3589 @@ -1791,9 +1791,11 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
3590 ourport->tx_irq = ret + 1;
3591 }
3592
3593 - ret = platform_get_irq(platdev, 1);
3594 - if (ret > 0)
3595 - ourport->tx_irq = ret;
3596 + if (!s3c24xx_serial_has_interrupt_mask(port)) {
3597 + ret = platform_get_irq(platdev, 1);
3598 + if (ret > 0)
3599 + ourport->tx_irq = ret;
3600 + }
3601 /*
3602 * DMA is currently supported only on DT platforms, if DMA properties
3603 * are specified.
3604 diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
3605 index 2f72514d63edd..bf83e6c212f59 100644
3606 --- a/drivers/tty/serial/stm32-usart.c
3607 +++ b/drivers/tty/serial/stm32-usart.c
3608 @@ -937,7 +937,7 @@ static int stm32_init_port(struct stm32_port *stm32port,
3609 stm32_init_rs485(port, pdev);
3610
3611 if (stm32port->info->cfg.has_wakeup) {
3612 - stm32port->wakeirq = platform_get_irq(pdev, 1);
3613 + stm32port->wakeirq = platform_get_irq_optional(pdev, 1);
3614 if (stm32port->wakeirq <= 0 && stm32port->wakeirq != -ENXIO)
3615 return stm32port->wakeirq ? : -ENODEV;
3616 }
3617 diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
3618 index 8051c70326277..d07a9c9c76081 100644
3619 --- a/drivers/tty/vt/vt.c
3620 +++ b/drivers/tty/vt/vt.c
3621 @@ -1196,7 +1196,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
3622 unsigned int old_rows, old_row_size, first_copied_row;
3623 unsigned int new_cols, new_rows, new_row_size, new_screen_size;
3624 unsigned int user;
3625 - unsigned short *newscreen;
3626 + unsigned short *oldscreen, *newscreen;
3627 struct uni_screen *new_uniscr = NULL;
3628
3629 WARN_CONSOLE_UNLOCKED();
3630 @@ -1294,10 +1294,11 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
3631 if (new_scr_end > new_origin)
3632 scr_memsetw((void *)new_origin, vc->vc_video_erase_char,
3633 new_scr_end - new_origin);
3634 - kfree(vc->vc_screenbuf);
3635 + oldscreen = vc->vc_screenbuf;
3636 vc->vc_screenbuf = newscreen;
3637 vc->vc_screenbuf_size = new_screen_size;
3638 set_origin(vc);
3639 + kfree(oldscreen);
3640
3641 /* do part of a reset_terminal() */
3642 vc->vc_top = 0;
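
Note: the ordering change above is a use-after-free fix: set_origin() can still reach the previous screen buffer through console state, so the old allocation must stay valid until the new buffer is fully installed. The resulting swap-then-free idiom, sketched with hypothetical field names:

#include <linux/slab.h>

struct demo_con {
	unsigned short *screenbuf;
	unsigned int screenbuf_size;
};

static void demo_refresh(struct demo_con *con) { /* may read old state */ }

static void demo_install_buf(struct demo_con *con, unsigned short *newbuf,
			     unsigned int newsize)
{
	unsigned short *oldbuf = con->screenbuf;

	con->screenbuf = newbuf;
	con->screenbuf_size = newsize;
	demo_refresh(con);	/* must complete before the old buffer dies */
	kfree(oldbuf);		/* only now is the old buffer unreachable */
}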
3643 diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
3644 index daf61c28ba766..cbc85c995d92d 100644
3645 --- a/drivers/tty/vt/vt_ioctl.c
3646 +++ b/drivers/tty/vt/vt_ioctl.c
3647 @@ -893,12 +893,22 @@ int vt_ioctl(struct tty_struct *tty,
3648 console_lock();
3649 vcp = vc_cons[i].d;
3650 if (vcp) {
3651 + int ret;
3652 + int save_scan_lines = vcp->vc_scan_lines;
3653 + int save_font_height = vcp->vc_font.height;
3654 +
3655 if (v.v_vlin)
3656 vcp->vc_scan_lines = v.v_vlin;
3657 if (v.v_clin)
3658 vcp->vc_font.height = v.v_clin;
3659 vcp->vc_resize_user = 1;
3660 - vc_resize(vcp, v.v_cols, v.v_rows);
3661 + ret = vc_resize(vcp, v.v_cols, v.v_rows);
3662 + if (ret) {
3663 + vcp->vc_scan_lines = save_scan_lines;
3664 + vcp->vc_font.height = save_font_height;
3665 + console_unlock();
3666 + return ret;
3667 + }
3668 }
3669 console_unlock();
3670 }
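
Note: the hunk makes the ioctl transactional: the geometry fields are updated speculatively before vc_resize(), so a failure (e.g. -ENOMEM) must restore the saved values or the console is left describing a size it never got. A generic sketch of the save/rollback pattern, with a stand-in console type and resize helper:

struct demo_vc { unsigned int scan_lines, font_height; };

/* stand-in for vc_resize(): returns 0 on success or a -errno */
static int demo_resize(struct demo_vc *vc, unsigned int cols,
		       unsigned int rows)
{
	return 0;
}

static int demo_apply_geometry(struct demo_vc *vc, unsigned int cols,
			       unsigned int rows, unsigned int vlin,
			       unsigned int clin)
{
	unsigned int save_scan_lines = vc->scan_lines;
	unsigned int save_font_height = vc->font_height;
	int ret;

	if (vlin)
		vc->scan_lines = vlin;
	if (clin)
		vc->font_height = clin;

	ret = demo_resize(vc, cols, rows);
	if (ret) {
		/* roll back the speculative updates */
		vc->scan_lines = save_scan_lines;
		vc->font_height = save_font_height;
	}
	return ret;
}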
3671 diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
3672 index 856c34010021b..9900888afbcd8 100644
3673 --- a/drivers/usb/cdns3/gadget.c
3674 +++ b/drivers/usb/cdns3/gadget.c
3675 @@ -189,10 +189,10 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep)
3676 GFP_DMA32 | GFP_ATOMIC);
3677 if (!priv_ep->trb_pool)
3678 return -ENOMEM;
3679 - } else {
3680 - memset(priv_ep->trb_pool, 0, ring_size);
3681 }
3682
3683 + memset(priv_ep->trb_pool, 0, ring_size);
3684 +
3685 if (!priv_ep->num)
3686 return 0;
3687
3688 diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
3689 index d5187b50fc828..7499ba118665a 100644
3690 --- a/drivers/usb/class/cdc-acm.c
3691 +++ b/drivers/usb/class/cdc-acm.c
3692 @@ -378,21 +378,19 @@ static void acm_ctrl_irq(struct urb *urb)
3693 if (current_size < expected_size) {
3694 /* notification is transmitted fragmented, reassemble */
3695 if (acm->nb_size < expected_size) {
3696 - if (acm->nb_size) {
3697 - kfree(acm->notification_buffer);
3698 - acm->nb_size = 0;
3699 - }
3700 + u8 *new_buffer;
3701 alloc_size = roundup_pow_of_two(expected_size);
3702 - /*
3703 - * kmalloc ensures a valid notification_buffer after a
3704 - * use of kfree in case the previous allocation was too
3705 - * small. Final freeing is done on disconnect.
3706 - */
3707 - acm->notification_buffer =
3708 - kmalloc(alloc_size, GFP_ATOMIC);
3709 - if (!acm->notification_buffer)
3710 + /* Final freeing is done on disconnect. */
3711 + new_buffer = krealloc(acm->notification_buffer,
3712 + alloc_size, GFP_ATOMIC);
3713 + if (!new_buffer) {
3714 + acm->nb_index = 0;
3715 goto exit;
3716 + }
3717 +
3718 + acm->notification_buffer = new_buffer;
3719 acm->nb_size = alloc_size;
3720 + dr = (struct usb_cdc_notification *)acm->notification_buffer;
3721 }
3722
3723 copy_size = min(current_size,
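
Note: the krealloc() conversion above closes the holes in the old kfree()+kmalloc() sequence, which left acm->notification_buffer dangling whenever the new allocation failed. krealloc() keeps the old block on failure and preserves its contents on success, so the pointer and size are updated only when growth actually succeeded (the hunk also resets nb_index so a failed reassembly restarts cleanly). Sketch, with a hypothetical container:

#include <linux/errno.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_acm {
	u8 *notification_buffer;
	unsigned int nb_size;
};

static int demo_grow_nb(struct demo_acm *acm, unsigned int expected)
{
	unsigned int alloc_size = roundup_pow_of_two(expected);
	u8 *nb;

	nb = krealloc(acm->notification_buffer, alloc_size, GFP_ATOMIC);
	if (!nb)
		return -ENOMEM;	/* old buffer is untouched and still owned */

	acm->notification_buffer = nb;	/* contents were preserved */
	acm->nb_size = alloc_size;
	return 0;
}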
3724 diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
3725 index c96c50faccf72..2f068e525a374 100644
3726 --- a/drivers/usb/core/quirks.c
3727 +++ b/drivers/usb/core/quirks.c
3728 @@ -370,6 +370,10 @@ static const struct usb_device_id usb_quirk_list[] = {
3729 { USB_DEVICE(0x0926, 0x0202), .driver_info =
3730 USB_QUIRK_ENDPOINT_BLACKLIST },
3731
3732 + /* Sound Devices MixPre-D */
3733 + { USB_DEVICE(0x0926, 0x0208), .driver_info =
3734 + USB_QUIRK_ENDPOINT_BLACKLIST },
3735 +
3736 /* Keytouch QWERTY Panel keyboard */
3737 { USB_DEVICE(0x0926, 0x3333), .driver_info =
3738 USB_QUIRK_CONFIG_INTF_STRINGS },
3739 @@ -465,6 +469,8 @@ static const struct usb_device_id usb_quirk_list[] = {
3740
3741 { USB_DEVICE(0x2386, 0x3119), .driver_info = USB_QUIRK_NO_LPM },
3742
3743 + { USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
3744 +
3745 /* DJI CineSSD */
3746 { USB_DEVICE(0x2ca3, 0x0031), .driver_info = USB_QUIRK_NO_LPM },
3747
3748 @@ -509,6 +515,7 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
3749 */
3750 static const struct usb_device_id usb_endpoint_blacklist[] = {
3751 { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
3752 + { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0208, 1), .driver_info = 0x85 },
3753 { }
3754 };
3755
3756 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
3757 index 17340864a5408..4225544342519 100644
3758 --- a/drivers/usb/dwc3/gadget.c
3759 +++ b/drivers/usb/dwc3/gadget.c
3760 @@ -1017,26 +1017,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb,
3761 * dwc3_prepare_one_trb - setup one TRB from one request
3762 * @dep: endpoint for which this request is prepared
3763 * @req: dwc3_request pointer
3764 + * @trb_length: buffer size of the TRB
3765 * @chain: should this TRB be chained to the next?
3766 * @node: only for isochronous endpoints. First TRB needs different type.
3767 */
3768 static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
3769 - struct dwc3_request *req, unsigned chain, unsigned node)
3770 + struct dwc3_request *req, unsigned int trb_length,
3771 + unsigned chain, unsigned node)
3772 {
3773 struct dwc3_trb *trb;
3774 - unsigned int length;
3775 dma_addr_t dma;
3776 unsigned stream_id = req->request.stream_id;
3777 unsigned short_not_ok = req->request.short_not_ok;
3778 unsigned no_interrupt = req->request.no_interrupt;
3779
3780 - if (req->request.num_sgs > 0) {
3781 - length = sg_dma_len(req->start_sg);
3782 + if (req->request.num_sgs > 0)
3783 dma = sg_dma_address(req->start_sg);
3784 - } else {
3785 - length = req->request.length;
3786 + else
3787 dma = req->request.dma;
3788 - }
3789
3790 trb = &dep->trb_pool[dep->trb_enqueue];
3791
3792 @@ -1048,7 +1046,7 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
3793
3794 req->num_trbs++;
3795
3796 - __dwc3_prepare_one_trb(dep, trb, dma, length, chain, node,
3797 + __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node,
3798 stream_id, short_not_ok, no_interrupt);
3799 }
3800
3801 @@ -1058,16 +1056,27 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
3802 struct scatterlist *sg = req->start_sg;
3803 struct scatterlist *s;
3804 int i;
3805 -
3806 + unsigned int length = req->request.length;
3807 unsigned int remaining = req->request.num_mapped_sgs
3808 - req->num_queued_sgs;
3809
3810 + /*
3811 + * If we resume preparing the request, then get the remaining length of
3812 + * the request and resume where we left off.
3813 + */
3814 + for_each_sg(req->request.sg, s, req->num_queued_sgs, i)
3815 + length -= sg_dma_len(s);
3816 +
3817 for_each_sg(sg, s, remaining, i) {
3818 - unsigned int length = req->request.length;
3819 unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
3820 unsigned int rem = length % maxp;
3821 + unsigned int trb_length;
3822 unsigned chain = true;
3823
3824 + trb_length = min_t(unsigned int, length, sg_dma_len(s));
3825 +
3826 + length -= trb_length;
3827 +
3828 /*
3829 * IOMMU driver is coalescing the list of sgs which shares a
3830 * page boundary into one and giving it to USB driver. With
3831 @@ -1075,7 +1084,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
3832 * sgs passed. So mark the chain bit to false if it is the last
3833 * mapped sg.
3834 */
3835 - if (i == remaining - 1)
3836 + if ((i == remaining - 1) || !length)
3837 chain = false;
3838
3839 if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
3840 @@ -1085,7 +1094,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
3841 req->needs_extra_trb = true;
3842
3843 /* prepare normal TRB */
3844 - dwc3_prepare_one_trb(dep, req, true, i);
3845 + dwc3_prepare_one_trb(dep, req, trb_length, true, i);
3846
3847 /* Now prepare one extra TRB to align transfer size */
3848 trb = &dep->trb_pool[dep->trb_enqueue];
3849 @@ -1095,8 +1104,37 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
3850 req->request.stream_id,
3851 req->request.short_not_ok,
3852 req->request.no_interrupt);
3853 + } else if (req->request.zero && req->request.length &&
3854 + !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3855 + !rem && !chain) {
3856 + struct dwc3 *dwc = dep->dwc;
3857 + struct dwc3_trb *trb;
3858 +
3859 + req->needs_extra_trb = true;
3860 +
3861 + /* Prepare normal TRB */
3862 + dwc3_prepare_one_trb(dep, req, trb_length, true, i);
3863 +
3864 + /* Prepare one extra TRB to handle ZLP */
3865 + trb = &dep->trb_pool[dep->trb_enqueue];
3866 + req->num_trbs++;
3867 + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
3868 + !req->direction, 1,
3869 + req->request.stream_id,
3870 + req->request.short_not_ok,
3871 + req->request.no_interrupt);
3872 +
3873 + /* Prepare one more TRB to handle MPS alignment */
3874 + if (!req->direction) {
3875 + trb = &dep->trb_pool[dep->trb_enqueue];
3876 + req->num_trbs++;
3877 + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
3878 + false, 1, req->request.stream_id,
3879 + req->request.short_not_ok,
3880 + req->request.no_interrupt);
3881 + }
3882 } else {
3883 - dwc3_prepare_one_trb(dep, req, chain, i);
3884 + dwc3_prepare_one_trb(dep, req, trb_length, chain, i);
3885 }
3886
3887 /*
3888 @@ -1111,6 +1149,16 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
3889
3890 req->num_queued_sgs++;
3891
3892 + /*
3893 + * The number of pending SG entries may not correspond to the
3894 + * number of mapped SG entries. If all the data are queued, then
3895 + * don't include unused SG entries.
3896 + */
3897 + if (length == 0) {
3898 + req->num_pending_sgs -= req->request.num_mapped_sgs - req->num_queued_sgs;
3899 + break;
3900 + }
3901 +
3902 if (!dwc3_calc_trbs_left(dep))
3903 break;
3904 }
3905 @@ -1130,7 +1178,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
3906 req->needs_extra_trb = true;
3907
3908 /* prepare normal TRB */
3909 - dwc3_prepare_one_trb(dep, req, true, 0);
3910 + dwc3_prepare_one_trb(dep, req, length, true, 0);
3911
3912 /* Now prepare one extra TRB to align transfer size */
3913 trb = &dep->trb_pool[dep->trb_enqueue];
3914 @@ -1140,6 +1188,7 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
3915 req->request.short_not_ok,
3916 req->request.no_interrupt);
3917 } else if (req->request.zero && req->request.length &&
3918 + !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3919 (IS_ALIGNED(req->request.length, maxp))) {
3920 struct dwc3 *dwc = dep->dwc;
3921 struct dwc3_trb *trb;
3922 @@ -1147,17 +1196,27 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep,
3923 req->needs_extra_trb = true;
3924
3925 /* prepare normal TRB */
3926 - dwc3_prepare_one_trb(dep, req, true, 0);
3927 + dwc3_prepare_one_trb(dep, req, length, true, 0);
3928
3929 - /* Now prepare one extra TRB to handle ZLP */
3930 + /* Prepare one extra TRB to handle ZLP */
3931 trb = &dep->trb_pool[dep->trb_enqueue];
3932 req->num_trbs++;
3933 __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0,
3934 - false, 1, req->request.stream_id,
3935 + !req->direction, 1, req->request.stream_id,
3936 req->request.short_not_ok,
3937 req->request.no_interrupt);
3938 +
3939 + /* Prepare one more TRB to handle MPS alignment for OUT */
3940 + if (!req->direction) {
3941 + trb = &dep->trb_pool[dep->trb_enqueue];
3942 + req->num_trbs++;
3943 + __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp,
3944 + false, 1, req->request.stream_id,
3945 + req->request.short_not_ok,
3946 + req->request.no_interrupt);
3947 + }
3948 } else {
3949 - dwc3_prepare_one_trb(dep, req, false, 0);
3950 + dwc3_prepare_one_trb(dep, req, length, false, 0);
3951 }
3952 }
3953
3954 @@ -2559,8 +2618,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep,
3955 status);
3956
3957 if (req->needs_extra_trb) {
3958 + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc);
3959 +
3960 ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event,
3961 status);
3962 +
3963 + /* Reclaim MPS padding TRB for ZLP */
3964 + if (!req->direction && req->request.zero && req->request.length &&
3965 + !usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
3966 + (IS_ALIGNED(req->request.length, maxp)))
3967 + ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status);
3968 +
3969 req->needs_extra_trb = false;
3970 }
3971
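
Note: the dwc3 changes replace "one TRB per sg entry, each sized to the whole request" with per-entry accounting: each TRB gets min(remaining request length, sg_dma_len()), the chain bit is cleared when either the mapped list or the request length runs out, and unused trailing sg entries (possible when the IOMMU coalesced the mapping) are not queued. A sketch of that accounting loop, with a hypothetical queue_trb helper:

#include <linux/kernel.h>
#include <linux/scatterlist.h>

static void demo_queue_trb(dma_addr_t dma, unsigned int len, bool chain)
{
	/* ... program one TRB ... */
}

static void demo_prepare_sgs(struct scatterlist *sg, int nents,
			     unsigned int req_len)
{
	unsigned int remaining = req_len;
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		unsigned int trb_len = min_t(unsigned int, remaining,
					     sg_dma_len(s));
		bool chain = true;

		remaining -= trb_len;
		if (i == nents - 1 || !remaining)
			chain = false;	/* last TRB of the transfer */

		demo_queue_trb(sg_dma_address(s), trb_len, chain);

		if (!remaining)
			break;	/* skip unused trailing sg entries */
	}
}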
3972 diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
3973 index 1d900081b1f0c..b4206b0dede54 100644
3974 --- a/drivers/usb/gadget/function/f_ncm.c
3975 +++ b/drivers/usb/gadget/function/f_ncm.c
3976 @@ -1181,12 +1181,15 @@ static int ncm_unwrap_ntb(struct gether *port,
3977 int ndp_index;
3978 unsigned dg_len, dg_len2;
3979 unsigned ndp_len;
3980 + unsigned block_len;
3981 struct sk_buff *skb2;
3982 int ret = -EINVAL;
3983 - unsigned max_size = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
3984 + unsigned ntb_max = le32_to_cpu(ntb_parameters.dwNtbOutMaxSize);
3985 + unsigned frame_max = le16_to_cpu(ecm_desc.wMaxSegmentSize);
3986 const struct ndp_parser_opts *opts = ncm->parser_opts;
3987 unsigned crc_len = ncm->is_crc ? sizeof(uint32_t) : 0;
3988 int dgram_counter;
3989 + bool ndp_after_header;
3990
3991 /* dwSignature */
3992 if (get_unaligned_le32(tmp) != opts->nth_sign) {
3993 @@ -1205,25 +1208,37 @@ static int ncm_unwrap_ntb(struct gether *port,
3994 }
3995 tmp++; /* skip wSequence */
3996
3997 + block_len = get_ncm(&tmp, opts->block_length);
3998 /* (d)wBlockLength */
3999 - if (get_ncm(&tmp, opts->block_length) > max_size) {
4000 + if (block_len > ntb_max) {
4001 INFO(port->func.config->cdev, "OUT size exceeded\n");
4002 goto err;
4003 }
4004
4005 ndp_index = get_ncm(&tmp, opts->ndp_index);
4006 + ndp_after_header = false;
4007
4008 /* Run through all the NDP's in the NTB */
4009 do {
4010 - /* NCM 3.2 */
4011 - if (((ndp_index % 4) != 0) &&
4012 - (ndp_index < opts->nth_size)) {
4013 + /*
4014 + * NCM 3.2
4015 + * dwNdpIndex
4016 + */
4017 + if (((ndp_index % 4) != 0) ||
4018 + (ndp_index < opts->nth_size) ||
4019 + (ndp_index > (block_len -
4020 + opts->ndp_size))) {
4021 INFO(port->func.config->cdev, "Bad index: %#X\n",
4022 ndp_index);
4023 goto err;
4024 }
4025 + if (ndp_index == opts->nth_size)
4026 + ndp_after_header = true;
4027
4028 - /* walk through NDP */
4029 + /*
4030 + * walk through NDP
4031 + * dwSignature
4032 + */
4033 tmp = (void *)(skb->data + ndp_index);
4034 if (get_unaligned_le32(tmp) != ncm->ndp_sign) {
4035 INFO(port->func.config->cdev, "Wrong NDP SIGN\n");
4036 @@ -1234,14 +1249,15 @@ static int ncm_unwrap_ntb(struct gether *port,
4037 ndp_len = get_unaligned_le16(tmp++);
4038 /*
4039 * NCM 3.3.1
4040 + * wLength
4041 * entry is 2 items
4042 * item size is 16/32 bits, opts->dgram_item_len * 2 bytes
4043 * minimal: struct usb_cdc_ncm_ndpX + normal entry + zero entry
4044 * Each entry is a dgram index and a dgram length.
4045 */
4046 if ((ndp_len < opts->ndp_size
4047 - + 2 * 2 * (opts->dgram_item_len * 2))
4048 - || (ndp_len % opts->ndplen_align != 0)) {
4049 + + 2 * 2 * (opts->dgram_item_len * 2)) ||
4050 + (ndp_len % opts->ndplen_align != 0)) {
4051 INFO(port->func.config->cdev, "Bad NDP length: %#X\n",
4052 ndp_len);
4053 goto err;
4054 @@ -1258,8 +1274,21 @@ static int ncm_unwrap_ntb(struct gether *port,
4055
4056 do {
4057 index = index2;
4058 + /* wDatagramIndex[0] */
4059 + if ((index < opts->nth_size) ||
4060 + (index > block_len - opts->dpe_size)) {
4061 + INFO(port->func.config->cdev,
4062 + "Bad index: %#X\n", index);
4063 + goto err;
4064 + }
4065 +
4066 dg_len = dg_len2;
4067 - if (dg_len < 14 + crc_len) { /* ethernet hdr + crc */
4068 + /*
4069 + * wDatagramLength[0]
4070 + * ethernet hdr + crc or larger than max frame size
4071 + */
4072 + if ((dg_len < 14 + crc_len) ||
4073 + (dg_len > frame_max)) {
4074 INFO(port->func.config->cdev,
4075 "Bad dgram length: %#X\n", dg_len);
4076 goto err;
4077 @@ -1283,6 +1312,37 @@ static int ncm_unwrap_ntb(struct gether *port,
4078 index2 = get_ncm(&tmp, opts->dgram_item_len);
4079 dg_len2 = get_ncm(&tmp, opts->dgram_item_len);
4080
4081 + if (index2 == 0 || dg_len2 == 0)
4082 + break;
4083 +
4084 + /* wDatagramIndex[1] */
4085 + if (ndp_after_header) {
4086 + if (index2 < opts->nth_size + opts->ndp_size) {
4087 + INFO(port->func.config->cdev,
4088 + "Bad index: %#X\n", index2);
4089 + goto err;
4090 + }
4091 + } else {
4092 + if (index2 < opts->nth_size + opts->dpe_size) {
4093 + INFO(port->func.config->cdev,
4094 + "Bad index: %#X\n", index2);
4095 + goto err;
4096 + }
4097 + }
4098 + if (index2 > block_len - opts->dpe_size) {
4099 + INFO(port->func.config->cdev,
4100 + "Bad index: %#X\n", index2);
4101 + goto err;
4102 + }
4103 +
4104 + /* wDatagramLength[1] */
4105 + if ((dg_len2 < 14 + crc_len) ||
4106 + (dg_len2 > frame_max)) {
4107 + INFO(port->func.config->cdev,
4108 + "Bad dgram length: %#X\n", dg_len);
4109 + goto err;
4110 + }
4111 +
4112 /*
4113 * Copy the data into a new skb.
4114 * This ensures the truesize is correct
4115 @@ -1299,9 +1359,6 @@ static int ncm_unwrap_ntb(struct gether *port,
4116 ndp_len -= 2 * (opts->dgram_item_len * 2);
4117
4118 dgram_counter++;
4119 -
4120 - if (index2 == 0 || dg_len2 == 0)
4121 - break;
4122 } while (ndp_len > 2 * (opts->dgram_item_len * 2));
4123 } while (ndp_index);
4124
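
Note: all of the new NCM checks reduce to one invariant: every (index, length) pair parsed out of the NTB must start past the fixed header and lie entirely inside the received block before the parser dereferences skb->data + index. A generic form of the predicate the hunk applies to dwNdpIndex and to both wDatagramIndex/wDatagramLength pairs (a sketch, not the driver's code):

#include <linux/types.h>

/*
 * Accept a record only if it starts past the fixed header and its
 * 'len' bytes fit inside the 'block_len'-byte buffer it came from.
 */
static bool demo_record_ok(u32 index, u32 len, u32 hdr_size, u32 block_len)
{
	if (index < hdr_size)			/* overlaps the header */
		return false;
	if (index > block_len || len > block_len - index)
		return false;			/* runs past the block */
	return true;
}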
4125 diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
4126 index 7f01f78b1d238..f6d203fec4955 100644
4127 --- a/drivers/usb/gadget/function/f_tcm.c
4128 +++ b/drivers/usb/gadget/function/f_tcm.c
4129 @@ -751,12 +751,13 @@ static int uasp_alloc_stream_res(struct f_uas *fu, struct uas_stream *stream)
4130 goto err_sts;
4131
4132 return 0;
4133 +
4134 err_sts:
4135 - usb_ep_free_request(fu->ep_status, stream->req_status);
4136 - stream->req_status = NULL;
4137 -err_out:
4138 usb_ep_free_request(fu->ep_out, stream->req_out);
4139 stream->req_out = NULL;
4140 +err_out:
4141 + usb_ep_free_request(fu->ep_in, stream->req_in);
4142 + stream->req_in = NULL;
4143 out:
4144 return -ENOMEM;
4145 }
4146 diff --git a/drivers/usb/gadget/u_f.h b/drivers/usb/gadget/u_f.h
4147 index eaa13fd3dc7f3..e313c3b8dcb19 100644
4148 --- a/drivers/usb/gadget/u_f.h
4149 +++ b/drivers/usb/gadget/u_f.h
4150 @@ -14,6 +14,7 @@
4151 #define __U_F_H__
4152
4153 #include <linux/usb/gadget.h>
4154 +#include <linux/overflow.h>
4155
4156 /* Variable Length Array Macros **********************************************/
4157 #define vla_group(groupname) size_t groupname##__next = 0
4158 @@ -21,21 +22,36 @@
4159
4160 #define vla_item(groupname, type, name, n) \
4161 size_t groupname##_##name##__offset = ({ \
4162 - size_t align_mask = __alignof__(type) - 1; \
4163 - size_t offset = (groupname##__next + align_mask) & ~align_mask;\
4164 - size_t size = (n) * sizeof(type); \
4165 - groupname##__next = offset + size; \
4166 + size_t offset = 0; \
4167 + if (groupname##__next != SIZE_MAX) { \
4168 + size_t align_mask = __alignof__(type) - 1; \
4169 + size_t size = array_size(n, sizeof(type)); \
4170 + offset = (groupname##__next + align_mask) & \
4171 + ~align_mask; \
4172 + if (check_add_overflow(offset, size, \
4173 + &groupname##__next)) { \
4174 + groupname##__next = SIZE_MAX; \
4175 + offset = 0; \
4176 + } \
4177 + } \
4178 offset; \
4179 })
4180
4181 #define vla_item_with_sz(groupname, type, name, n) \
4182 - size_t groupname##_##name##__sz = (n) * sizeof(type); \
4183 - size_t groupname##_##name##__offset = ({ \
4184 - size_t align_mask = __alignof__(type) - 1; \
4185 - size_t offset = (groupname##__next + align_mask) & ~align_mask;\
4186 - size_t size = groupname##_##name##__sz; \
4187 - groupname##__next = offset + size; \
4188 - offset; \
4189 + size_t groupname##_##name##__sz = array_size(n, sizeof(type)); \
4190 + size_t groupname##_##name##__offset = ({ \
4191 + size_t offset = 0; \
4192 + if (groupname##__next != SIZE_MAX) { \
4193 + size_t align_mask = __alignof__(type) - 1; \
4194 + offset = (groupname##__next + align_mask) & \
4195 + ~align_mask; \
4196 + if (check_add_overflow(offset, groupname##_##name##__sz,\
4197 + &groupname##__next)) { \
4198 + groupname##__next = SIZE_MAX; \
4199 + offset = 0; \
4200 + } \
4201 + } \
4202 + offset; \
4203 })
4204
4205 #define vla_ptr(ptr, groupname, name) \
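
Note: the reworked VLA macros switch (n) * sizeof(type) to array_size(), which saturates to SIZE_MAX on multiplication overflow, and accumulate the running offset with check_add_overflow(); a group that ever overflows is poisoned to SIZE_MAX so the eventual allocation fails instead of being undersized. The accumulation step, pulled out of the macro into a plain function for readability (names are illustrative):

#include <linux/kernel.h>
#include <linux/overflow.h>

static size_t demo_vla_add(size_t *next, size_t align, size_t n, size_t elem)
{
	size_t size = array_size(n, elem);	/* SIZE_MAX on overflow */
	size_t offset;

	if (*next == SIZE_MAX)	/* group already poisoned */
		return 0;

	offset = (*next + (align - 1)) & ~(align - 1);
	if (check_add_overflow(offset, size, next)) {
		*next = SIZE_MAX;	/* poison: the allocation must fail */
		return 0;
	}
	return offset;
}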
4206 diff --git a/drivers/usb/host/ohci-exynos.c b/drivers/usb/host/ohci-exynos.c
4207 index d5ce98e205c73..d8b6c9f5695c9 100644
4208 --- a/drivers/usb/host/ohci-exynos.c
4209 +++ b/drivers/usb/host/ohci-exynos.c
4210 @@ -171,9 +171,8 @@ static int exynos_ohci_probe(struct platform_device *pdev)
4211 hcd->rsrc_len = resource_size(res);
4212
4213 irq = platform_get_irq(pdev, 0);
4214 - if (!irq) {
4215 - dev_err(&pdev->dev, "Failed to get IRQ\n");
4216 - err = -ENODEV;
4217 + if (irq < 0) {
4218 + err = irq;
4219 goto fail_io;
4220 }
4221
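
Note: the exynos fix corrects a common probe bug: platform_get_irq() reports failure with a negative errno (including -EPROBE_DEFER), not with 0, so testing !irq both misses real errors and invents -ENODEV. The canonical check, sketched:

#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);

	if (irq < 0)
		return irq;	/* propagate -EPROBE_DEFER, -ENXIO, ... */

	/* ... request_irq(irq, ...) and the rest of probe ... */
	return 0;
}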
4222 diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
4223 index 76c3f29562d2b..448d7b11dec4c 100644
4224 --- a/drivers/usb/host/xhci-debugfs.c
4225 +++ b/drivers/usb/host/xhci-debugfs.c
4226 @@ -273,7 +273,7 @@ static int xhci_slot_context_show(struct seq_file *s, void *unused)
4227
4228 static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
4229 {
4230 - int dci;
4231 + int ep_index;
4232 dma_addr_t dma;
4233 struct xhci_hcd *xhci;
4234 struct xhci_ep_ctx *ep_ctx;
4235 @@ -282,9 +282,9 @@ static int xhci_endpoint_context_show(struct seq_file *s, void *unused)
4236
4237 xhci = hcd_to_xhci(bus_to_hcd(dev->udev->bus));
4238
4239 - for (dci = 1; dci < 32; dci++) {
4240 - ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, dci);
4241 - dma = dev->out_ctx->dma + dci * CTX_SIZE(xhci->hcc_params);
4242 + for (ep_index = 0; ep_index < 31; ep_index++) {
4243 + ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
4244 + dma = dev->out_ctx->dma + (ep_index + 1) * CTX_SIZE(xhci->hcc_params);
4245 seq_printf(s, "%pad: %s\n", &dma,
4246 xhci_decode_ep_context(le32_to_cpu(ep_ctx->ep_info),
4247 le32_to_cpu(ep_ctx->ep_info2),
4248 diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
4249 index 3196de2931b12..933936abb6fb7 100644
4250 --- a/drivers/usb/host/xhci-hub.c
4251 +++ b/drivers/usb/host/xhci-hub.c
4252 @@ -738,15 +738,6 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
4253 {
4254 u32 pls = status_reg & PORT_PLS_MASK;
4255
4256 - /* resume state is a xHCI internal state.
4257 - * Do not report it to usb core, instead, pretend to be U3,
4258 - * thus usb core knows it's not ready for transfer
4259 - */
4260 - if (pls == XDEV_RESUME) {
4261 - *status |= USB_SS_PORT_LS_U3;
4262 - return;
4263 - }
4264 -
4265 /* When the CAS bit is set then warm reset
4266 * should be performed on port
4267 */
4268 @@ -768,6 +759,16 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
4269 */
4270 pls |= USB_PORT_STAT_CONNECTION;
4271 } else {
4272 + /*
4273 + * Resume state is an xHCI internal state. Do not report it to
4274 + * usb core, instead, pretend to be U3, thus usb core knows
4275 + * it's not ready for transfer.
4276 + */
4277 + if (pls == XDEV_RESUME) {
4278 + *status |= USB_SS_PORT_LS_U3;
4279 + return;
4280 + }
4281 +
4282 /*
4283 * If CAS bit isn't set but the Port is already at
4284 * Compliance Mode, fake a connection so the USB core
4285 diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
4286 index 11a65854d3f09..bad154f446f8d 100644
4287 --- a/drivers/usb/host/xhci.c
4288 +++ b/drivers/usb/host/xhci.c
4289 @@ -3236,10 +3236,11 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
4290
4291 wait_for_completion(cfg_cmd->completion);
4292
4293 - ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
4294 xhci_free_command(xhci, cfg_cmd);
4295 cleanup:
4296 xhci_free_command(xhci, stop_cmd);
4297 + if (ep->ep_state & EP_SOFT_CLEAR_TOGGLE)
4298 + ep->ep_state &= ~EP_SOFT_CLEAR_TOGGLE;
4299 }
4300
4301 static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
4302 diff --git a/drivers/usb/misc/lvstest.c b/drivers/usb/misc/lvstest.c
4303 index 407fe7570f3bc..f8686139d6f39 100644
4304 --- a/drivers/usb/misc/lvstest.c
4305 +++ b/drivers/usb/misc/lvstest.c
4306 @@ -426,7 +426,7 @@ static int lvs_rh_probe(struct usb_interface *intf,
4307 USB_DT_SS_HUB_SIZE, USB_CTRL_GET_TIMEOUT);
4308 if (ret < (USB_DT_HUB_NONVAR_SIZE + 2)) {
4309 dev_err(&hdev->dev, "wrong root hub descriptor read %d\n", ret);
4310 - return ret;
4311 + return ret < 0 ? ret : -EINVAL;
4312 }
4313
4314 /* submit urb to poll interrupt endpoint */
4315 diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c
4316 index fc8a5da4a07c9..0734e6dd93862 100644
4317 --- a/drivers/usb/misc/sisusbvga/sisusb.c
4318 +++ b/drivers/usb/misc/sisusbvga/sisusb.c
4319 @@ -761,7 +761,7 @@ static int sisusb_write_mem_bulk(struct sisusb_usb_data *sisusb, u32 addr,
4320 u8 swap8, fromkern = kernbuffer ? 1 : 0;
4321 u16 swap16;
4322 u32 swap32, flag = (length >> 28) & 1;
4323 - char buf[4];
4324 + u8 buf[4];
4325
4326 /* if neither kernbuffer nor userbuffer are given, assume
4327 * data in obuf
4328 diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
4329 index be0505b8b5d4e..785080f790738 100644
4330 --- a/drivers/usb/misc/yurex.c
4331 +++ b/drivers/usb/misc/yurex.c
4332 @@ -492,7 +492,7 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
4333 prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
4334 dev_dbg(&dev->interface->dev, "%s - submit %c\n", __func__,
4335 dev->cntl_buffer[0]);
4336 - retval = usb_submit_urb(dev->cntl_urb, GFP_KERNEL);
4337 + retval = usb_submit_urb(dev->cntl_urb, GFP_ATOMIC);
4338 if (retval >= 0)
4339 timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
4340 finish_wait(&dev->waitq, &wait);
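
Note: the GFP change matters because the URB is submitted between prepare_to_wait() and schedule_timeout(), i.e. with the task already in TASK_INTERRUPTIBLE; a GFP_KERNEL allocation may sleep there, clobbering the task state. Only non-sleeping allocations are safe inside such a wait window; a sketch with a hypothetical async kickoff:

#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/wait.h>

struct demo_dev { wait_queue_head_t waitq; };

/* hypothetical async kickoff; must not sleep inside the wait window */
static int demo_start_async(struct demo_dev *dev, gfp_t gfp) { return 0; }

static long demo_wait_for_op(struct demo_dev *dev, long timeout)
{
	DEFINE_WAIT(wait);
	int ret;

	prepare_to_wait(&dev->waitq, &wait, TASK_INTERRUPTIBLE);
	/* task state is TASK_INTERRUPTIBLE: only GFP_ATOMIC is safe here */
	ret = demo_start_async(dev, GFP_ATOMIC);
	if (ret >= 0)
		timeout = schedule_timeout(timeout);
	finish_wait(&dev->waitq, &wait);

	return ret < 0 ? ret : timeout;
}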
4341 diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
4342 index f6c3681fa2e9e..88275842219ef 100644
4343 --- a/drivers/usb/storage/unusual_devs.h
4344 +++ b/drivers/usb/storage/unusual_devs.h
4345 @@ -2328,7 +2328,7 @@ UNUSUAL_DEV( 0x357d, 0x7788, 0x0114, 0x0114,
4346 "JMicron",
4347 "USB to ATA/ATAPI Bridge",
4348 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4349 - US_FL_BROKEN_FUA ),
4350 + US_FL_BROKEN_FUA | US_FL_IGNORE_UAS ),
4351
4352 /* Reported by Andrey Rahmatullin <wrar@altlinux.org> */
4353 UNUSUAL_DEV( 0x4102, 0x1020, 0x0100, 0x0100,
4354 diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
4355 index 37157ed9a881a..dcdfcdfd2ad13 100644
4356 --- a/drivers/usb/storage/unusual_uas.h
4357 +++ b/drivers/usb/storage/unusual_uas.h
4358 @@ -28,6 +28,13 @@
4359 * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
4360 */
4361
4362 +/* Reported-by: Till Dörges <doerges@pre-sense.de> */
4363 +UNUSUAL_DEV(0x054c, 0x087d, 0x0000, 0x9999,
4364 + "Sony",
4365 + "PSZ-HA*",
4366 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4367 + US_FL_NO_REPORT_OPCODES),
4368 +
4369 /* Reported-by: Julian Groß <julian.g@posteo.de> */
4370 UNUSUAL_DEV(0x059f, 0x105f, 0x0000, 0x9999,
4371 "LaCie",
4372 @@ -80,6 +87,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
4373 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4374 US_FL_BROKEN_FUA),
4375
4376 +/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
4377 +UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
4378 + "PNY",
4379 + "Pro Elite SSD",
4380 + USB_SC_DEVICE, USB_PR_DEVICE, NULL,
4381 + US_FL_NO_ATA_1X),
4382 +
4383 /* Reported-by: Hans de Goede <hdegoede@redhat.com> */
4384 UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
4385 "VIA",
4386 diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
4387 index 22070cfea1d06..31f00d72f1493 100644
4388 --- a/drivers/video/fbdev/core/fbcon.c
4389 +++ b/drivers/video/fbdev/core/fbcon.c
4390 @@ -2185,6 +2185,9 @@ static void updatescrollmode(struct fbcon_display *p,
4391 }
4392 }
4393
4394 +#define PITCH(w) (((w) + 7) >> 3)
4395 +#define CALC_FONTSZ(h, p, c) ((h) * (p) * (c)) /* size = height * pitch * charcount */
4396 +
4397 static int fbcon_resize(struct vc_data *vc, unsigned int width,
4398 unsigned int height, unsigned int user)
4399 {
4400 @@ -2194,6 +2197,24 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
4401 struct fb_var_screeninfo var = info->var;
4402 int x_diff, y_diff, virt_w, virt_h, virt_fw, virt_fh;
4403
4404 + if (ops->p && ops->p->userfont && FNTSIZE(vc->vc_font.data)) {
4405 + int size;
4406 + int pitch = PITCH(vc->vc_font.width);
4407 +
4408 + /*
4409 + * If this is a user font, ensure that a possible change to its
4410 + * height or width cannot cause an out-of-bounds access of the font data.
4411 + * NOTE: must use original charcount in calculation as font
4412 + * charcount can change and cannot be used to determine the
4413 + * font data allocated size.
4414 + */
4415 + if (pitch <= 0)
4416 + return -EINVAL;
4417 + size = CALC_FONTSZ(vc->vc_font.height, pitch, FNTCHARCNT(vc->vc_font.data));
4418 + if (size > FNTSIZE(vc->vc_font.data))
4419 + return -EINVAL;
4420 + }
4421 +
4422 virt_w = FBCON_SWAP(ops->rotate, width, height);
4423 virt_h = FBCON_SWAP(ops->rotate, height, width);
4424 virt_fw = FBCON_SWAP(ops->rotate, vc->vc_font.width,
4425 @@ -2645,7 +2666,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
4426 int size;
4427 int i, csum;
4428 u8 *new_data, *data = font->data;
4429 - int pitch = (font->width+7) >> 3;
4430 + int pitch = PITCH(font->width);
4431
4432 /* Is there a reason why fbconsole couldn't handle any charcount >256?
4433 * If not this check should be changed to charcount < 256 */
4434 @@ -2661,7 +2682,7 @@ static int fbcon_set_font(struct vc_data *vc, struct console_font *font,
4435 if (fbcon_invalid_charcount(info, charcount))
4436 return -EINVAL;
4437
4438 - size = h * pitch * charcount;
4439 + size = CALC_FONTSZ(h, pitch, charcount);
4440
4441 new_data = kmalloc(FONT_EXTRA_WORDS * sizeof(int) + size, GFP_USER);
4442
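
Note: both fbcon hunks derive a glyph buffer size as height * pitch * charcount with pitch = (width + 7) / 8 (width bits rounded up to whole bytes), and fbcon_resize() now rejects any geometry whose implied size exceeds what was allocated for the user font. A sketch of the bound being enforced:

#include <linux/types.h>

/* pitch: bytes per glyph row, width bits rounded up to whole bytes */
static bool demo_font_fits(unsigned int height, unsigned int width,
			   unsigned int charcount, unsigned int allocated)
{
	unsigned int pitch = (width + 7) >> 3;

	if (pitch == 0)
		return false;
	/* size = height * pitch * charcount must fit the allocation */
	return (unsigned long long)height * pitch * charcount <= allocated;
}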
4443 diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
4444 index e72738371ecbe..97abcd497c7e0 100644
4445 --- a/drivers/video/fbdev/core/fbmem.c
4446 +++ b/drivers/video/fbdev/core/fbmem.c
4447 @@ -952,7 +952,6 @@ static int fb_check_caps(struct fb_info *info, struct fb_var_screeninfo *var,
4448 int
4449 fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
4450 {
4451 - int flags = info->flags;
4452 int ret = 0;
4453 u32 activate;
4454 struct fb_var_screeninfo old_var;
4455 @@ -1047,9 +1046,6 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
4456 event.data = &mode;
4457 fb_notifier_call_chain(FB_EVENT_MODE_CHANGE, &event);
4458
4459 - if (flags & FBINFO_MISC_USEREVENT)
4460 - fbcon_update_vcs(info, activate & FB_ACTIVATE_ALL);
4461 -
4462 return 0;
4463 }
4464 EXPORT_SYMBOL(fb_set_var);
4465 @@ -1100,9 +1096,9 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
4466 return -EFAULT;
4467 console_lock();
4468 lock_fb_info(info);
4469 - info->flags |= FBINFO_MISC_USEREVENT;
4470 ret = fb_set_var(info, &var);
4471 - info->flags &= ~FBINFO_MISC_USEREVENT;
4472 + if (!ret)
4473 + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
4474 unlock_fb_info(info);
4475 console_unlock();
4476 if (!ret && copy_to_user(argp, &var, sizeof(var)))
4477 diff --git a/drivers/video/fbdev/core/fbsysfs.c b/drivers/video/fbdev/core/fbsysfs.c
4478 index d54c88f88991d..65dae05fff8e6 100644
4479 --- a/drivers/video/fbdev/core/fbsysfs.c
4480 +++ b/drivers/video/fbdev/core/fbsysfs.c
4481 @@ -91,9 +91,9 @@ static int activate(struct fb_info *fb_info, struct fb_var_screeninfo *var)
4482
4483 var->activate |= FB_ACTIVATE_FORCE;
4484 console_lock();
4485 - fb_info->flags |= FBINFO_MISC_USEREVENT;
4486 err = fb_set_var(fb_info, var);
4487 - fb_info->flags &= ~FBINFO_MISC_USEREVENT;
4488 + if (!err)
4489 + fbcon_update_vcs(fb_info, var->activate & FB_ACTIVATE_ALL);
4490 console_unlock();
4491 if (err)
4492 return err;
4493 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
4494 index 376ee5bc3ddc9..34e8171856e95 100644
4495 --- a/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
4496 +++ b/drivers/video/fbdev/omap2/omapfb/dss/dispc.c
4497 @@ -520,8 +520,11 @@ int dispc_runtime_get(void)
4498 DSSDBG("dispc_runtime_get\n");
4499
4500 r = pm_runtime_get_sync(&dispc.pdev->dev);
4501 - WARN_ON(r < 0);
4502 - return r < 0 ? r : 0;
4503 + if (WARN_ON(r < 0)) {
4504 + pm_runtime_put_sync(&dispc.pdev->dev);
4505 + return r;
4506 + }
4507 + return 0;
4508 }
4509 EXPORT_SYMBOL(dispc_runtime_get);
4510
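
Note: all six omapfb hunks fix the same leak: pm_runtime_get_sync() increments the device usage count even when the resume fails, so returning the error without a matching put leaves the count permanently elevated and blocks runtime suspend. The balanced form, sketched (later kernels add pm_runtime_resume_and_get() for this pattern):

#include <linux/pm_runtime.h>

static int demo_runtime_get(struct device *dev)
{
	int r = pm_runtime_get_sync(dev);

	if (r < 0) {
		pm_runtime_put_sync(dev);	/* rebalance the usage count */
		return r;
	}
	return 0;
}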
4511 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
4512 index d620376216e1d..6f9c25fec9946 100644
4513 --- a/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
4514 +++ b/drivers/video/fbdev/omap2/omapfb/dss/dsi.c
4515 @@ -1137,8 +1137,11 @@ static int dsi_runtime_get(struct platform_device *dsidev)
4516 DSSDBG("dsi_runtime_get\n");
4517
4518 r = pm_runtime_get_sync(&dsi->pdev->dev);
4519 - WARN_ON(r < 0);
4520 - return r < 0 ? r : 0;
4521 + if (WARN_ON(r < 0)) {
4522 + pm_runtime_put_sync(&dsi->pdev->dev);
4523 + return r;
4524 + }
4525 + return 0;
4526 }
4527
4528 static void dsi_runtime_put(struct platform_device *dsidev)
4529 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/dss.c b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
4530 index bfc5c4c5a26ad..a6b1c1598040d 100644
4531 --- a/drivers/video/fbdev/omap2/omapfb/dss/dss.c
4532 +++ b/drivers/video/fbdev/omap2/omapfb/dss/dss.c
4533 @@ -768,8 +768,11 @@ int dss_runtime_get(void)
4534 DSSDBG("dss_runtime_get\n");
4535
4536 r = pm_runtime_get_sync(&dss.pdev->dev);
4537 - WARN_ON(r < 0);
4538 - return r < 0 ? r : 0;
4539 + if (WARN_ON(r < 0)) {
4540 + pm_runtime_put_sync(&dss.pdev->dev);
4541 + return r;
4542 + }
4543 + return 0;
4544 }
4545
4546 void dss_runtime_put(void)
4547 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
4548 index 7060ae56c062c..4804aab342981 100644
4549 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
4550 +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi4.c
4551 @@ -39,9 +39,10 @@ static int hdmi_runtime_get(void)
4552 DSSDBG("hdmi_runtime_get\n");
4553
4554 r = pm_runtime_get_sync(&hdmi.pdev->dev);
4555 - WARN_ON(r < 0);
4556 - if (r < 0)
4557 + if (WARN_ON(r < 0)) {
4558 + pm_runtime_put_sync(&hdmi.pdev->dev);
4559 return r;
4560 + }
4561
4562 return 0;
4563 }
4564 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
4565 index ac49531e47327..a06b6f1355bdb 100644
4566 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
4567 +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi5.c
4568 @@ -43,9 +43,10 @@ static int hdmi_runtime_get(void)
4569 DSSDBG("hdmi_runtime_get\n");
4570
4571 r = pm_runtime_get_sync(&hdmi.pdev->dev);
4572 - WARN_ON(r < 0);
4573 - if (r < 0)
4574 + if (WARN_ON(r < 0)) {
4575 + pm_runtime_put_sync(&hdmi.pdev->dev);
4576 return r;
4577 + }
4578
4579 return 0;
4580 }
4581 diff --git a/drivers/video/fbdev/omap2/omapfb/dss/venc.c b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
4582 index f81e2a46366dd..3717dac3dcc83 100644
4583 --- a/drivers/video/fbdev/omap2/omapfb/dss/venc.c
4584 +++ b/drivers/video/fbdev/omap2/omapfb/dss/venc.c
4585 @@ -391,8 +391,11 @@ static int venc_runtime_get(void)
4586 DSSDBG("venc_runtime_get\n");
4587
4588 r = pm_runtime_get_sync(&venc.pdev->dev);
4589 - WARN_ON(r < 0);
4590 - return r < 0 ? r : 0;
4591 + if (WARN_ON(r < 0)) {
4592 + pm_runtime_put_sync(&venc.pdev->dev);
4593 + return r;
4594 + }
4595 + return 0;
4596 }
4597
4598 static void venc_runtime_put(void)
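All six omapfb hunks above (dispc, dsi, dss, hdmi4, hdmi5, venc) fix the same leak: pm_runtime_get_sync() increments the device usage count even when it fails, so returning the error without a matching put leaves the count unbalanced forever. A generic sketch of the corrected shape, with a hypothetical helper name:

	static int example_runtime_get(struct device *dev)
	{
		int r = pm_runtime_get_sync(dev);

		if (WARN_ON(r < 0)) {
			/* undo the refcount taken by the failed get */
			pm_runtime_put_sync(dev);
			return r;
		}
		return 0;
	}

Later kernels add pm_runtime_resume_and_get() to encapsulate exactly this get-or-put-on-failure dance.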
4599 diff --git a/drivers/video/fbdev/ps3fb.c b/drivers/video/fbdev/ps3fb.c
4600 index 5ed2db39d8236..ce90483c50209 100644
4601 --- a/drivers/video/fbdev/ps3fb.c
4602 +++ b/drivers/video/fbdev/ps3fb.c
4603 @@ -29,6 +29,7 @@
4604 #include <linux/freezer.h>
4605 #include <linux/uaccess.h>
4606 #include <linux/fb.h>
4607 +#include <linux/fbcon.h>
4608 #include <linux/init.h>
4609
4610 #include <asm/cell-regs.h>
4611 @@ -824,12 +825,12 @@ static int ps3fb_ioctl(struct fb_info *info, unsigned int cmd,
4612 var = info->var;
4613 fb_videomode_to_var(&var, vmode);
4614 console_lock();
4615 - info->flags |= FBINFO_MISC_USEREVENT;
4616 /* Force, in case only special bits changed */
4617 var.activate |= FB_ACTIVATE_FORCE;
4618 par->new_mode_id = val;
4619 retval = fb_set_var(info, &var);
4620 - info->flags &= ~FBINFO_MISC_USEREVENT;
4621 + if (!retval)
4622 + fbcon_update_vcs(info, var.activate & FB_ACTIVATE_ALL);
4623 console_unlock();
4624 }
4625 break;
4626 diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
4627 index 6c8843968a52d..55f2b834cf130 100644
4628 --- a/drivers/xen/events/events_base.c
4629 +++ b/drivers/xen/events/events_base.c
4630 @@ -155,7 +155,7 @@ int get_evtchn_to_irq(unsigned evtchn)
4631 /* Get info for IRQ */
4632 struct irq_info *info_for_irq(unsigned irq)
4633 {
4634 - return irq_get_handler_data(irq);
4635 + return irq_get_chip_data(irq);
4636 }
4637
4638 /* Constructors for packed IRQ information. */
4639 @@ -376,7 +376,7 @@ static void xen_irq_init(unsigned irq)
4640 info->type = IRQT_UNBOUND;
4641 info->refcnt = -1;
4642
4643 - irq_set_handler_data(irq, info);
4644 + irq_set_chip_data(irq, info);
4645
4646 list_add_tail(&info->list, &xen_irq_list_head);
4647 }
4648 @@ -425,14 +425,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
4649
4650 static void xen_free_irq(unsigned irq)
4651 {
4652 - struct irq_info *info = irq_get_handler_data(irq);
4653 + struct irq_info *info = irq_get_chip_data(irq);
4654
4655 if (WARN_ON(!info))
4656 return;
4657
4658 list_del(&info->list);
4659
4660 - irq_set_handler_data(irq, NULL);
4661 + irq_set_chip_data(irq, NULL);
4662
4663 WARN_ON(info->refcnt > 0);
4664
4665 @@ -602,7 +602,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
4666 static void __unbind_from_irq(unsigned int irq)
4667 {
4668 int evtchn = evtchn_from_irq(irq);
4669 - struct irq_info *info = irq_get_handler_data(irq);
4670 + struct irq_info *info = irq_get_chip_data(irq);
4671
4672 if (info->refcnt > 0) {
4673 info->refcnt--;
4674 @@ -1106,7 +1106,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi,
4675
4676 void unbind_from_irqhandler(unsigned int irq, void *dev_id)
4677 {
4678 - struct irq_info *info = irq_get_handler_data(irq);
4679 + struct irq_info *info = irq_get_chip_data(irq);
4680
4681 if (WARN_ON(!info))
4682 return;
4683 @@ -1140,7 +1140,7 @@ int evtchn_make_refcounted(unsigned int evtchn)
4684 if (irq == -1)
4685 return -ENOENT;
4686
4687 - info = irq_get_handler_data(irq);
4688 + info = irq_get_chip_data(irq);
4689
4690 if (!info)
4691 return -ENOENT;
4692 @@ -1168,7 +1168,7 @@ int evtchn_get(unsigned int evtchn)
4693 if (irq == -1)
4694 goto done;
4695
4696 - info = irq_get_handler_data(irq);
4697 + info = irq_get_chip_data(irq);
4698
4699 if (!info)
4700 goto done;
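The xen/events hunks switch the per-event-channel irq_info pointer from handler_data to chip_data at every site. As I read the change, handler_data can be claimed by generic IRQ flow handlers, while chip_data is reserved for the chip implementation, making it the safe slot for this driver-private state. The storage convention, schematically:

	/* at setup */
	irq_set_chip_data(irq, info);

	/* everywhere the info is needed */
	struct irq_info *info = irq_get_chip_data(irq);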
4701 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
4702 index 831a2b25ba79f..196f9f64d075c 100644
4703 --- a/fs/binfmt_flat.c
4704 +++ b/fs/binfmt_flat.c
4705 @@ -571,7 +571,7 @@ static int load_flat_file(struct linux_binprm *bprm,
4706 goto err;
4707 }
4708
4709 - len = data_len + extra;
4710 + len = data_len + extra + MAX_SHARED_LIBS * sizeof(unsigned long);
4711 len = PAGE_ALIGN(len);
4712 realdatastart = vm_mmap(NULL, 0, len,
4713 PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
4714 @@ -585,7 +585,9 @@ static int load_flat_file(struct linux_binprm *bprm,
4715 vm_munmap(textpos, text_len);
4716 goto err;
4717 }
4718 - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
4719 + datapos = ALIGN(realdatastart +
4720 + MAX_SHARED_LIBS * sizeof(unsigned long),
4721 + FLAT_DATA_ALIGN);
4722
4723 pr_debug("Allocated data+bss+stack (%u bytes): %lx\n",
4724 data_len + bss_len + stack_len, datapos);
4725 @@ -615,7 +617,7 @@ static int load_flat_file(struct linux_binprm *bprm,
4726 memp_size = len;
4727 } else {
4728
4729 - len = text_len + data_len + extra;
4730 + len = text_len + data_len + extra + MAX_SHARED_LIBS * sizeof(u32);
4731 len = PAGE_ALIGN(len);
4732 textpos = vm_mmap(NULL, 0, len,
4733 PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
4734 @@ -630,7 +632,9 @@ static int load_flat_file(struct linux_binprm *bprm,
4735 }
4736
4737 realdatastart = textpos + ntohl(hdr->data_start);
4738 - datapos = ALIGN(realdatastart, FLAT_DATA_ALIGN);
4739 + datapos = ALIGN(realdatastart +
4740 + MAX_SHARED_LIBS * sizeof(u32),
4741 + FLAT_DATA_ALIGN);
4742
4743 reloc = (__be32 __user *)
4744 (datapos + (ntohl(hdr->reloc_start) - text_len));
4745 @@ -647,9 +651,8 @@ static int load_flat_file(struct linux_binprm *bprm,
4746 (text_len + full_data
4747 - sizeof(struct flat_hdr)),
4748 0);
4749 - if (datapos != realdatastart)
4750 - memmove((void *)datapos, (void *)realdatastart,
4751 - full_data);
4752 + memmove((void *) datapos, (void *) realdatastart,
4753 + full_data);
4754 #else
4755 /*
4756 * This is used on MMU systems mainly for testing.
4757 @@ -705,7 +708,8 @@ static int load_flat_file(struct linux_binprm *bprm,
4758 if (IS_ERR_VALUE(result)) {
4759 ret = result;
4760 pr_err("Unable to read code+data+bss, errno %d\n", ret);
4761 - vm_munmap(textpos, text_len + data_len + extra);
4762 + vm_munmap(textpos, text_len + data_len + extra +
4763 + MAX_SHARED_LIBS * sizeof(u32));
4764 goto err;
4765 }
4766 }
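The binfmt_flat.c hunks restore the MAX_SHARED_LIBS-words cushion in front of the data segment: every mapping-size computation and the datapos alignment account for it again, and the memmove of the loaded data becomes unconditional. A runnable toy of the sizing arithmetic, with illustrative constants (the kernel's actual MAX_SHARED_LIBS depends on CONFIG_BINFMT_SHARED_FLAT):

	#include <stdio.h>

	#define MAX_SHARED_LIBS	4UL			/* illustrative */
	#define PAGE_ALIGN(x)	(((x) + 4095UL) & ~4095UL)

	int main(void)
	{
		unsigned long data_len = 60000, extra = 512;
		unsigned long len = data_len + extra +
				    MAX_SHARED_LIBS * sizeof(unsigned long);

		printf("reserve %lu bytes, %lu after page alignment\n",
		       len, PAGE_ALIGN(len));
		return 0;
	}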
4767 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
4768 index 18357b054a91e..9a690c10afaa0 100644
4769 --- a/fs/btrfs/ctree.h
4770 +++ b/fs/btrfs/ctree.h
4771 @@ -2415,7 +2415,7 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
4772 u64 bytenr, u64 num_bytes);
4773 int btrfs_exclude_logged_extents(struct extent_buffer *eb);
4774 int btrfs_cross_ref_exist(struct btrfs_root *root,
4775 - u64 objectid, u64 offset, u64 bytenr);
4776 + u64 objectid, u64 offset, u64 bytenr, bool strict);
4777 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
4778 struct btrfs_root *root,
4779 u64 parent, u64 root_objectid,
4780 @@ -2821,7 +2821,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct btrfs_inode *inode,
4781 u64 start, u64 len);
4782 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
4783 u64 *orig_start, u64 *orig_block_len,
4784 - u64 *ram_bytes);
4785 + u64 *ram_bytes, bool strict);
4786
4787 void __btrfs_del_delalloc_inode(struct btrfs_root *root,
4788 struct btrfs_inode *inode);
4789 diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
4790 index ad1c8e3b8133a..dd6fb2ee80409 100644
4791 --- a/fs/btrfs/disk-io.c
4792 +++ b/fs/btrfs/disk-io.c
4793 @@ -4477,6 +4477,7 @@ static void btrfs_cleanup_bg_io(struct btrfs_block_group_cache *cache)
4794 cache->io_ctl.inode = NULL;
4795 iput(inode);
4796 }
4797 + ASSERT(cache->io_ctl.pages == NULL);
4798 btrfs_put_block_group(cache);
4799 }
4800
4801 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
4802 index a36bd4507bacd..ef05cbacef73f 100644
4803 --- a/fs/btrfs/extent-tree.c
4804 +++ b/fs/btrfs/extent-tree.c
4805 @@ -2320,7 +2320,8 @@ static noinline int check_delayed_ref(struct btrfs_root *root,
4806
4807 static noinline int check_committed_ref(struct btrfs_root *root,
4808 struct btrfs_path *path,
4809 - u64 objectid, u64 offset, u64 bytenr)
4810 + u64 objectid, u64 offset, u64 bytenr,
4811 + bool strict)
4812 {
4813 struct btrfs_fs_info *fs_info = root->fs_info;
4814 struct btrfs_root *extent_root = fs_info->extent_root;
4815 @@ -2362,9 +2363,13 @@ static noinline int check_committed_ref(struct btrfs_root *root,
4816 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
4817 goto out;
4818
4819 - /* If extent created before last snapshot => it's definitely shared */
4820 - if (btrfs_extent_generation(leaf, ei) <=
4821 - btrfs_root_last_snapshot(&root->root_item))
4822 + /*
4823 + * If extent created before last snapshot => it's shared unless the
4824 + * snapshot has been deleted. Use the heuristic if strict is false.
4825 + */
4826 + if (!strict &&
4827 + (btrfs_extent_generation(leaf, ei) <=
4828 + btrfs_root_last_snapshot(&root->root_item)))
4829 goto out;
4830
4831 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
4832 @@ -2389,7 +2394,7 @@ out:
4833 }
4834
4835 int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
4836 - u64 bytenr)
4837 + u64 bytenr, bool strict)
4838 {
4839 struct btrfs_path *path;
4840 int ret;
4841 @@ -2400,7 +2405,7 @@ int btrfs_cross_ref_exist(struct btrfs_root *root, u64 objectid, u64 offset,
4842
4843 do {
4844 ret = check_committed_ref(root, path, objectid,
4845 - offset, bytenr);
4846 + offset, bytenr, strict);
4847 if (ret && ret != -ENOENT)
4848 goto out;
4849
4850 diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
4851 index 3cfbccacef7fd..4e4ddd5629e55 100644
4852 --- a/fs/btrfs/file.c
4853 +++ b/fs/btrfs/file.c
4854 @@ -1568,7 +1568,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
4855
4856 num_bytes = lockend - lockstart + 1;
4857 ret = can_nocow_extent(&inode->vfs_inode, lockstart, &num_bytes,
4858 - NULL, NULL, NULL);
4859 + NULL, NULL, NULL, false);
4860 if (ret <= 0) {
4861 ret = 0;
4862 btrfs_end_write_no_snapshotting(root);
4863 @@ -3130,14 +3130,14 @@ reserve_space:
4864 if (ret < 0)
4865 goto out;
4866 space_reserved = true;
4867 - ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
4868 - alloc_start, bytes_to_reserve);
4869 - if (ret)
4870 - goto out;
4871 ret = btrfs_punch_hole_lock_range(inode, lockstart, lockend,
4872 &cached_state);
4873 if (ret)
4874 goto out;
4875 + ret = btrfs_qgroup_reserve_data(inode, &data_reserved,
4876 + alloc_start, bytes_to_reserve);
4877 + if (ret)
4878 + goto out;
4879 ret = btrfs_prealloc_file_range(inode, mode, alloc_start,
4880 alloc_end - alloc_start,
4881 i_blocksize(inode),
4882 diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
4883 index 8bfc0f348ad55..6e6be922b937d 100644
4884 --- a/fs/btrfs/free-space-cache.c
4885 +++ b/fs/btrfs/free-space-cache.c
4886 @@ -1166,7 +1166,6 @@ static int __btrfs_wait_cache_io(struct btrfs_root *root,
4887 ret = update_cache_item(trans, root, inode, path, offset,
4888 io_ctl->entries, io_ctl->bitmaps);
4889 out:
4890 - io_ctl_free(io_ctl);
4891 if (ret) {
4892 invalidate_inode_pages2(inode->i_mapping);
4893 BTRFS_I(inode)->generation = 0;
4894 @@ -1329,6 +1328,7 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
4895 * them out later
4896 */
4897 io_ctl_drop_pages(io_ctl);
4898 + io_ctl_free(io_ctl);
4899
4900 unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
4901 i_size_read(inode) - 1, &cached_state);
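The free-space-cache hunk releases the io_ctl page array in __btrfs_write_out_cache(), immediately after the page references are dropped, instead of in the wait path; the ASSERT added to btrfs_cleanup_bg_io() in the disk-io.c hunk above then encodes the invariant that nothing is left by block-group teardown. A compile-checkable sketch with hypothetical stand-ins for the io_ctl helpers:

	#include <assert.h>
	#include <stdlib.h>

	struct io_ctl {
		void **pages;
	};

	static void io_drop_pages(struct io_ctl *ctl)
	{
		/* stand-in for io_ctl_drop_pages(): put the page refs */
	}

	static void io_free(struct io_ctl *ctl)
	{
		/* stand-in for io_ctl_free(): release the array itself */
		free(ctl->pages);
		ctl->pages = NULL;
	}

	static void write_out_cache(struct io_ctl *ctl)
	{
		io_drop_pages(ctl);
		io_free(ctl);	/* now freed here, not in the wait path */
	}

	static void cleanup_bg_io(const struct io_ctl *ctl)
	{
		assert(ctl->pages == NULL);	/* the invariant ASSERTed */
	}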
4902 diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
4903 index fa7f3a59813ea..9ac40991a6405 100644
4904 --- a/fs/btrfs/inode.c
4905 +++ b/fs/btrfs/inode.c
4906 @@ -1578,7 +1578,7 @@ next_slot:
4907 goto out_check;
4908 ret = btrfs_cross_ref_exist(root, ino,
4909 found_key.offset -
4910 - extent_offset, disk_bytenr);
4911 + extent_offset, disk_bytenr, false);
4912 if (ret) {
4913 /*
4914 * ret could be -EIO if the above fails to read
4915 @@ -7529,7 +7529,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
4916 */
4917 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
4918 u64 *orig_start, u64 *orig_block_len,
4919 - u64 *ram_bytes)
4920 + u64 *ram_bytes, bool strict)
4921 {
4922 struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
4923 struct btrfs_path *path;
4924 @@ -7607,8 +7607,9 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
4925 * Do the same check as in btrfs_cross_ref_exist but without the
4926 * unnecessary search.
4927 */
4928 - if (btrfs_file_extent_generation(leaf, fi) <=
4929 - btrfs_root_last_snapshot(&root->root_item))
4930 + if (!strict &&
4931 + (btrfs_file_extent_generation(leaf, fi) <=
4932 + btrfs_root_last_snapshot(&root->root_item)))
4933 goto out;
4934
4935 backref_offset = btrfs_file_extent_offset(leaf, fi);
4936 @@ -7644,7 +7645,8 @@ noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
4937 */
4938
4939 ret = btrfs_cross_ref_exist(root, btrfs_ino(BTRFS_I(inode)),
4940 - key.offset - backref_offset, disk_bytenr);
4941 + key.offset - backref_offset, disk_bytenr,
4942 + strict);
4943 if (ret) {
4944 ret = 0;
4945 goto out;
4946 @@ -7865,7 +7867,7 @@ static int btrfs_get_blocks_direct_write(struct extent_map **map,
4947 block_start = em->block_start + (start - em->start);
4948
4949 if (can_nocow_extent(inode, start, &len, &orig_start,
4950 - &orig_block_len, &ram_bytes) == 1 &&
4951 + &orig_block_len, &ram_bytes, false) == 1 &&
4952 btrfs_inc_nocow_writers(fs_info, block_start)) {
4953 struct extent_map *em2;
4954
4955 @@ -9568,7 +9570,7 @@ void btrfs_destroy_inode(struct inode *inode)
4956 btrfs_put_ordered_extent(ordered);
4957 }
4958 }
4959 - btrfs_qgroup_check_reserved_leak(inode);
4960 + btrfs_qgroup_check_reserved_leak(BTRFS_I(inode));
4961 inode_tree_del(inode);
4962 btrfs_drop_extent_cache(BTRFS_I(inode), 0, (u64)-1, 0);
4963 }
4964 @@ -11030,7 +11032,7 @@ static int btrfs_swap_activate(struct swap_info_struct *sis, struct file *file,
4965 free_extent_map(em);
4966 em = NULL;
4967
4968 - ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL);
4969 + ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);
4970 if (ret < 0) {
4971 goto out;
4972 } else if (ret) {
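The strict flag threaded through can_nocow_extent() and btrfs_cross_ref_exist() in the hunks above selects between the cheap generation heuristic ("an extent older than the last snapshot is assumed shared") and a full cross-reference check. Per the new comment, the heuristic can give a stale answer once that snapshot is deleted, which ordinary NOCOW write paths tolerate but swapfile activation must not, hence the split at the call sites:

	/* write paths: the heuristic is acceptable */
	ret = can_nocow_extent(inode, start, &len, &orig_start,
			       &orig_block_len, &ram_bytes, false);

	/* swapfile activation: demand the exact answer */
	ret = can_nocow_extent(inode, start, &len, NULL, NULL, NULL, true);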
4973 diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
4974 index b94f6f99e90d0..04fd02e6124dd 100644
4975 --- a/fs/btrfs/qgroup.c
4976 +++ b/fs/btrfs/qgroup.c
4977 @@ -3769,7 +3769,7 @@ void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes)
4978 * Check qgroup reserved space leaking, normally at destroy inode
4979 * time
4980 */
4981 -void btrfs_qgroup_check_reserved_leak(struct inode *inode)
4982 +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode)
4983 {
4984 struct extent_changeset changeset;
4985 struct ulist_node *unode;
4986 @@ -3777,19 +3777,19 @@ void btrfs_qgroup_check_reserved_leak(struct inode *inode)
4987 int ret;
4988
4989 extent_changeset_init(&changeset);
4990 - ret = clear_record_extent_bits(&BTRFS_I(inode)->io_tree, 0, (u64)-1,
4991 + ret = clear_record_extent_bits(&inode->io_tree, 0, (u64)-1,
4992 EXTENT_QGROUP_RESERVED, &changeset);
4993
4994 WARN_ON(ret < 0);
4995 if (WARN_ON(changeset.bytes_changed)) {
4996 ULIST_ITER_INIT(&iter);
4997 while ((unode = ulist_next(&changeset.range_changed, &iter))) {
4998 - btrfs_warn(BTRFS_I(inode)->root->fs_info,
4999 - "leaking qgroup reserved space, ino: %lu, start: %llu, end: %llu",
5000 - inode->i_ino, unode->val, unode->aux);
5001 + btrfs_warn(inode->root->fs_info,
5002 + "leaking qgroup reserved space, ino: %llu, start: %llu, end: %llu",
5003 + btrfs_ino(inode), unode->val, unode->aux);
5004 }
5005 - btrfs_qgroup_free_refroot(BTRFS_I(inode)->root->fs_info,
5006 - BTRFS_I(inode)->root->root_key.objectid,
5007 + btrfs_qgroup_free_refroot(inode->root->fs_info,
5008 + inode->root->root_key.objectid,
5009 changeset.bytes_changed, BTRFS_QGROUP_RSV_DATA);
5010
5011 }
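Converting btrfs_qgroup_check_reserved_leak() to take struct btrfs_inode drops the repeated BTRFS_I() casts and corrects the warning format: btrfs_ino() returns a u64, so the inode number is now printed with %llu rather than the %lu that matched the old inode->i_ino unsigned long. In short:

	u64 ino = btrfs_ino(inode);	/* inode: struct btrfs_inode * */
	btrfs_warn(inode->root->fs_info,
		   "leaking qgroup reserved space, ino: %llu", ino);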
5012 diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
5013 index 17e8ac992c502..b0420c4f5d0ef 100644
5014 --- a/fs/btrfs/qgroup.h
5015 +++ b/fs/btrfs/qgroup.h
5016 @@ -399,7 +399,7 @@ void btrfs_qgroup_free_meta_all_pertrans(struct btrfs_root *root);
5017 */
5018 void btrfs_qgroup_convert_reserved_meta(struct btrfs_root *root, int num_bytes);
5019
5020 -void btrfs_qgroup_check_reserved_leak(struct inode *inode);
5021 +void btrfs_qgroup_check_reserved_leak(struct btrfs_inode *inode);
5022
5023 /* btrfs_qgroup_swapped_blocks related functions */
5024 void btrfs_qgroup_init_swapped_blocks(
5025 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
5026 index a1498df419b4f..6a2ae208ff80a 100644
5027 --- a/fs/btrfs/super.c
5028 +++ b/fs/btrfs/super.c
5029 @@ -544,6 +544,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
5030 } else if (strncmp(args[0].from, "lzo", 3) == 0) {
5031 compress_type = "lzo";
5032 info->compress_type = BTRFS_COMPRESS_LZO;
5033 + info->compress_level = 0;
5034 btrfs_set_opt(info->mount_opt, COMPRESS);
5035 btrfs_clear_opt(info->mount_opt, NODATACOW);
5036 btrfs_clear_opt(info->mount_opt, NODATASUM);
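The one-line super.c change resets compress_level when compress=lzo is selected: a prior mount or remount with a leveled algorithm (e.g. compress=zstd:9) would otherwise leave the old level behind, and lzo has no levels. Schematically, the remount sequence being fixed:

	/* mount -o compress=zstd:9    -> compress_type = zstd, level = 9
	 * mount -o remount,compress=lzo
	 *   before: type = lzo, level = 9 (stale)
	 *   after:  type = lzo, level = 0                              */
	info->compress_type  = BTRFS_COMPRESS_LZO;
	info->compress_level = 0;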
5037 diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
5038 index 3c090549ed07d..7042b84edc89d 100644
5039 --- a/fs/btrfs/tree-log.c
5040 +++ b/fs/btrfs/tree-log.c
5041 @@ -3473,11 +3473,13 @@ fail:
5042 btrfs_free_path(path);
5043 out_unlock:
5044 mutex_unlock(&dir->log_mutex);
5045 - if (ret == -ENOSPC) {
5046 + if (err == -ENOSPC) {
5047 btrfs_set_log_full_commit(trans);
5048 - ret = 0;
5049 - } else if (ret < 0)
5050 - btrfs_abort_transaction(trans, ret);
5051 + err = 0;
5052 + } else if (err < 0 && err != -ENOENT) {
5053 + /* ENOENT can be returned if the entry hasn't been fsynced yet */
5054 + btrfs_abort_transaction(trans, err);
5055 + }
5056
5057 btrfs_end_log_trans(root);
5058
5059 @@ -4994,6 +4996,138 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
5060 return ret;
5061 }
5062
5063 +static int copy_inode_items_to_log(struct btrfs_trans_handle *trans,
5064 + struct btrfs_inode *inode,
5065 + struct btrfs_key *min_key,
5066 + const struct btrfs_key *max_key,
5067 + struct btrfs_path *path,
5068 + struct btrfs_path *dst_path,
5069 + const u64 logged_isize,
5070 + const bool recursive_logging,
5071 + const int inode_only,
5072 + struct btrfs_log_ctx *ctx,
5073 + bool *need_log_inode_item)
5074 +{
5075 + struct btrfs_root *root = inode->root;
5076 + int ins_start_slot = 0;
5077 + int ins_nr = 0;
5078 + int ret;
5079 +
5080 + while (1) {
5081 + ret = btrfs_search_forward(root, min_key, path, trans->transid);
5082 + if (ret < 0)
5083 + return ret;
5084 + if (ret > 0) {
5085 + ret = 0;
5086 + break;
5087 + }
5088 +again:
5089 + /* Note, ins_nr might be > 0 here, cleanup outside the loop */
5090 + if (min_key->objectid != max_key->objectid)
5091 + break;
5092 + if (min_key->type > max_key->type)
5093 + break;
5094 +
5095 + if (min_key->type == BTRFS_INODE_ITEM_KEY)
5096 + *need_log_inode_item = false;
5097 +
5098 + if ((min_key->type == BTRFS_INODE_REF_KEY ||
5099 + min_key->type == BTRFS_INODE_EXTREF_KEY) &&
5100 + inode->generation == trans->transid &&
5101 + !recursive_logging) {
5102 + u64 other_ino = 0;
5103 + u64 other_parent = 0;
5104 +
5105 + ret = btrfs_check_ref_name_override(path->nodes[0],
5106 + path->slots[0], min_key, inode,
5107 + &other_ino, &other_parent);
5108 + if (ret < 0) {
5109 + return ret;
5110 + } else if (ret > 0 && ctx &&
5111 + other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5112 + if (ins_nr > 0) {
5113 + ins_nr++;
5114 + } else {
5115 + ins_nr = 1;
5116 + ins_start_slot = path->slots[0];
5117 + }
5118 + ret = copy_items(trans, inode, dst_path, path,
5119 + ins_start_slot, ins_nr,
5120 + inode_only, logged_isize);
5121 + if (ret < 0)
5122 + return ret;
5123 + ins_nr = 0;
5124 +
5125 + ret = log_conflicting_inodes(trans, root, path,
5126 + ctx, other_ino, other_parent);
5127 + if (ret)
5128 + return ret;
5129 + btrfs_release_path(path);
5130 + goto next_key;
5131 + }
5132 + }
5133 +
5134 + /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5135 + if (min_key->type == BTRFS_XATTR_ITEM_KEY) {
5136 + if (ins_nr == 0)
5137 + goto next_slot;
5138 + ret = copy_items(trans, inode, dst_path, path,
5139 + ins_start_slot,
5140 + ins_nr, inode_only, logged_isize);
5141 + if (ret < 0)
5142 + return ret;
5143 + ins_nr = 0;
5144 + goto next_slot;
5145 + }
5146 +
5147 + if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5148 + ins_nr++;
5149 + goto next_slot;
5150 + } else if (!ins_nr) {
5151 + ins_start_slot = path->slots[0];
5152 + ins_nr = 1;
5153 + goto next_slot;
5154 + }
5155 +
5156 + ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5157 + ins_nr, inode_only, logged_isize);
5158 + if (ret < 0)
5159 + return ret;
5160 + ins_nr = 1;
5161 + ins_start_slot = path->slots[0];
5162 +next_slot:
5163 + path->slots[0]++;
5164 + if (path->slots[0] < btrfs_header_nritems(path->nodes[0])) {
5165 + btrfs_item_key_to_cpu(path->nodes[0], min_key,
5166 + path->slots[0]);
5167 + goto again;
5168 + }
5169 + if (ins_nr) {
5170 + ret = copy_items(trans, inode, dst_path, path,
5171 + ins_start_slot, ins_nr, inode_only,
5172 + logged_isize);
5173 + if (ret < 0)
5174 + return ret;
5175 + ins_nr = 0;
5176 + }
5177 + btrfs_release_path(path);
5178 +next_key:
5179 + if (min_key->offset < (u64)-1) {
5180 + min_key->offset++;
5181 + } else if (min_key->type < max_key->type) {
5182 + min_key->type++;
5183 + min_key->offset = 0;
5184 + } else {
5185 + break;
5186 + }
5187 + }
5188 + if (ins_nr)
5189 + ret = copy_items(trans, inode, dst_path, path, ins_start_slot,
5190 + ins_nr, inode_only, logged_isize);
5191 +
5192 + return ret;
5193 +}
5194 +
5195 /* log a single inode in the tree log.
5196 * At least one parent directory for this inode must exist in the tree
5197 * or be logged already.
5198 @@ -5015,17 +5149,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5199 const loff_t end,
5200 struct btrfs_log_ctx *ctx)
5201 {
5202 - struct btrfs_fs_info *fs_info = root->fs_info;
5203 struct btrfs_path *path;
5204 struct btrfs_path *dst_path;
5205 struct btrfs_key min_key;
5206 struct btrfs_key max_key;
5207 struct btrfs_root *log = root->log_root;
5208 int err = 0;
5209 - int ret;
5210 - int nritems;
5211 - int ins_start_slot = 0;
5212 - int ins_nr;
5213 + int ret = 0;
5214 bool fast_search = false;
5215 u64 ino = btrfs_ino(inode);
5216 struct extent_map_tree *em_tree = &inode->extent_tree;
5217 @@ -5061,15 +5191,19 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5218 max_key.offset = (u64)-1;
5219
5220 /*
5221 - * Only run delayed items if we are a dir or a new file.
5222 - * Otherwise commit the delayed inode only, which is needed in
5223 - * order for the log replay code to mark inodes for link count
5224 - * fixup (create temporary BTRFS_TREE_LOG_FIXUP_OBJECTID items).
5225 + * Only run delayed items if we are a directory. We want to make sure
5226 + * all directory indexes hit the fs/subvolume tree so we can find them
5227 + * and figure out which index ranges have to be logged.
5228 + *
5229 + * Otherwise commit the delayed inode only if the full sync flag is set,
5230 + * as we want to make sure an up to date version is in the subvolume
5231 + * tree so copy_inode_items_to_log() / copy_items() can find it and copy
5232 + * it to the log tree. For a non full sync, we always log the inode item
5233 + * based on the in-memory struct btrfs_inode which is always up to date.
5234 */
5235 - if (S_ISDIR(inode->vfs_inode.i_mode) ||
5236 - inode->generation > fs_info->last_trans_committed)
5237 + if (S_ISDIR(inode->vfs_inode.i_mode))
5238 ret = btrfs_commit_inode_delayed_items(trans, inode);
5239 - else
5240 + else if (test_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &inode->runtime_flags))
5241 ret = btrfs_commit_inode_delayed_inode(inode);
5242
5243 if (ret) {
5244 @@ -5156,139 +5290,12 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
5245 goto out_unlock;
5246 }
5247
5248 - while (1) {
5249 - ins_nr = 0;
5250 - ret = btrfs_search_forward(root, &min_key,
5251 - path, trans->transid);
5252 - if (ret < 0) {
5253 - err = ret;
5254 - goto out_unlock;
5255 - }
5256 - if (ret != 0)
5257 - break;
5258 -again:
5259 - /* note, ins_nr might be > 0 here, cleanup outside the loop */
5260 - if (min_key.objectid != ino)
5261 - break;
5262 - if (min_key.type > max_key.type)
5263 - break;
5264 -
5265 - if (min_key.type == BTRFS_INODE_ITEM_KEY)
5266 - need_log_inode_item = false;
5267 -
5268 - if ((min_key.type == BTRFS_INODE_REF_KEY ||
5269 - min_key.type == BTRFS_INODE_EXTREF_KEY) &&
5270 - inode->generation == trans->transid &&
5271 - !recursive_logging) {
5272 - u64 other_ino = 0;
5273 - u64 other_parent = 0;
5274 -
5275 - ret = btrfs_check_ref_name_override(path->nodes[0],
5276 - path->slots[0], &min_key, inode,
5277 - &other_ino, &other_parent);
5278 - if (ret < 0) {
5279 - err = ret;
5280 - goto out_unlock;
5281 - } else if (ret > 0 && ctx &&
5282 - other_ino != btrfs_ino(BTRFS_I(ctx->inode))) {
5283 - if (ins_nr > 0) {
5284 - ins_nr++;
5285 - } else {
5286 - ins_nr = 1;
5287 - ins_start_slot = path->slots[0];
5288 - }
5289 - ret = copy_items(trans, inode, dst_path, path,
5290 - ins_start_slot,
5291 - ins_nr, inode_only,
5292 - logged_isize);
5293 - if (ret < 0) {
5294 - err = ret;
5295 - goto out_unlock;
5296 - }
5297 - ins_nr = 0;
5298 -
5299 - err = log_conflicting_inodes(trans, root, path,
5300 - ctx, other_ino, other_parent);
5301 - if (err)
5302 - goto out_unlock;
5303 - btrfs_release_path(path);
5304 - goto next_key;
5305 - }
5306 - }
5307 -
5308 - /* Skip xattrs, we log them later with btrfs_log_all_xattrs() */
5309 - if (min_key.type == BTRFS_XATTR_ITEM_KEY) {
5310 - if (ins_nr == 0)
5311 - goto next_slot;
5312 - ret = copy_items(trans, inode, dst_path, path,
5313 - ins_start_slot,
5314 - ins_nr, inode_only, logged_isize);
5315 - if (ret < 0) {
5316 - err = ret;
5317 - goto out_unlock;
5318 - }
5319 - ins_nr = 0;
5320 - goto next_slot;
5321 - }
5322 -
5323 - if (ins_nr && ins_start_slot + ins_nr == path->slots[0]) {
5324 - ins_nr++;
5325 - goto next_slot;
5326 - } else if (!ins_nr) {
5327 - ins_start_slot = path->slots[0];
5328 - ins_nr = 1;
5329 - goto next_slot;
5330 - }
5331 -
5332 - ret = copy_items(trans, inode, dst_path, path,
5333 - ins_start_slot, ins_nr, inode_only,
5334 - logged_isize);
5335 - if (ret < 0) {
5336 - err = ret;
5337 - goto out_unlock;
5338 - }
5339 - ins_nr = 1;
5340 - ins_start_slot = path->slots[0];
5341 -next_slot:
5342 -
5343 - nritems = btrfs_header_nritems(path->nodes[0]);
5344 - path->slots[0]++;
5345 - if (path->slots[0] < nritems) {
5346 - btrfs_item_key_to_cpu(path->nodes[0], &min_key,
5347 - path->slots[0]);
5348 - goto again;
5349 - }
5350 - if (ins_nr) {
5351 - ret = copy_items(trans, inode, dst_path, path,
5352 - ins_start_slot,
5353 - ins_nr, inode_only, logged_isize);
5354 - if (ret < 0) {
5355 - err = ret;
5356 - goto out_unlock;
5357 - }
5358 - ins_nr = 0;
5359 - }
5360 - btrfs_release_path(path);
5361 -next_key:
5362 - if (min_key.offset < (u64)-1) {
5363 - min_key.offset++;
5364 - } else if (min_key.type < max_key.type) {
5365 - min_key.type++;
5366 - min_key.offset = 0;
5367 - } else {
5368 - break;
5369 - }
5370 - }
5371 - if (ins_nr) {
5372 - ret = copy_items(trans, inode, dst_path, path,
5373 - ins_start_slot, ins_nr, inode_only,
5374 - logged_isize);
5375 - if (ret < 0) {
5376 - err = ret;
5377 - goto out_unlock;
5378 - }
5379 - ins_nr = 0;
5380 - }
5381 + err = copy_inode_items_to_log(trans, inode, &min_key, &max_key,
5382 + path, dst_path, logged_isize,
5383 + recursive_logging, inode_only, ctx,
5384 + &need_log_inode_item);
5385 + if (err)
5386 + goto out_unlock;
5387
5388 btrfs_release_path(path);
5389 btrfs_release_path(dst_path);
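Two fixes land in tree-log.c. First, the directory-logging error path examined ret where the failure is carried in err, so -ENOSPC was never downgraded to a full-commit request and genuine errors never aborted the transaction; the fixed code also tolerates -ENOENT, which just means the entry has not been fsynced yet. Second, the long key-iteration loop in btrfs_log_inode() is lifted into copy_inode_items_to_log(), which returns errors directly instead of shuttling them between ret and err, and delayed items are now committed only for directories or when a full sync is required. The variable mix-up, side by side:

	/* before (schematic): ret no longer holds the failure here */
	if (ret == -ENOSPC)
		btrfs_set_log_full_commit(trans);

	/* after: test the variable that actually carries it */
	if (err == -ENOSPC) {
		btrfs_set_log_full_commit(trans);
		err = 0;
	} else if (err < 0 && err != -ENOENT) {
		btrfs_abort_transaction(trans, err);
	}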
5390 diff --git a/fs/buffer.c b/fs/buffer.c
5391 index 79c9562434a8d..22d8ac4a8c40a 100644
5392 --- a/fs/buffer.c
5393 +++ b/fs/buffer.c
5394 @@ -3170,6 +3170,15 @@ int __sync_dirty_buffer(struct buffer_head *bh, int op_flags)
5395 WARN_ON(atomic_read(&bh->b_count) < 1);
5396 lock_buffer(bh);
5397 if (test_clear_buffer_dirty(bh)) {
5398 + /*
5399 + * The bh should be mapped, but it might not be if the
5400 + * device was hot-removed. Not much we can do but fail the I/O.
5401 + */
5402 + if (!buffer_mapped(bh)) {
5403 + unlock_buffer(bh);
5404 + return -EIO;
5405 + }
5406 +
5407 get_bh(bh);
5408 bh->b_end_io = end_buffer_write_sync;
5409 ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
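The buffer.c hunk teaches __sync_dirty_buffer() to fail with -EIO when the buffer has lost its mapping (as after device hot-removal) rather than submitting I/O on it; because the check now lives in the common helper, the per-filesystem copy in ext4_commit_super() is deleted by the ext4 hunk further down. Note the placement: the test runs after lock_buffer() and test_clear_buffer_dirty(), so the buffer is held at the moment it is inspected and is unlocked again on the failure exit:

	lock_buffer(bh);
	if (test_clear_buffer_dirty(bh)) {
		if (!buffer_mapped(bh)) {	/* device gone */
			unlock_buffer(bh);
			return -EIO;
		}
		get_bh(bh);
		bh->b_end_io = end_buffer_write_sync;
		ret = submit_bh(REQ_OP_WRITE, op_flags, bh);
	}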
5410 diff --git a/fs/ceph/file.c b/fs/ceph/file.c
5411 index ce54a1b12819b..4a6b14a2bd7f9 100644
5412 --- a/fs/ceph/file.c
5413 +++ b/fs/ceph/file.c
5414 @@ -1260,6 +1260,7 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
5415 struct inode *inode = file_inode(filp);
5416 struct ceph_inode_info *ci = ceph_inode(inode);
5417 struct page *pinned_page = NULL;
5418 + bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
5419 ssize_t ret;
5420 int want, got = 0;
5421 int retry_op = 0, read = 0;
5422 @@ -1268,7 +1269,7 @@ again:
5423 dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
5424 inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
5425
5426 - if (iocb->ki_flags & IOCB_DIRECT)
5427 + if (direct_lock)
5428 ceph_start_io_direct(inode);
5429 else
5430 ceph_start_io_read(inode);
5431 @@ -1325,7 +1326,7 @@ again:
5432 }
5433 ceph_put_cap_refs(ci, got);
5434
5435 - if (iocb->ki_flags & IOCB_DIRECT)
5436 + if (direct_lock)
5437 ceph_end_io_direct(inode);
5438 else
5439 ceph_end_io_read(inode);
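The ceph read path now latches the IOCB_DIRECT decision into direct_lock before taking the I/O lock. The likely motivation: iocb->ki_flags can change while the read runs (the direct path can fall back), and re-testing the flag at the end could pair ceph_start_io_direct() with ceph_end_io_read(). Caching the decision keeps the start/end calls symmetric no matter what happens in between:

	bool direct_lock = iocb->ki_flags & IOCB_DIRECT; /* decide once */

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	/* ... the read itself may alter iocb->ki_flags ... */

	if (direct_lock)	/* still matches the lock taken above */
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);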
5440 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
5441 index b0077f5a31688..0f21073a51a1b 100644
5442 --- a/fs/ceph/mds_client.c
5443 +++ b/fs/ceph/mds_client.c
5444 @@ -4068,6 +4068,9 @@ static void delayed_work(struct work_struct *work)
5445
5446 dout("mdsc delayed_work\n");
5447
5448 + if (mdsc->stopping)
5449 + return;
5450 +
5451 mutex_lock(&mdsc->mutex);
5452 renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
5453 renew_caps = time_after_eq(jiffies, HZ*renew_interval +
5454 @@ -4433,7 +4436,16 @@ void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
5455 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
5456 {
5457 dout("stop\n");
5458 - cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
5459 + /*
5460 + * Make sure the delayed work stopped before releasing
5461 + * the resources.
5462 + *
5463 + * Because the cancel_delayed_work_sync() will only
5464 + * guarantee that the work finishes executing. But the
5465 + * delayed work will re-arm itself again after that.
5466 + */
5467 + flush_delayed_work(&mdsc->delayed_work);
5468 +
5469 if (mdsc->mdsmap)
5470 ceph_mdsmap_destroy(mdsc->mdsmap);
5471 kfree(mdsc->sessions);
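The mds_client.c pair addresses shutdown of self-rearming delayed work: as the new comment notes, cancel_delayed_work_sync() only guarantees that the currently running invocation finishes, after which the work can already have re-armed itself. The fix is a stopping check inside the handler plus flush_delayed_work() at stop time, so the final run executes, observes the flag, and declines to re-queue. A generic sketch of the pattern with a hypothetical context struct:

	#include <linux/workqueue.h>

	struct my_ctx {
		struct delayed_work dwork;
		bool stopping;
	};

	static void my_delayed_work(struct work_struct *work)
	{
		struct my_ctx *ctx = container_of(to_delayed_work(work),
						  struct my_ctx, dwork);

		if (ctx->stopping)
			return;		/* never re-arm on shutdown */
		/* ... periodic duties ... */
		schedule_delayed_work(&ctx->dwork, HZ);
	}

	static void my_stop(struct my_ctx *ctx)
	{
		ctx->stopping = true;
		flush_delayed_work(&ctx->dwork); /* last run sees flag */
	}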
5472 diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
5473 index ceb54ccc937e9..97c56d061e615 100644
5474 --- a/fs/ext4/block_validity.c
5475 +++ b/fs/ext4/block_validity.c
5476 @@ -250,14 +250,6 @@ int ext4_setup_system_zone(struct super_block *sb)
5477 int flex_size = ext4_flex_bg_size(sbi);
5478 int ret;
5479
5480 - if (!test_opt(sb, BLOCK_VALIDITY)) {
5481 - if (sbi->system_blks)
5482 - ext4_release_system_zone(sb);
5483 - return 0;
5484 - }
5485 - if (sbi->system_blks)
5486 - return 0;
5487 -
5488 system_blks = kzalloc(sizeof(*system_blks), GFP_KERNEL);
5489 if (!system_blks)
5490 return -ENOMEM;
5491 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
5492 index f7c20bb20da37..4aae7e3e89a12 100644
5493 --- a/fs/ext4/super.c
5494 +++ b/fs/ext4/super.c
5495 @@ -66,10 +66,10 @@ static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
5496 unsigned long journal_devnum);
5497 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
5498 static int ext4_commit_super(struct super_block *sb, int sync);
5499 -static void ext4_mark_recovery_complete(struct super_block *sb,
5500 +static int ext4_mark_recovery_complete(struct super_block *sb,
5501 struct ext4_super_block *es);
5502 -static void ext4_clear_journal_err(struct super_block *sb,
5503 - struct ext4_super_block *es);
5504 +static int ext4_clear_journal_err(struct super_block *sb,
5505 + struct ext4_super_block *es);
5506 static int ext4_sync_fs(struct super_block *sb, int wait);
5507 static int ext4_remount(struct super_block *sb, int *flags, char *data);
5508 static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
5509 @@ -4563,11 +4563,13 @@ no_journal:
5510
5511 ext4_set_resv_clusters(sb);
5512
5513 - err = ext4_setup_system_zone(sb);
5514 - if (err) {
5515 - ext4_msg(sb, KERN_ERR, "failed to initialize system "
5516 - "zone (%d)", err);
5517 - goto failed_mount4a;
5518 + if (test_opt(sb, BLOCK_VALIDITY)) {
5519 + err = ext4_setup_system_zone(sb);
5520 + if (err) {
5521 + ext4_msg(sb, KERN_ERR, "failed to initialize system "
5522 + "zone (%d)", err);
5523 + goto failed_mount4a;
5524 + }
5525 }
5526
5527 ext4_ext_init(sb);
5528 @@ -4635,7 +4637,9 @@ no_journal:
5529 EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
5530 if (needs_recovery) {
5531 ext4_msg(sb, KERN_INFO, "recovery complete");
5532 - ext4_mark_recovery_complete(sb, es);
5533 + err = ext4_mark_recovery_complete(sb, es);
5534 + if (err)
5535 + goto failed_mount8;
5536 }
5537 if (EXT4_SB(sb)->s_journal) {
5538 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
5539 @@ -4678,10 +4682,8 @@ cantfind_ext4:
5540 ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
5541 goto failed_mount;
5542
5543 -#ifdef CONFIG_QUOTA
5544 failed_mount8:
5545 ext4_unregister_sysfs(sb);
5546 -#endif
5547 failed_mount7:
5548 ext4_unregister_li_request(sb);
5549 failed_mount6:
5550 @@ -4820,7 +4822,8 @@ static journal_t *ext4_get_journal(struct super_block *sb,
5551 struct inode *journal_inode;
5552 journal_t *journal;
5553
5554 - BUG_ON(!ext4_has_feature_journal(sb));
5555 + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
5556 + return NULL;
5557
5558 journal_inode = ext4_get_journal_inode(sb, journal_inum);
5559 if (!journal_inode)
5560 @@ -4850,7 +4853,8 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
5561 struct ext4_super_block *es;
5562 struct block_device *bdev;
5563
5564 - BUG_ON(!ext4_has_feature_journal(sb));
5565 + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
5566 + return NULL;
5567
5568 bdev = ext4_blkdev_get(j_dev, sb);
5569 if (bdev == NULL)
5570 @@ -4941,8 +4945,10 @@ static int ext4_load_journal(struct super_block *sb,
5571 dev_t journal_dev;
5572 int err = 0;
5573 int really_read_only;
5574 + int journal_dev_ro;
5575
5576 - BUG_ON(!ext4_has_feature_journal(sb));
5577 + if (WARN_ON_ONCE(!ext4_has_feature_journal(sb)))
5578 + return -EFSCORRUPTED;
5579
5580 if (journal_devnum &&
5581 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
5582 @@ -4952,7 +4958,31 @@ static int ext4_load_journal(struct super_block *sb,
5583 } else
5584 journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
5585
5586 - really_read_only = bdev_read_only(sb->s_bdev);
5587 + if (journal_inum && journal_dev) {
5588 + ext4_msg(sb, KERN_ERR,
5589 + "filesystem has both journal inode and journal device!");
5590 + return -EINVAL;
5591 + }
5592 +
5593 + if (journal_inum) {
5594 + journal = ext4_get_journal(sb, journal_inum);
5595 + if (!journal)
5596 + return -EINVAL;
5597 + } else {
5598 + journal = ext4_get_dev_journal(sb, journal_dev);
5599 + if (!journal)
5600 + return -EINVAL;
5601 + }
5602 +
5603 + journal_dev_ro = bdev_read_only(journal->j_dev);
5604 + really_read_only = bdev_read_only(sb->s_bdev) | journal_dev_ro;
5605 +
5606 + if (journal_dev_ro && !sb_rdonly(sb)) {
5607 + ext4_msg(sb, KERN_ERR,
5608 + "journal device read-only, try mounting with '-o ro'");
5609 + err = -EROFS;
5610 + goto err_out;
5611 + }
5612
5613 /*
5614 * Are we loading a blank journal or performing recovery after a
5615 @@ -4967,27 +4997,14 @@ static int ext4_load_journal(struct super_block *sb,
5616 ext4_msg(sb, KERN_ERR, "write access "
5617 "unavailable, cannot proceed "
5618 "(try mounting with noload)");
5619 - return -EROFS;
5620 + err = -EROFS;
5621 + goto err_out;
5622 }
5623 ext4_msg(sb, KERN_INFO, "write access will "
5624 "be enabled during recovery");
5625 }
5626 }
5627
5628 - if (journal_inum && journal_dev) {
5629 - ext4_msg(sb, KERN_ERR, "filesystem has both journal "
5630 - "and inode journals!");
5631 - return -EINVAL;
5632 - }
5633 -
5634 - if (journal_inum) {
5635 - if (!(journal = ext4_get_journal(sb, journal_inum)))
5636 - return -EINVAL;
5637 - } else {
5638 - if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
5639 - return -EINVAL;
5640 - }
5641 -
5642 if (!(journal->j_flags & JBD2_BARRIER))
5643 ext4_msg(sb, KERN_INFO, "barriers disabled");
5644
5645 @@ -5007,12 +5024,16 @@ static int ext4_load_journal(struct super_block *sb,
5646
5647 if (err) {
5648 ext4_msg(sb, KERN_ERR, "error loading journal");
5649 - jbd2_journal_destroy(journal);
5650 - return err;
5651 + goto err_out;
5652 }
5653
5654 EXT4_SB(sb)->s_journal = journal;
5655 - ext4_clear_journal_err(sb, es);
5656 + err = ext4_clear_journal_err(sb, es);
5657 + if (err) {
5658 + EXT4_SB(sb)->s_journal = NULL;
5659 + jbd2_journal_destroy(journal);
5660 + return err;
5661 + }
5662
5663 if (!really_read_only && journal_devnum &&
5664 journal_devnum != le32_to_cpu(es->s_journal_dev)) {
5665 @@ -5023,6 +5044,10 @@ static int ext4_load_journal(struct super_block *sb,
5666 }
5667
5668 return 0;
5669 +
5670 +err_out:
5671 + jbd2_journal_destroy(journal);
5672 + return err;
5673 }
5674
5675 static int ext4_commit_super(struct super_block *sb, int sync)
5676 @@ -5034,13 +5059,6 @@ static int ext4_commit_super(struct super_block *sb, int sync)
5677 if (!sbh || block_device_ejected(sb))
5678 return error;
5679
5680 - /*
5681 - * The superblock bh should be mapped, but it might not be if the
5682 - * device was hot-removed. Not much we can do but fail the I/O.
5683 - */
5684 - if (!buffer_mapped(sbh))
5685 - return error;
5686 -
5687 /*
5688 * If the file system is mounted read-only, don't update the
5689 * superblock write time. This avoids updating the superblock
5690 @@ -5108,26 +5126,32 @@ static int ext4_commit_super(struct super_block *sb, int sync)
5691 * remounting) the filesystem readonly, then we will end up with a
5692 * consistent fs on disk. Record that fact.
5693 */
5694 -static void ext4_mark_recovery_complete(struct super_block *sb,
5695 - struct ext4_super_block *es)
5696 +static int ext4_mark_recovery_complete(struct super_block *sb,
5697 + struct ext4_super_block *es)
5698 {
5699 + int err;
5700 journal_t *journal = EXT4_SB(sb)->s_journal;
5701
5702 if (!ext4_has_feature_journal(sb)) {
5703 - BUG_ON(journal != NULL);
5704 - return;
5705 + if (journal != NULL) {
5706 + ext4_error(sb, "Journal got removed while the fs was "
5707 + "mounted!");
5708 + return -EFSCORRUPTED;
5709 + }
5710 + return 0;
5711 }
5712 jbd2_journal_lock_updates(journal);
5713 - if (jbd2_journal_flush(journal) < 0)
5714 + err = jbd2_journal_flush(journal);
5715 + if (err < 0)
5716 goto out;
5717
5718 if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
5719 ext4_clear_feature_journal_needs_recovery(sb);
5720 ext4_commit_super(sb, 1);
5721 }
5722 -
5723 out:
5724 jbd2_journal_unlock_updates(journal);
5725 + return err;
5726 }
5727
5728 /*
5729 @@ -5135,14 +5159,17 @@ out:
5730 * has recorded an error from a previous lifetime, move that error to the
5731 * main filesystem now.
5732 */
5733 -static void ext4_clear_journal_err(struct super_block *sb,
5734 +static int ext4_clear_journal_err(struct super_block *sb,
5735 struct ext4_super_block *es)
5736 {
5737 journal_t *journal;
5738 int j_errno;
5739 const char *errstr;
5740
5741 - BUG_ON(!ext4_has_feature_journal(sb));
5742 + if (!ext4_has_feature_journal(sb)) {
5743 + ext4_error(sb, "Journal got removed while the fs was mounted!");
5744 + return -EFSCORRUPTED;
5745 + }
5746
5747 journal = EXT4_SB(sb)->s_journal;
5748
5749 @@ -5167,6 +5194,7 @@ static void ext4_clear_journal_err(struct super_block *sb,
5750 jbd2_journal_clear_err(journal);
5751 jbd2_journal_update_sb_errno(journal);
5752 }
5753 + return 0;
5754 }
5755
5756 /*
5757 @@ -5309,7 +5337,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5758 {
5759 struct ext4_super_block *es;
5760 struct ext4_sb_info *sbi = EXT4_SB(sb);
5761 - unsigned long old_sb_flags;
5762 + unsigned long old_sb_flags, vfs_flags;
5763 struct ext4_mount_options old_opts;
5764 int enable_quota = 0;
5765 ext4_group_t g;
5766 @@ -5352,6 +5380,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5767 if (sbi->s_journal && sbi->s_journal->j_task->io_context)
5768 journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
5769
5770 + /*
5771 + * Some options can be enabled by ext4 and/or by VFS mount flag
5772 + * either way we need to make sure it matches in both *flags and
5773 + * s_flags. Copy those selected flags from *flags to s_flags
5774 + */
5775 + vfs_flags = SB_LAZYTIME | SB_I_VERSION;
5776 + sb->s_flags = (sb->s_flags & ~vfs_flags) | (*flags & vfs_flags);
5777 +
5778 if (!parse_options(data, sb, NULL, &journal_ioprio, 1)) {
5779 err = -EINVAL;
5780 goto restore_opts;
5781 @@ -5405,9 +5441,6 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5782 set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
5783 }
5784
5785 - if (*flags & SB_LAZYTIME)
5786 - sb->s_flags |= SB_LAZYTIME;
5787 -
5788 if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
5789 if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
5790 err = -EROFS;
5791 @@ -5437,8 +5470,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5792 (sbi->s_mount_state & EXT4_VALID_FS))
5793 es->s_state = cpu_to_le16(sbi->s_mount_state);
5794
5795 - if (sbi->s_journal)
5796 + if (sbi->s_journal) {
5797 + /*
5798 + * We let remount-ro finish even if marking fs
5799 + * as clean failed...
5800 + */
5801 ext4_mark_recovery_complete(sb, es);
5802 + }
5803 if (sbi->s_mmp_tsk)
5804 kthread_stop(sbi->s_mmp_tsk);
5805 } else {
5806 @@ -5486,8 +5524,11 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5807 * been changed by e2fsck since we originally mounted
5808 * the partition.)
5809 */
5810 - if (sbi->s_journal)
5811 - ext4_clear_journal_err(sb, es);
5812 + if (sbi->s_journal) {
5813 + err = ext4_clear_journal_err(sb, es);
5814 + if (err)
5815 + goto restore_opts;
5816 + }
5817 sbi->s_mount_state = le16_to_cpu(es->s_state);
5818
5819 err = ext4_setup_super(sb, es, 0);
5820 @@ -5517,7 +5558,17 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5821 ext4_register_li_request(sb, first_not_zeroed);
5822 }
5823
5824 - ext4_setup_system_zone(sb);
5825 + /*
5826 + * Handle creation of system zone data early because it can fail.
5827 + * Releasing of existing data is done when we are sure remount will
5828 + * succeed.
5829 + */
5830 + if (test_opt(sb, BLOCK_VALIDITY) && !sbi->system_blks) {
5831 + err = ext4_setup_system_zone(sb);
5832 + if (err)
5833 + goto restore_opts;
5834 + }
5835 +
5836 if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
5837 err = ext4_commit_super(sb, 1);
5838 if (err)
5839 @@ -5538,8 +5589,16 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
5840 }
5841 }
5842 #endif
5843 + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
5844 + ext4_release_system_zone(sb);
5845 +
5846 + /*
5847 + * Some options can be enabled by ext4 and/or by VFS mount flag
5848 + * either way we need to make sure it matches in both *flags and
5849 + * s_flags. Copy those selected flags from s_flags to *flags
5850 + */
5851 + *flags = (*flags & ~vfs_flags) | (sb->s_flags & vfs_flags);
5852
5853 - *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
5854 ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
5855 kfree(orig_data);
5856 return 0;
5857 @@ -5553,6 +5612,8 @@ restore_opts:
5858 sbi->s_commit_interval = old_opts.s_commit_interval;
5859 sbi->s_min_batch_time = old_opts.s_min_batch_time;
5860 sbi->s_max_batch_time = old_opts.s_max_batch_time;
5861 + if (!test_opt(sb, BLOCK_VALIDITY) && sbi->system_blks)
5862 + ext4_release_system_zone(sb);
5863 #ifdef CONFIG_QUOTA
5864 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
5865 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
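The ext4 super.c changes converge on one theme: turning silently-ignored failures into propagated errors. ext4_load_journal() now acquires the journal first, rejects a read-only journal device on a read-write mount with -EROFS, and routes every late failure through one err_out label that destroys the journal exactly once; ext4_mark_recovery_complete() and ext4_clear_journal_err() return int so their errors unwind mount/remount, and the BUG_ON()s on a missing journal feature become recoverable -EFSCORRUPTED reports. Remount also builds the BLOCK_VALIDITY system zone before committing to the new options, releases it both when the option is off and on the restore_opts rollback, and mirrors SB_LAZYTIME/SB_I_VERSION between *flags and s_flags in both directions. The single-owner cleanup shape, schematically (get_journal() is a hypothetical stand-in for the two acquire paths):

	journal = get_journal(sb);
	if (!journal)
		return -EINVAL;
	if (journal_dev_ro && !sb_rdonly(sb)) {
		err = -EROFS;
		goto err_out;
	}
	/* ... load, validate, hand off on success ... */
	return 0;

	err_out:
		jbd2_journal_destroy(journal);	/* torn down exactly once */
		return err;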
5866 diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
5867 index 03693d6b1c104..b3b7e63394be7 100644
5868 --- a/fs/f2fs/f2fs.h
5869 +++ b/fs/f2fs/f2fs.h
5870 @@ -3061,7 +3061,7 @@ bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
5871 void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
5872 void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
5873 int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink);
5874 -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
5875 +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
5876 int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
5877 int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
5878 int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
5879 @@ -3487,7 +3487,7 @@ int f2fs_read_inline_data(struct inode *inode, struct page *page);
5880 int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page);
5881 int f2fs_convert_inline_inode(struct inode *inode);
5882 int f2fs_write_inline_data(struct inode *inode, struct page *page);
5883 -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage);
5884 +int f2fs_recover_inline_data(struct inode *inode, struct page *npage);
5885 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
5886 struct fscrypt_name *fname, struct page **res_page);
5887 int f2fs_make_empty_inline_dir(struct inode *inode, struct inode *parent,
5888 diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
5889 index 896db0416f0e6..183388393c6a8 100644
5890 --- a/fs/f2fs/inline.c
5891 +++ b/fs/f2fs/inline.c
5892 @@ -252,7 +252,7 @@ int f2fs_write_inline_data(struct inode *inode, struct page *page)
5893 return 0;
5894 }
5895
5896 -bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
5897 +int f2fs_recover_inline_data(struct inode *inode, struct page *npage)
5898 {
5899 struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
5900 struct f2fs_inode *ri = NULL;
5901 @@ -274,7 +274,8 @@ bool f2fs_recover_inline_data(struct inode *inode, struct page *npage)
5902 ri && (ri->i_inline & F2FS_INLINE_DATA)) {
5903 process_inline:
5904 ipage = f2fs_get_node_page(sbi, inode->i_ino);
5905 - f2fs_bug_on(sbi, IS_ERR(ipage));
5906 + if (IS_ERR(ipage))
5907 + return PTR_ERR(ipage);
5908
5909 f2fs_wait_on_page_writeback(ipage, NODE, true, true);
5910
5911 @@ -287,21 +288,25 @@ process_inline:
5912
5913 set_page_dirty(ipage);
5914 f2fs_put_page(ipage, 1);
5915 - return true;
5916 + return 1;
5917 }
5918
5919 if (f2fs_has_inline_data(inode)) {
5920 ipage = f2fs_get_node_page(sbi, inode->i_ino);
5921 - f2fs_bug_on(sbi, IS_ERR(ipage));
5922 + if (IS_ERR(ipage))
5923 + return PTR_ERR(ipage);
5924 f2fs_truncate_inline_inode(inode, ipage, 0);
5925 clear_inode_flag(inode, FI_INLINE_DATA);
5926 f2fs_put_page(ipage, 1);
5927 } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
5928 - if (f2fs_truncate_blocks(inode, 0, false))
5929 - return false;
5930 + int ret;
5931 +
5932 + ret = f2fs_truncate_blocks(inode, 0, false);
5933 + if (ret)
5934 + return ret;
5935 goto process_inline;
5936 }
5937 - return false;
5938 + return 0;
5939 }
5940
5941 struct f2fs_dir_entry *f2fs_find_in_inline_dir(struct inode *dir,
5942 diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
5943 index 90a20bd129614..daeac4268c1ab 100644
5944 --- a/fs/f2fs/node.c
5945 +++ b/fs/f2fs/node.c
5946 @@ -2512,7 +2512,7 @@ int f2fs_try_to_free_nids(struct f2fs_sb_info *sbi, int nr_shrink)
5947 return nr - nr_shrink;
5948 }
5949
5950 -void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
5951 +int f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
5952 {
5953 void *src_addr, *dst_addr;
5954 size_t inline_size;
5955 @@ -2520,7 +2520,8 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
5956 struct f2fs_inode *ri;
5957
5958 ipage = f2fs_get_node_page(F2FS_I_SB(inode), inode->i_ino);
5959 - f2fs_bug_on(F2FS_I_SB(inode), IS_ERR(ipage));
5960 + if (IS_ERR(ipage))
5961 + return PTR_ERR(ipage);
5962
5963 ri = F2FS_INODE(page);
5964 if (ri->i_inline & F2FS_INLINE_XATTR) {
5965 @@ -2539,6 +2540,7 @@ void f2fs_recover_inline_xattr(struct inode *inode, struct page *page)
5966 update_inode:
5967 f2fs_update_inode(inode, ipage);
5968 f2fs_put_page(ipage, 1);
5969 + return 0;
5970 }
5971
5972 int f2fs_recover_xattr_data(struct inode *inode, struct page *page)
5973 diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
5974 index 783773e4560de..5f230e981c483 100644
5975 --- a/fs/f2fs/recovery.c
5976 +++ b/fs/f2fs/recovery.c
5977 @@ -514,7 +514,9 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
5978
5979 /* step 1: recover xattr */
5980 if (IS_INODE(page)) {
5981 - f2fs_recover_inline_xattr(inode, page);
5982 + err = f2fs_recover_inline_xattr(inode, page);
5983 + if (err)
5984 + goto out;
5985 } else if (f2fs_has_xattr_block(ofs_of_node(page))) {
5986 err = f2fs_recover_xattr_data(inode, page);
5987 if (!err)
5988 @@ -523,8 +525,12 @@ static int do_recover_data(struct f2fs_sb_info *sbi, struct inode *inode,
5989 }
5990
5991 /* step 2: recover inline data */
5992 - if (f2fs_recover_inline_data(inode, page))
5993 + err = f2fs_recover_inline_data(inode, page);
5994 + if (err) {
5995 + if (err == 1)
5996 + err = 0;
5997 goto out;
5998 + }
5999
6000 /* step 3: recover data indices */
6001 start = f2fs_start_bidx_of_node(ofs_of_node(page), inode);
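The f2fs recovery helpers switch from void/bool returns to int so a failed f2fs_get_node_page() propagates instead of tripping f2fs_bug_on(). f2fs_recover_inline_data() now uses a tri-state convention: negative errno on failure, 1 when inline data was recovered (recovery of this node is complete), 0 to continue with block recovery, and the caller decodes it accordingly:

	err = f2fs_recover_inline_data(inode, page);
	if (err) {
		if (err == 1)	/* inline data handled: success, done */
			err = 0;
		goto out;	/* negative errno propagates */
	}
	/* err == 0: fall through to recover the data block indices */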
6002 diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
6003 index f4b882ee48ddf..fa461db696e79 100644
6004 --- a/fs/f2fs/super.c
6005 +++ b/fs/f2fs/super.c
6006 @@ -1075,6 +1075,9 @@ static void f2fs_put_super(struct super_block *sb)
6007 int i;
6008 bool dropped;
6009
6010 + /* unregister procfs/sysfs entries in advance to avoid race case */
6011 + f2fs_unregister_sysfs(sbi);
6012 +
6013 f2fs_quota_off_umount(sb);
6014
6015 /* prevent remaining shrinker jobs */
6016 @@ -1138,8 +1141,6 @@ static void f2fs_put_super(struct super_block *sb)
6017
6018 kvfree(sbi->ckpt);
6019
6020 - f2fs_unregister_sysfs(sbi);
6021 -
6022 sb->s_fs_info = NULL;
6023 if (sbi->s_chksum_driver)
6024 crypto_free_shash(sbi->s_chksum_driver);
6025 diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
6026 index 76ac9c7d32ec7..5f6400ba82c00 100644
6027 --- a/fs/fs-writeback.c
6028 +++ b/fs/fs-writeback.c
6029 @@ -42,7 +42,6 @@
6030 struct wb_writeback_work {
6031 long nr_pages;
6032 struct super_block *sb;
6033 - unsigned long *older_than_this;
6034 enum writeback_sync_modes sync_mode;
6035 unsigned int tagged_writepages:1;
6036 unsigned int for_kupdate:1;
6037 @@ -144,7 +143,9 @@ static void inode_io_list_del_locked(struct inode *inode,
6038 struct bdi_writeback *wb)
6039 {
6040 assert_spin_locked(&wb->list_lock);
6041 + assert_spin_locked(&inode->i_lock);
6042
6043 + inode->i_state &= ~I_SYNC_QUEUED;
6044 list_del_init(&inode->i_io_list);
6045 wb_io_lists_depopulated(wb);
6046 }
6047 @@ -1123,7 +1124,9 @@ void inode_io_list_del(struct inode *inode)
6048 struct bdi_writeback *wb;
6049
6050 wb = inode_to_wb_and_lock_list(inode);
6051 + spin_lock(&inode->i_lock);
6052 inode_io_list_del_locked(inode, wb);
6053 + spin_unlock(&inode->i_lock);
6054 spin_unlock(&wb->list_lock);
6055 }
6056
6057 @@ -1172,8 +1175,10 @@ void sb_clear_inode_writeback(struct inode *inode)
6058 * the case then the inode must have been redirtied while it was being written
6059 * out and we don't reset its dirtied_when.
6060 */
6061 -static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
6062 +static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
6063 {
6064 + assert_spin_locked(&inode->i_lock);
6065 +
6066 if (!list_empty(&wb->b_dirty)) {
6067 struct inode *tail;
6068
6069 @@ -1182,6 +1187,14 @@ static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
6070 inode->dirtied_when = jiffies;
6071 }
6072 inode_io_list_move_locked(inode, wb, &wb->b_dirty);
6073 + inode->i_state &= ~I_SYNC_QUEUED;
6074 +}
6075 +
6076 +static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
6077 +{
6078 + spin_lock(&inode->i_lock);
6079 + redirty_tail_locked(inode, wb);
6080 + spin_unlock(&inode->i_lock);
6081 }
6082
6083 /*
6084 @@ -1220,16 +1233,13 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
6085 #define EXPIRE_DIRTY_ATIME 0x0001
6086
6087 /*
6088 - * Move expired (dirtied before work->older_than_this) dirty inodes from
6089 + * Move expired (dirtied before dirtied_before) dirty inodes from
6090 * @delaying_queue to @dispatch_queue.
6091 */
6092 static int move_expired_inodes(struct list_head *delaying_queue,
6093 struct list_head *dispatch_queue,
6094 - int flags,
6095 - struct wb_writeback_work *work)
6096 + int flags, unsigned long dirtied_before)
6097 {
6098 - unsigned long *older_than_this = NULL;
6099 - unsigned long expire_time;
6100 LIST_HEAD(tmp);
6101 struct list_head *pos, *node;
6102 struct super_block *sb = NULL;
6103 @@ -1237,21 +1247,17 @@ static int move_expired_inodes(struct list_head *delaying_queue,
6104 int do_sb_sort = 0;
6105 int moved = 0;
6106
6107 - if ((flags & EXPIRE_DIRTY_ATIME) == 0)
6108 - older_than_this = work->older_than_this;
6109 - else if (!work->for_sync) {
6110 - expire_time = jiffies - (dirtytime_expire_interval * HZ);
6111 - older_than_this = &expire_time;
6112 - }
6113 while (!list_empty(delaying_queue)) {
6114 inode = wb_inode(delaying_queue->prev);
6115 - if (older_than_this &&
6116 - inode_dirtied_after(inode, *older_than_this))
6117 + if (inode_dirtied_after(inode, dirtied_before))
6118 break;
6119 list_move(&inode->i_io_list, &tmp);
6120 moved++;
6121 + spin_lock(&inode->i_lock);
6122 if (flags & EXPIRE_DIRTY_ATIME)
6123 - set_bit(__I_DIRTY_TIME_EXPIRED, &inode->i_state);
6124 + inode->i_state |= I_DIRTY_TIME_EXPIRED;
6125 + inode->i_state |= I_SYNC_QUEUED;
6126 + spin_unlock(&inode->i_lock);
6127 if (sb_is_blkdev_sb(inode->i_sb))
6128 continue;
6129 if (sb && sb != inode->i_sb)
6130 @@ -1289,18 +1295,22 @@ out:
6131 * |
6132 * +--> dequeue for IO
6133 */
6134 -static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
6135 +static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
6136 + unsigned long dirtied_before)
6137 {
6138 int moved;
6139 + unsigned long time_expire_jif = dirtied_before;
6140
6141 assert_spin_locked(&wb->list_lock);
6142 list_splice_init(&wb->b_more_io, &wb->b_io);
6143 - moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, work);
6144 + moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, 0, dirtied_before);
6145 + if (!work->for_sync)
6146 + time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
6147 moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
6148 - EXPIRE_DIRTY_ATIME, work);
6149 + EXPIRE_DIRTY_ATIME, time_expire_jif);
6150 if (moved)
6151 wb_io_lists_populated(wb);
6152 - trace_writeback_queue_io(wb, work, moved);
6153 + trace_writeback_queue_io(wb, work, dirtied_before, moved);
6154 }
6155
6156 static int write_inode(struct inode *inode, struct writeback_control *wbc)
6157 @@ -1394,7 +1404,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
6158 * writeback is not making progress due to locked
6159 * buffers. Skip this inode for now.
6160 */
6161 - redirty_tail(inode, wb);
6162 + redirty_tail_locked(inode, wb);
6163 return;
6164 }
6165
6166 @@ -1414,7 +1424,7 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
6167 * retrying writeback of the dirty page/inode
6168 * that cannot be performed immediately.
6169 */
6170 - redirty_tail(inode, wb);
6171 + redirty_tail_locked(inode, wb);
6172 }
6173 } else if (inode->i_state & I_DIRTY) {
6174 /*
6175 @@ -1422,10 +1432,11 @@ static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
6176 * such as delayed allocation during submission or metadata
6177 * updates after data IO completion.
6178 */
6179 - redirty_tail(inode, wb);
6180 + redirty_tail_locked(inode, wb);
6181 } else if (inode->i_state & I_DIRTY_TIME) {
6182 inode->dirtied_when = jiffies;
6183 inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
6184 + inode->i_state &= ~I_SYNC_QUEUED;
6185 } else {
6186 /* The inode is clean. Remove from writeback lists. */
6187 inode_io_list_del_locked(inode, wb);
6188 @@ -1669,8 +1680,8 @@ static long writeback_sb_inodes(struct super_block *sb,
6189 */
6190 spin_lock(&inode->i_lock);
6191 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
6192 + redirty_tail_locked(inode, wb);
6193 spin_unlock(&inode->i_lock);
6194 - redirty_tail(inode, wb);
6195 continue;
6196 }
6197 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
6198 @@ -1811,7 +1822,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
6199 blk_start_plug(&plug);
6200 spin_lock(&wb->list_lock);
6201 if (list_empty(&wb->b_io))
6202 - queue_io(wb, &work);
6203 + queue_io(wb, &work, jiffies);
6204 __writeback_inodes_wb(wb, &work);
6205 spin_unlock(&wb->list_lock);
6206 blk_finish_plug(&plug);
6207 @@ -1831,7 +1842,7 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
6208 * takes longer than a dirty_writeback_interval interval, then leave a
6209 * one-second gap.
6210 *
6211 - * older_than_this takes precedence over nr_to_write. So we'll only write back
6212 + * dirtied_before takes precedence over nr_to_write. So we'll only write back
6213 * all dirty pages if they are all attached to "old" mappings.
6214 */
6215 static long wb_writeback(struct bdi_writeback *wb,
6216 @@ -1839,14 +1850,11 @@ static long wb_writeback(struct bdi_writeback *wb,
6217 {
6218 unsigned long wb_start = jiffies;
6219 long nr_pages = work->nr_pages;
6220 - unsigned long oldest_jif;
6221 + unsigned long dirtied_before = jiffies;
6222 struct inode *inode;
6223 long progress;
6224 struct blk_plug plug;
6225
6226 - oldest_jif = jiffies;
6227 - work->older_than_this = &oldest_jif;
6228 -
6229 blk_start_plug(&plug);
6230 spin_lock(&wb->list_lock);
6231 for (;;) {
6232 @@ -1880,14 +1888,14 @@ static long wb_writeback(struct bdi_writeback *wb,
6233 * safe.
6234 */
6235 if (work->for_kupdate) {
6236 - oldest_jif = jiffies -
6237 + dirtied_before = jiffies -
6238 msecs_to_jiffies(dirty_expire_interval * 10);
6239 } else if (work->for_background)
6240 - oldest_jif = jiffies;
6241 + dirtied_before = jiffies;
6242
6243 trace_writeback_start(wb, work);
6244 if (list_empty(&wb->b_io))
6245 - queue_io(wb, work);
6246 + queue_io(wb, work, dirtied_before);
6247 if (work->sb)
6248 progress = writeback_sb_inodes(work->sb, wb, work);
6249 else
6250 @@ -2289,11 +2297,12 @@ void __mark_inode_dirty(struct inode *inode, int flags)
6251 inode->i_state |= flags;
6252
6253 /*
6254 - * If the inode is being synced, just update its dirty state.
6255 - * The unlocker will place the inode on the appropriate
6256 - * superblock list, based upon its state.
6257 + * If the inode is queued for writeback by flush worker, just
6258 + * update its dirty state. Once the flush worker is done with
6259 + * the inode it will place it on the appropriate superblock
6260 + * list, based upon its state.
6261 */
6262 - if (inode->i_state & I_SYNC)
6263 + if (inode->i_state & I_SYNC_QUEUED)
6264 goto out_unlock_inode;
6265
6266 /*
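
The fs-writeback hunks above all revolve around one idea: the new I_SYNC_QUEUED state bit marks an inode as sitting on the flusher's b_io/b_more_io lists, so __mark_inode_dirty() leaves it alone instead of shuffling it between dirty lists under the flusher's feet. A minimal compilable userspace sketch of that ownership-bit pattern (hypothetical names, not the kernel code):

#include <stdio.h>

/* Hypothetical stand-ins for the inode state bits used above. */
#define X_DIRTY       (1u << 0)
#define X_SYNC_QUEUED (1u << 1)   /* set while the flusher owns the item */

struct item { unsigned int state; };

/* Analogue of __mark_inode_dirty(): never requeue a queued item. */
static void mark_dirty(struct item *it)
{
	it->state |= X_DIRTY;
	if (it->state & X_SYNC_QUEUED)
		return;                 /* flusher will requeue it when done */
	printf("moved to the dirty list\n");
}

int main(void)
{
	struct item it = { .state = X_SYNC_QUEUED };

	mark_dirty(&it);                /* silent: the flusher owns it */
	it.state &= ~X_SYNC_QUEUED;     /* analogue of requeue_inode() */
	mark_dirty(&it);                /* now it moves lists */
	return 0;
}
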
6267 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
6268 index 40306c1eab07c..5fff7cb3582f0 100644
6269 --- a/fs/hugetlbfs/inode.c
6270 +++ b/fs/hugetlbfs/inode.c
6271 @@ -1284,6 +1284,12 @@ hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
6272 sb->s_magic = HUGETLBFS_MAGIC;
6273 sb->s_op = &hugetlbfs_ops;
6274 sb->s_time_gran = 1;
6275 +
6276 + /*
6277 + * Due to the special and limited functionality of hugetlbfs, it does
6278 + * not work well as a stacking filesystem.
6279 + */
6280 + sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
6281 sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
6282 if (!sb->s_root)
6283 goto out_free;
6284 diff --git a/fs/io_uring.c b/fs/io_uring.c
6285 index fada14ee1cdcb..2a539b794f3b0 100644
6286 --- a/fs/io_uring.c
6287 +++ b/fs/io_uring.c
6288 @@ -2378,6 +2378,15 @@ static bool io_add_to_prev_work(struct async_list *list, struct io_kiocb *req)
6289 list_del_init(&req->list);
6290 ret = false;
6291 }
6292 +
6293 + if (ret) {
6294 + struct io_ring_ctx *ctx = req->ctx;
6295 +
6296 + spin_lock_irq(&ctx->task_lock);
6297 + list_add(&req->task_list, &ctx->task_list);
6298 + req->work_task = NULL;
6299 + spin_unlock_irq(&ctx->task_lock);
6300 + }
6301 spin_unlock(&list->lock);
6302 return ret;
6303 }
6304 diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
6305 index de992a70ddfef..90453309345d5 100644
6306 --- a/fs/jbd2/transaction.c
6307 +++ b/fs/jbd2/transaction.c
6308 @@ -1983,6 +1983,9 @@ static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
6309 */
6310 static void __jbd2_journal_unfile_buffer(struct journal_head *jh)
6311 {
6312 + J_ASSERT_JH(jh, jh->b_transaction != NULL);
6313 + J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
6314 +
6315 __jbd2_journal_temp_unlink_buffer(jh);
6316 jh->b_transaction = NULL;
6317 jbd2_journal_put_journal_head(jh);
6318 @@ -2074,6 +2077,7 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
6319 {
6320 struct buffer_head *head;
6321 struct buffer_head *bh;
6322 + bool has_write_io_error = false;
6323 int ret = 0;
6324
6325 J_ASSERT(PageLocked(page));
6326 @@ -2098,11 +2102,26 @@ int jbd2_journal_try_to_free_buffers(journal_t *journal,
6327 jbd_unlock_bh_state(bh);
6328 if (buffer_jbd(bh))
6329 goto busy;
6330 +
6331 + /*
6332 + * If we free a metadata buffer which has failed to be
6333 + * written out, the jbd2 checkpoint procedure will not detect
6334 + * this failure and may lead to filesystem inconsistency
6335 + * after cleaning up the journal tail.
6336 + */
6337 + if (buffer_write_io_error(bh)) {
6338 + pr_err("JBD2: Error while async write back metadata bh %llu.",
6339 + (unsigned long long)bh->b_blocknr);
6340 + has_write_io_error = true;
6341 + }
6342 } while ((bh = bh->b_this_page) != head);
6343
6344 ret = try_to_free_buffers(page);
6345
6346 busy:
6347 + if (has_write_io_error)
6348 + jbd2_journal_abort(journal, -EIO);
6349 +
6350 return ret;
6351 }
6352
6353 @@ -2530,6 +2549,13 @@ void __jbd2_journal_refile_buffer(struct journal_head *jh)
6354
6355 was_dirty = test_clear_buffer_jbddirty(bh);
6356 __jbd2_journal_temp_unlink_buffer(jh);
6357 +
6358 + /*
6359 + * b_transaction must be set, otherwise the new b_transaction won't
6360 + * be holding jh reference
6361 + */
6362 + J_ASSERT_JH(jh, jh->b_transaction != NULL);
6363 +
6364 /*
6365 * We set b_transaction here because b_next_transaction will inherit
6366 * our jh reference and thus __jbd2_journal_file_buffer() must not
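
The jbd2_journal_try_to_free_buffers() change above walks the page's circular buffer_head ring and, if any buffer carries the write-IO-error bit, aborts the journal so a failed metadata write-back cannot be silently forgotten at checkpoint time. A compilable toy of that scan-then-act-once pattern (simplified types, not the kernel structures):

#include <stdbool.h>
#include <stdio.h>

/* Toy analogue of the circular b_this_page ring walked above. */
struct bh {
	bool write_io_error;
	struct bh *next;
};

static bool page_has_write_error(struct bh *head)
{
	struct bh *b = head;
	bool err = false;

	do {                            /* same do/while shape as the hunk */
		if (b->write_io_error)
			err = true;     /* keep scanning; act only once */
		b = b->next;
	} while (b != head);
	return err;
}

int main(void)
{
	struct bh b1, b2 = { .write_io_error = true, .next = &b1 };

	b1.write_io_error = false;
	b1.next = &b2;                  /* close the ring */
	if (page_has_write_error(&b1))
		printf("abort the journal with -EIO\n");
	return 0;
}
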
6367 diff --git a/fs/xfs/libxfs/xfs_trans_inode.c b/fs/xfs/libxfs/xfs_trans_inode.c
6368 index a9ad90926b873..6c7354abd0aea 100644
6369 --- a/fs/xfs/libxfs/xfs_trans_inode.c
6370 +++ b/fs/xfs/libxfs/xfs_trans_inode.c
6371 @@ -36,6 +36,7 @@ xfs_trans_ijoin(
6372
6373 ASSERT(iip->ili_lock_flags == 0);
6374 iip->ili_lock_flags = lock_flags;
6375 + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
6376
6377 /*
6378 * Get a log_item_desc to point at the new item.
6379 @@ -91,6 +92,7 @@ xfs_trans_log_inode(
6380
6381 ASSERT(ip->i_itemp != NULL);
6382 ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6383 + ASSERT(!xfs_iflags_test(ip, XFS_ISTALE));
6384
6385 /*
6386 * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
6387 diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
6388 index d95dc9b0f0bba..a1135b86e79f9 100644
6389 --- a/fs/xfs/xfs_icache.c
6390 +++ b/fs/xfs/xfs_icache.c
6391 @@ -1132,7 +1132,7 @@ restart:
6392 goto out_ifunlock;
6393 xfs_iunpin_wait(ip);
6394 }
6395 - if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
6396 + if (xfs_inode_clean(ip)) {
6397 xfs_ifunlock(ip);
6398 goto reclaim;
6399 }
6400 @@ -1219,6 +1219,7 @@ reclaim:
6401 xfs_ilock(ip, XFS_ILOCK_EXCL);
6402 xfs_qm_dqdetach(ip);
6403 xfs_iunlock(ip, XFS_ILOCK_EXCL);
6404 + ASSERT(xfs_inode_clean(ip));
6405
6406 __xfs_inode_free(ip);
6407 return error;
6408 diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
6409 index 18f4b262e61ce..b339ff93df997 100644
6410 --- a/fs/xfs/xfs_inode.c
6411 +++ b/fs/xfs/xfs_inode.c
6412 @@ -1761,10 +1761,31 @@ xfs_inactive_ifree(
6413 return error;
6414 }
6415
6416 + /*
6417 + * We do not hold the inode locked across the entire rolling transaction
6418 + * here. We only need to hold it for the first transaction that
6419 + * xfs_ifree() builds, which may mark the inode XFS_ISTALE if the
6420 + * underlying cluster buffer is freed. Relogging an XFS_ISTALE inode
6421 + * here breaks the relationship between cluster buffer invalidation and
6422 + * stale inode invalidation on cluster buffer item journal commit
6423 + * completion, and can result in leaving dirty stale inodes hanging
6424 + * around in memory.
6425 + *
6426 + * We have no need for serialising this inode operation against other
6427 + * operations - we freed the inode and hence reallocation is required
6428 + * and that will serialise on reallocating the space the deferops need
6429 + * to free. Hence we can unlock the inode on the first commit of
6430 + * the transaction rather than roll it right through the deferops. This
6431 + * avoids relogging the XFS_ISTALE inode.
6432 + *
6433 + * We check that xfs_ifree() hasn't grown an internal transaction roll
6434 + * by asserting that the inode is still locked when it returns.
6435 + */
6436 xfs_ilock(ip, XFS_ILOCK_EXCL);
6437 - xfs_trans_ijoin(tp, ip, 0);
6438 + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
6439
6440 error = xfs_ifree(tp, ip);
6441 + ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
6442 if (error) {
6443 /*
6444 * If we fail to free the inode, shut down. The cancel
6445 @@ -1777,7 +1798,6 @@ xfs_inactive_ifree(
6446 xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
6447 }
6448 xfs_trans_cancel(tp);
6449 - xfs_iunlock(ip, XFS_ILOCK_EXCL);
6450 return error;
6451 }
6452
6453 @@ -1795,7 +1815,6 @@ xfs_inactive_ifree(
6454 xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
6455 __func__, error);
6456
6457 - xfs_iunlock(ip, XFS_ILOCK_EXCL);
6458 return 0;
6459 }
6460
6461 diff --git a/include/linux/efi.h b/include/linux/efi.h
6462 index d87acf62958e2..13ed2c6b13f8b 100644
6463 --- a/include/linux/efi.h
6464 +++ b/include/linux/efi.h
6465 @@ -1039,7 +1039,11 @@ extern void *efi_get_pal_addr (void);
6466 extern void efi_map_pal_code (void);
6467 extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
6468 extern void efi_gettimeofday (struct timespec64 *ts);
6469 +#ifdef CONFIG_EFI
6470 extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
6471 +#else
6472 +static inline void efi_enter_virtual_mode (void) {}
6473 +#endif
6474 #ifdef CONFIG_X86
6475 extern efi_status_t efi_query_variable_store(u32 attributes,
6476 unsigned long size,
6477 diff --git a/include/linux/fb.h b/include/linux/fb.h
6478 index 756706b666a10..8221838fefd98 100644
6479 --- a/include/linux/fb.h
6480 +++ b/include/linux/fb.h
6481 @@ -400,8 +400,6 @@ struct fb_tile_ops {
6482 #define FBINFO_HWACCEL_YPAN 0x2000 /* optional */
6483 #define FBINFO_HWACCEL_YWRAP 0x4000 /* optional */
6484
6485 -#define FBINFO_MISC_USEREVENT 0x10000 /* event request
6486 - from userspace */
6487 #define FBINFO_MISC_TILEBLITTING 0x20000 /* use tile blitting */
6488
6489 /* A driver may set this flag to indicate that it does want a set_par to be
6490 diff --git a/include/linux/fs.h b/include/linux/fs.h
6491 index 5bd384dbdca58..4c82683e034a7 100644
6492 --- a/include/linux/fs.h
6493 +++ b/include/linux/fs.h
6494 @@ -2140,6 +2140,10 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
6495 *
6496 * I_CREATING New object's inode in the middle of setting up.
6497 *
6498 + * I_SYNC_QUEUED Inode is queued in b_io or b_more_io writeback lists.
6499 + * Used to detect that mark_inode_dirty() should not move
6500 + * the inode between dirty lists.
6501 + *
6502 * Q: What is the difference between I_WILL_FREE and I_FREEING?
6503 */
6504 #define I_DIRTY_SYNC (1 << 0)
6505 @@ -2157,11 +2161,11 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
6506 #define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP)
6507 #define I_LINKABLE (1 << 10)
6508 #define I_DIRTY_TIME (1 << 11)
6509 -#define __I_DIRTY_TIME_EXPIRED 12
6510 -#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
6511 +#define I_DIRTY_TIME_EXPIRED (1 << 12)
6512 #define I_WB_SWITCH (1 << 13)
6513 #define I_OVL_INUSE (1 << 14)
6514 #define I_CREATING (1 << 15)
6515 +#define I_SYNC_QUEUED (1 << 17)
6516
6517 #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
6518 #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
6519 diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
6520 index aac42c28fe62d..9b67394471e1c 100644
6521 --- a/include/linux/netfilter_ipv6.h
6522 +++ b/include/linux/netfilter_ipv6.h
6523 @@ -58,7 +58,6 @@ struct nf_ipv6_ops {
6524 int (*output)(struct net *, struct sock *, struct sk_buff *));
6525 int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
6526 #if IS_MODULE(CONFIG_IPV6)
6527 - int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
6528 int (*br_fragment)(struct net *net, struct sock *sk,
6529 struct sk_buff *skb,
6530 struct nf_bridge_frag_data *data,
6531 @@ -117,23 +116,6 @@ static inline int nf_ip6_route(struct net *net, struct dst_entry **dst,
6532
6533 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
6534
6535 -static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
6536 - u32 user)
6537 -{
6538 -#if IS_MODULE(CONFIG_IPV6)
6539 - const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops();
6540 -
6541 - if (!v6_ops)
6542 - return 1;
6543 -
6544 - return v6_ops->br_defrag(net, skb, user);
6545 -#elif IS_BUILTIN(CONFIG_IPV6)
6546 - return nf_ct_frag6_gather(net, skb, user);
6547 -#else
6548 - return 1;
6549 -#endif
6550 -}
6551 -
6552 int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
6553 struct nf_bridge_frag_data *data,
6554 int (*output)(struct net *, struct sock *sk,
6555 diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
6556 index 66282552db207..67434278b81dd 100644
6557 --- a/include/trace/events/writeback.h
6558 +++ b/include/trace/events/writeback.h
6559 @@ -499,8 +499,9 @@ DEFINE_WBC_EVENT(wbc_writepage);
6560 TRACE_EVENT(writeback_queue_io,
6561 TP_PROTO(struct bdi_writeback *wb,
6562 struct wb_writeback_work *work,
6563 + unsigned long dirtied_before,
6564 int moved),
6565 - TP_ARGS(wb, work, moved),
6566 + TP_ARGS(wb, work, dirtied_before, moved),
6567 TP_STRUCT__entry(
6568 __array(char, name, 32)
6569 __field(unsigned long, older)
6570 @@ -510,19 +511,17 @@ TRACE_EVENT(writeback_queue_io,
6571 __field(unsigned int, cgroup_ino)
6572 ),
6573 TP_fast_assign(
6574 - unsigned long *older_than_this = work->older_than_this;
6575 strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
6576 - __entry->older = older_than_this ? *older_than_this : 0;
6577 - __entry->age = older_than_this ?
6578 - (jiffies - *older_than_this) * 1000 / HZ : -1;
6579 + __entry->older = dirtied_before;
6580 + __entry->age = (jiffies - dirtied_before) * 1000 / HZ;
6581 __entry->moved = moved;
6582 __entry->reason = work->reason;
6583 __entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
6584 ),
6585 TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
6586 __entry->name,
6587 - __entry->older, /* older_than_this in jiffies */
6588 - __entry->age, /* older_than_this in relative milliseconds */
6589 + __entry->older, /* dirtied_before in jiffies */
6590 + __entry->age, /* dirtied_before in relative milliseconds */
6591 __entry->moved,
6592 __print_symbolic(__entry->reason, WB_WORK_REASON),
6593 __entry->cgroup_ino
6594 diff --git a/kernel/Makefile b/kernel/Makefile
6595 index daad787fb795d..42557f251fea6 100644
6596 --- a/kernel/Makefile
6597 +++ b/kernel/Makefile
6598 @@ -128,7 +128,7 @@ $(obj)/config_data.gz: $(KCONFIG_CONFIG) FORCE
6599 $(obj)/kheaders.o: $(obj)/kheaders_data.tar.xz
6600
6601 quiet_cmd_genikh = CHK $(obj)/kheaders_data.tar.xz
6602 - cmd_genikh = $(BASH) $(srctree)/kernel/gen_kheaders.sh $@
6603 + cmd_genikh = $(CONFIG_SHELL) $(srctree)/kernel/gen_kheaders.sh $@
6604 $(obj)/kheaders_data.tar.xz: FORCE
6605 $(call cmd,genikh)
6606
6607 diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
6608 index 5a0fc0b0403a6..c1510f0ab3ea5 100755
6609 --- a/kernel/gen_kheaders.sh
6610 +++ b/kernel/gen_kheaders.sh
6611 @@ -1,4 +1,4 @@
6612 -#!/bin/bash
6613 +#!/bin/sh
6614 # SPDX-License-Identifier: GPL-2.0
6615
6616 # This script generates an archive consisting of kernel headers
6617 @@ -21,30 +21,38 @@ arch/$SRCARCH/include/
6618 # Uncomment it for debugging.
6619 # if [ ! -f /tmp/iter ]; then iter=1; echo 1 > /tmp/iter;
6620 # else iter=$(($(cat /tmp/iter) + 1)); echo $iter > /tmp/iter; fi
6621 -# find $src_file_list -name "*.h" | xargs ls -l > /tmp/src-ls-$iter
6622 -# find $obj_file_list -name "*.h" | xargs ls -l > /tmp/obj-ls-$iter
6623 +# find $all_dirs -name "*.h" | xargs ls -l > /tmp/ls-$iter
6624 +
6625 +all_dirs=
6626 +if [ "$building_out_of_srctree" ]; then
6627 + for d in $dir_list; do
6628 + all_dirs="$all_dirs $srctree/$d"
6629 + done
6630 +fi
6631 +all_dirs="$all_dirs $dir_list"
6632
6633 # include/generated/compile.h is ignored because it is touched even when none
6634 -# of the source files changed. This causes pointless regeneration, so let us
6635 -# ignore them for md5 calculation.
6636 -pushd $srctree > /dev/null
6637 -src_files_md5="$(find $dir_list -name "*.h" |
6638 - grep -v "include/generated/compile.h" |
6639 - grep -v "include/generated/autoconf.h" |
6640 - xargs ls -l | md5sum | cut -d ' ' -f1)"
6641 -popd > /dev/null
6642 -obj_files_md5="$(find $dir_list -name "*.h" |
6643 - grep -v "include/generated/compile.h" |
6644 - grep -v "include/generated/autoconf.h" |
6645 +# of the source files changed.
6646 +#
6647 +# When Kconfig regenerates include/generated/autoconf.h, its timestamp is
6648 +# updated, but the contents might still be the same. When any CONFIG option is
6649 +# changed, Kconfig touches the corresponding timestamp file include/config/*.h.
6650 +# Hence, the md5sum detects the configuration change anyway. We do not need to
6651 +# check include/generated/autoconf.h explicitly.
6652 +#
6653 +# Ignore them for md5 calculation to avoid pointless regeneration.
6654 +headers_md5="$(find $all_dirs -name "*.h" |
6655 + grep -v "include/generated/compile.h" |
6656 + grep -v "include/generated/autoconf.h" |
6657 xargs ls -l | md5sum | cut -d ' ' -f1)"
6658 +
6659 # Any changes to this script will also cause a rebuild of the archive.
6660 this_file_md5="$(ls -l $sfile | md5sum | cut -d ' ' -f1)"
6661 if [ -f $tarfile ]; then tarfile_md5="$(md5sum $tarfile | cut -d ' ' -f1)"; fi
6662 if [ -f kernel/kheaders.md5 ] &&
6663 - [ "$(cat kernel/kheaders.md5|head -1)" == "$src_files_md5" ] &&
6664 - [ "$(cat kernel/kheaders.md5|head -2|tail -1)" == "$obj_files_md5" ] &&
6665 - [ "$(cat kernel/kheaders.md5|head -3|tail -1)" == "$this_file_md5" ] &&
6666 - [ "$(cat kernel/kheaders.md5|tail -1)" == "$tarfile_md5" ]; then
6667 + [ "$(head -n 1 kernel/kheaders.md5)" = "$headers_md5" ] &&
6668 + [ "$(head -n 2 kernel/kheaders.md5 | tail -n 1)" = "$this_file_md5" ] &&
6669 + [ "$(tail -n 1 kernel/kheaders.md5)" = "$tarfile_md5" ]; then
6670 exit
6671 fi
6672
6673 @@ -55,14 +63,17 @@ fi
6674 rm -rf $cpio_dir
6675 mkdir $cpio_dir
6676
6677 -pushd $srctree > /dev/null
6678 -for f in $dir_list;
6679 - do find "$f" -name "*.h";
6680 -done | cpio --quiet -pd $cpio_dir
6681 -popd > /dev/null
6682 +if [ "$building_out_of_srctree" ]; then
6683 + (
6684 + cd $srctree
6685 + for f in $dir_list
6686 + do find "$f" -name "*.h";
6687 + done | cpio --quiet -pd $cpio_dir
6688 + )
6689 +fi
6690
6691 -# The second CPIO can complain if files already exist which can
6692 -# happen with out of tree builds. Just silence CPIO for now.
6693 +# The second CPIO can complain if files already exist, which can happen with
6694 +# out-of-tree builds having stale headers in srctree. Just silence CPIO for now.
6695 for f in $dir_list;
6696 do find "$f" -name "*.h";
6697 done | cpio --quiet -pd $cpio_dir >/dev/null 2>&1
6698 @@ -77,10 +88,9 @@ find $cpio_dir -type f -print0 |
6699 find $cpio_dir -printf "./%P\n" | LC_ALL=C sort | \
6700 tar "${KBUILD_BUILD_TIMESTAMP:+--mtime=$KBUILD_BUILD_TIMESTAMP}" \
6701 --owner=0 --group=0 --numeric-owner --no-recursion \
6702 - -Jcf $tarfile -C $cpio_dir/ -T - > /dev/null
6703 + -I $XZ -cf $tarfile -C $cpio_dir/ -T - > /dev/null
6704
6705 -echo "$src_files_md5" > kernel/kheaders.md5
6706 -echo "$obj_files_md5" >> kernel/kheaders.md5
6707 +echo $headers_md5 > kernel/kheaders.md5
6708 echo "$this_file_md5" >> kernel/kheaders.md5
6709 echo "$(md5sum $tarfile | cut -d ' ' -f1)" >> kernel/kheaders.md5
6710
6711 diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
6712 index 30cc217b86318..651a4ad6d711f 100644
6713 --- a/kernel/irq/matrix.c
6714 +++ b/kernel/irq/matrix.c
6715 @@ -380,6 +380,13 @@ int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
6716 unsigned int cpu, bit;
6717 struct cpumap *cm;
6718
6719 + /*
6720 + * Not required in theory, but matrix_find_best_cpu() uses
6721 + * for_each_cpu(), which ignores the cpumask on UP.
6722 + */
6723 + if (cpumask_empty(msk))
6724 + return -EINVAL;
6725 +
6726 cpu = matrix_find_best_cpu(m, msk);
6727 if (cpu == UINT_MAX)
6728 return -ENOSPC;
6729 diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
6730 index 9bb6d2497b040..581f818181386 100644
6731 --- a/kernel/locking/lockdep_proc.c
6732 +++ b/kernel/locking/lockdep_proc.c
6733 @@ -400,7 +400,7 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
6734 seq_time(m, lt->min);
6735 seq_time(m, lt->max);
6736 seq_time(m, lt->total);
6737 - seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
6738 + seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
6739 }
6740
6741 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
6742 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
6743 index b34b5c6e25248..352239c411a44 100644
6744 --- a/kernel/sched/core.c
6745 +++ b/kernel/sched/core.c
6746 @@ -794,6 +794,26 @@ unsigned int sysctl_sched_uclamp_util_max = SCHED_CAPACITY_SCALE;
6747 /* All clamps are required to be less or equal than these values */
6748 static struct uclamp_se uclamp_default[UCLAMP_CNT];
6749
6750 +/*
6751 + * This static key is used to reduce the uclamp overhead in the fast path. It
6752 + * primarily disables the call to uclamp_rq_{inc, dec}() in
6753 + * enqueue/dequeue_task().
6754 + *
6755 + * This allows users to continue to enable uclamp in their kernel config with
6756 + * minimum uclamp overhead in the fast path.
6757 + *
6758 + * As soon as userspace modifies any of the uclamp knobs, the static key is
6759 + * enabled, since we have actual users that make use of uclamp
6760 + * functionality.
6761 + *
6762 + * The knobs that would enable this static key are:
6763 + *
6764 + * * A task modifying its uclamp value with sched_setattr().
6765 + * * An admin modifying the sysctl_sched_uclamp_{min, max} via procfs.
6766 + * * An admin modifying the cgroup cpu.uclamp.{min, max}.
6767 + */
6768 +DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
6769 +
6770 /* Integer rounded range for each bucket */
6771 #define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
6772
6773 @@ -990,10 +1010,38 @@ static inline void uclamp_rq_dec_id(struct rq *rq, struct task_struct *p,
6774
6775 lockdep_assert_held(&rq->lock);
6776
6777 + /*
6778 + * If sched_uclamp_used was enabled after task @p was enqueued,
6779 + * we could end up with an unbalanced call to uclamp_rq_dec_id().
6780 + *
6781 + * In this case the uc_se->active flag should be false since no uclamp
6782 + * accounting was performed at enqueue time and we can just return
6783 + * here.
6784 + *
6785 + * Need to be careful of the following enqueue/dequeue ordering
6786 + * problem too:
6787 + *
6788 + * enqueue(taskA)
6789 + * // sched_uclamp_used gets enabled
6790 + * enqueue(taskB)
6791 + * dequeue(taskA)
6792 + * // Must not decrement bucket->tasks here
6793 + * dequeue(taskB)
6794 + *
6795 + * where we could end up with stale data in uc_se and
6796 + * bucket[uc_se->bucket_id].
6797 + *
6798 + * The following check eliminates the possibility of such a race.
6799 + */
6800 + if (unlikely(!uc_se->active))
6801 + return;
6802 +
6803 bucket = &uc_rq->bucket[uc_se->bucket_id];
6804 +
6805 SCHED_WARN_ON(!bucket->tasks);
6806 if (likely(bucket->tasks))
6807 bucket->tasks--;
6808 +
6809 uc_se->active = false;
6810
6811 /*
6812 @@ -1021,6 +1069,15 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p)
6813 {
6814 enum uclamp_id clamp_id;
6815
6816 + /*
6817 + * Avoid any overhead until uclamp is actually used by userspace.
6818 + *
6819 + * The condition is constructed such that a NOP is generated when
6820 + * sched_uclamp_used is disabled.
6821 + */
6822 + if (!static_branch_unlikely(&sched_uclamp_used))
6823 + return;
6824 +
6825 if (unlikely(!p->sched_class->uclamp_enabled))
6826 return;
6827
6828 @@ -1036,6 +1093,15 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p)
6829 {
6830 enum uclamp_id clamp_id;
6831
6832 + /*
6833 + * Avoid any overhead until uclamp is actually used by userspace.
6834 + *
6835 + * The condition is constructed such that a NOP is generated when
6836 + * sched_uclamp_used is disabled.
6837 + */
6838 + if (!static_branch_unlikely(&sched_uclamp_used))
6839 + return;
6840 +
6841 if (unlikely(!p->sched_class->uclamp_enabled))
6842 return;
6843
6844 @@ -1145,8 +1211,10 @@ int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
6845 update_root_tg = true;
6846 }
6847
6848 - if (update_root_tg)
6849 + if (update_root_tg) {
6850 + static_branch_enable(&sched_uclamp_used);
6851 uclamp_update_root_tg();
6852 + }
6853
6854 /*
6855 * We update all RUNNABLE tasks only when task groups are in use.
6856 @@ -1181,6 +1249,15 @@ static int uclamp_validate(struct task_struct *p,
6857 if (upper_bound > SCHED_CAPACITY_SCALE)
6858 return -EINVAL;
6859
6860 + /*
6861 + * We have valid uclamp attributes; make sure uclamp is enabled.
6862 + *
6863 + * We need to do that here, because enabling static branches is a
6864 + * blocking operation which obviously cannot be done while holding
6865 + * scheduler locks.
6866 + */
6867 + static_branch_enable(&sched_uclamp_used);
6868 +
6869 return 0;
6870 }
6871
6872 @@ -7294,6 +7371,8 @@ static ssize_t cpu_uclamp_write(struct kernfs_open_file *of, char *buf,
6873 if (req.ret)
6874 return req.ret;
6875
6876 + static_branch_enable(&sched_uclamp_used);
6877 +
6878 mutex_lock(&uclamp_mutex);
6879 rcu_read_lock();
6880
6881 diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
6882 index b6f56e7c8dd16..4cb80e6042c4f 100644
6883 --- a/kernel/sched/cpufreq_schedutil.c
6884 +++ b/kernel/sched/cpufreq_schedutil.c
6885 @@ -210,7 +210,7 @@ unsigned long schedutil_cpu_util(int cpu, unsigned long util_cfs,
6886 unsigned long dl_util, util, irq;
6887 struct rq *rq = cpu_rq(cpu);
6888
6889 - if (!IS_BUILTIN(CONFIG_UCLAMP_TASK) &&
6890 + if (!uclamp_is_used() &&
6891 type == FREQUENCY_UTIL && rt_rq_is_runnable(&rq->rt)) {
6892 return max;
6893 }
6894 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
6895 index 570659f1c6e22..9f2a9e34a78d5 100644
6896 --- a/kernel/sched/sched.h
6897 +++ b/kernel/sched/sched.h
6898 @@ -841,6 +841,8 @@ struct uclamp_rq {
6899 unsigned int value;
6900 struct uclamp_bucket bucket[UCLAMP_BUCKETS];
6901 };
6902 +
6903 +DECLARE_STATIC_KEY_FALSE(sched_uclamp_used);
6904 #endif /* CONFIG_UCLAMP_TASK */
6905
6906 /*
6907 @@ -2319,12 +2321,35 @@ static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
6908 #ifdef CONFIG_UCLAMP_TASK
6909 unsigned int uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id);
6910
6911 +/**
6912 + * uclamp_util_with - clamp @util with @rq and @p effective uclamp values.
6913 + * @rq: The rq to clamp against. Must not be NULL.
6914 + * @util: The util value to clamp.
6915 + * @p: The task to clamp against. Can be NULL if you want to clamp
6916 + * against @rq only.
6917 + *
6918 + * Clamps the passed @util to the max(@rq, @p) effective uclamp values.
6919 + *
6920 + * If the sched_uclamp_used static key is disabled, then just return the util
6921 + * without any clamping since uclamp aggregation at the rq level in the fast
6922 + * path is disabled, rendering this operation a NOP.
6923 + *
6924 + * Use uclamp_eff_value() if you don't care about uclamp values at rq level. It
6925 + * will return the correct effective uclamp value of the task even if the
6926 + * static key is disabled.
6927 + */
6928 static __always_inline
6929 unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
6930 struct task_struct *p)
6931 {
6932 - unsigned int min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
6933 - unsigned int max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
6934 + unsigned int min_util;
6935 + unsigned int max_util;
6936 +
6937 + if (!static_branch_likely(&sched_uclamp_used))
6938 + return util;
6939 +
6940 + min_util = READ_ONCE(rq->uclamp[UCLAMP_MIN].value);
6941 + max_util = READ_ONCE(rq->uclamp[UCLAMP_MAX].value);
6942
6943 if (p) {
6944 min_util = max(min_util, uclamp_eff_value(p, UCLAMP_MIN));
6945 @@ -2346,6 +2371,19 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
6946 {
6947 return uclamp_util_with(rq, util, NULL);
6948 }
6949 +
6950 +/*
6951 + * When uclamp is compiled in, the aggregation at rq level is 'turned off'
6952 + * by default in the fast path and only gets turned on once userspace performs
6953 + * an operation that requires it.
6954 + *
6955 + * Returns true if userspace opted in to use uclamp, and hence aggregation at
6956 + * rq level is active.
6957 + */
6958 +static inline bool uclamp_is_used(void)
6959 +{
6960 + return static_branch_likely(&sched_uclamp_used);
6961 +}
6962 #else /* CONFIG_UCLAMP_TASK */
6963 static inline unsigned int uclamp_util_with(struct rq *rq, unsigned int util,
6964 struct task_struct *p)
6965 @@ -2356,6 +2394,11 @@ static inline unsigned int uclamp_util(struct rq *rq, unsigned int util)
6966 {
6967 return util;
6968 }
6969 +
6970 +static inline bool uclamp_is_used(void)
6971 +{
6972 + return false;
6973 +}
6974 #endif /* CONFIG_UCLAMP_TASK */
6975
6976 #ifdef arch_scale_freq_capacity
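
The uclamp changes above gate every fast-path clamp behind the sched_uclamp_used static key, so kernels built with CONFIG_UCLAMP_TASK=y pay essentially nothing until userspace first touches a uclamp knob. A real static key compiles down to a patched NOP; the compilable sketch below merely approximates the logic with a plain bool (hypothetical names, not the kernel code):

#include <stdbool.h>
#include <stdio.h>

/* Plain-bool stand-in for DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
 * the real key patches the branch itself, this only models the logic. */
static bool uclamp_used;

static unsigned int clamp_util(unsigned int util, unsigned int lo,
			       unsigned int hi)
{
	if (!uclamp_used)               /* fast path: feature never enabled */
		return util;
	if (util < lo)
		return lo;
	if (util > hi)
		return hi;
	return util;
}

int main(void)
{
	printf("%u\n", clamp_util(900, 100, 512));  /* 900: key disabled */
	uclamp_used = true;                         /* first user opts in */
	printf("%u\n", clamp_util(900, 100, 512));  /* 512: clamped */
	return 0;
}
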
6977 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
6978 index a4c8f9d9522e4..884333b9fc767 100644
6979 --- a/kernel/trace/blktrace.c
6980 +++ b/kernel/trace/blktrace.c
6981 @@ -535,6 +535,18 @@ static int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
6982 #endif
6983 bt->dir = dir = debugfs_create_dir(buts->name, blk_debugfs_root);
6984
6985 + /*
6986 + * As blktrace relies on debugfs for its interface, the debugfs directory
6987 + * is required, contrary to the usual mantra of not checking for debugfs
6988 + * files or directories.
6989 + */
6990 + if (IS_ERR_OR_NULL(dir)) {
6991 + pr_warn("debugfs_dir not present for %s so skipping\n",
6992 + buts->name);
6993 + ret = -ENOENT;
6994 + goto err;
6995 + }
6996 +
6997 bt->dev = dev;
6998 atomic_set(&bt->dropped, 0);
6999 INIT_LIST_HEAD(&bt->running_list);
7000 diff --git a/mm/cma.c b/mm/cma.c
7001 index 7fe0b8356775f..7de520c0a1db6 100644
7002 --- a/mm/cma.c
7003 +++ b/mm/cma.c
7004 @@ -93,19 +93,15 @@ static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
7005 mutex_unlock(&cma->lock);
7006 }
7007
7008 -static int __init cma_activate_area(struct cma *cma)
7009 +static void __init cma_activate_area(struct cma *cma)
7010 {
7011 - int bitmap_size = BITS_TO_LONGS(cma_bitmap_maxno(cma)) * sizeof(long);
7012 unsigned long base_pfn = cma->base_pfn, pfn = base_pfn;
7013 unsigned i = cma->count >> pageblock_order;
7014 struct zone *zone;
7015
7016 - cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
7017 -
7018 - if (!cma->bitmap) {
7019 - cma->count = 0;
7020 - return -ENOMEM;
7021 - }
7022 + cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
7023 + if (!cma->bitmap)
7024 + goto out_error;
7025
7026 WARN_ON_ONCE(!pfn_valid(pfn));
7027 zone = page_zone(pfn_to_page(pfn));
7028 @@ -135,25 +131,22 @@ static int __init cma_activate_area(struct cma *cma)
7029 spin_lock_init(&cma->mem_head_lock);
7030 #endif
7031
7032 - return 0;
7033 + return;
7034
7035 not_in_zone:
7036 - pr_err("CMA area %s could not be activated\n", cma->name);
7037 - kfree(cma->bitmap);
7038 + bitmap_free(cma->bitmap);
7039 +out_error:
7040 cma->count = 0;
7041 - return -EINVAL;
7042 + pr_err("CMA area %s could not be activated\n", cma->name);
7043 + return;
7044 }
7045
7046 static int __init cma_init_reserved_areas(void)
7047 {
7048 int i;
7049
7050 - for (i = 0; i < cma_area_count; i++) {
7051 - int ret = cma_activate_area(&cma_areas[i]);
7052 -
7053 - if (ret)
7054 - return ret;
7055 - }
7056 + for (i = 0; i < cma_area_count; i++)
7057 + cma_activate_area(&cma_areas[i]);
7058
7059 return 0;
7060 }
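
The cma_activate_area() cleanup above swaps a hand-rolled BITS_TO_LONGS() * sizeof(long) kzalloc() for bitmap_zalloc()/bitmap_free(), which do that sizing internally. A userspace approximation of what bitmap_zalloc() hides (the macro mirrors the kernel's; the allocator is just calloc()):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Round nbits up to whole longs, as the kernel's BITS_TO_LONGS() does. */
#define BITS_TO_LONGS(n) \
	(((n) + CHAR_BIT * sizeof(long) - 1) / (CHAR_BIT * sizeof(long)))

static unsigned long *bitmap_zalloc_demo(unsigned long nbits)
{
	return calloc(BITS_TO_LONGS(nbits), sizeof(long));
}

int main(void)
{
	unsigned long *bm = bitmap_zalloc_demo(1000);

	if (!bm)
		return 1;
	printf("%zu longs back a 1000-bit bitmap\n",
	       (size_t)BITS_TO_LONGS(1000));
	free(bm);                       /* analogue of bitmap_free() */
	return 0;
}
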
7061 diff --git a/mm/mmu_context.c b/mm/mmu_context.c
7062 index 3e612ae748e96..a1da47e027479 100644
7063 --- a/mm/mmu_context.c
7064 +++ b/mm/mmu_context.c
7065 @@ -25,13 +25,16 @@ void use_mm(struct mm_struct *mm)
7066 struct task_struct *tsk = current;
7067
7068 task_lock(tsk);
7069 + /* Hold off tlb flush IPIs while switching mm's */
7070 + local_irq_disable();
7071 active_mm = tsk->active_mm;
7072 if (active_mm != mm) {
7073 mmgrab(mm);
7074 tsk->active_mm = mm;
7075 }
7076 tsk->mm = mm;
7077 - switch_mm(active_mm, mm, tsk);
7078 + switch_mm_irqs_off(active_mm, mm, tsk);
7079 + local_irq_enable();
7080 task_unlock(tsk);
7081 #ifdef finish_arch_post_lock_switch
7082 finish_arch_post_lock_switch();
7083 @@ -56,9 +59,11 @@ void unuse_mm(struct mm_struct *mm)
7084
7085 task_lock(tsk);
7086 sync_mm_rss(mm);
7087 + local_irq_disable();
7088 tsk->mm = NULL;
7089 /* active_mm is still 'mm' */
7090 enter_lazy_tlb(mm, tsk);
7091 + local_irq_enable();
7092 task_unlock(tsk);
7093 }
7094 EXPORT_SYMBOL_GPL(unuse_mm);
7095 diff --git a/mm/shuffle.c b/mm/shuffle.c
7096 index b3fe97fd66541..56958ffa5a3a9 100644
7097 --- a/mm/shuffle.c
7098 +++ b/mm/shuffle.c
7099 @@ -58,25 +58,25 @@ module_param_call(shuffle, shuffle_store, shuffle_show, &shuffle_param, 0400);
7100 * For two pages to be swapped in the shuffle, they must be free (on a
7101 * 'free_area' lru), have the same order, and have the same migratetype.
7102 */
7103 -static struct page * __meminit shuffle_valid_page(unsigned long pfn, int order)
7104 +static struct page * __meminit shuffle_valid_page(struct zone *zone,
7105 + unsigned long pfn, int order)
7106 {
7107 - struct page *page;
7108 + struct page *page = pfn_to_online_page(pfn);
7109
7110 /*
7111 * Given we're dealing with randomly selected pfns in a zone we
7112 * need to ask questions like...
7113 */
7114
7115 - /* ...is the pfn even in the memmap? */
7116 - if (!pfn_valid_within(pfn))
7117 + /* ... is the page managed by the buddy? */
7118 + if (!page)
7119 return NULL;
7120
7121 - /* ...is the pfn in a present section or a hole? */
7122 - if (!pfn_present(pfn))
7123 + /* ... is the page assigned to the same zone? */
7124 + if (page_zone(page) != zone)
7125 return NULL;
7126
7127 /* ...is the page free and currently on a free_area list? */
7128 - page = pfn_to_page(pfn);
7129 if (!PageBuddy(page))
7130 return NULL;
7131
7132 @@ -123,7 +123,7 @@ void __meminit __shuffle_zone(struct zone *z)
7133 * page_j randomly selected in the span @zone_start_pfn to
7134 * @spanned_pages.
7135 */
7136 - page_i = shuffle_valid_page(i, order);
7137 + page_i = shuffle_valid_page(z, i, order);
7138 if (!page_i)
7139 continue;
7140
7141 @@ -137,7 +137,7 @@ void __meminit __shuffle_zone(struct zone *z)
7142 j = z->zone_start_pfn +
7143 ALIGN_DOWN(get_random_long() % z->spanned_pages,
7144 order_pages);
7145 - page_j = shuffle_valid_page(j, order);
7146 + page_j = shuffle_valid_page(z, j, order);
7147 if (page_j && page_j != page_i)
7148 break;
7149 }
7150 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
7151 index ad4d00bd79147..5797e1eeaa7e6 100644
7152 --- a/mm/vmalloc.c
7153 +++ b/mm/vmalloc.c
7154 @@ -85,6 +85,8 @@ static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
7155 if (pmd_none_or_clear_bad(pmd))
7156 continue;
7157 vunmap_pte_range(pmd, addr, next);
7158 +
7159 + cond_resched();
7160 } while (pmd++, addr = next, addr != end);
7161 }
7162
7163 diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
7164 index 8096732223828..8d033a75a766e 100644
7165 --- a/net/bridge/netfilter/nf_conntrack_bridge.c
7166 +++ b/net/bridge/netfilter/nf_conntrack_bridge.c
7167 @@ -168,6 +168,7 @@ static unsigned int nf_ct_br_defrag4(struct sk_buff *skb,
7168 static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
7169 const struct nf_hook_state *state)
7170 {
7171 +#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
7172 u16 zone_id = NF_CT_DEFAULT_ZONE_ID;
7173 enum ip_conntrack_info ctinfo;
7174 struct br_input_skb_cb cb;
7175 @@ -180,14 +181,17 @@ static unsigned int nf_ct_br_defrag6(struct sk_buff *skb,
7176
7177 br_skb_cb_save(skb, &cb, sizeof(struct inet6_skb_parm));
7178
7179 - err = nf_ipv6_br_defrag(state->net, skb,
7180 - IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
7181 + err = nf_ct_frag6_gather(state->net, skb,
7182 + IP_DEFRAG_CONNTRACK_BRIDGE_IN + zone_id);
7183 /* queued */
7184 if (err == -EINPROGRESS)
7185 return NF_STOLEN;
7186
7187 br_skb_cb_restore(skb, &cb, IP6CB(skb)->frag_max_size);
7188 return err == 0 ? NF_ACCEPT : NF_DROP;
7189 +#else
7190 + return NF_ACCEPT;
7191 +#endif
7192 }
7193
7194 static int nf_ct_br_ip_check(const struct sk_buff *skb)
7195 diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
7196 index dbd215cbc53d8..a8dd956b5e8e1 100644
7197 --- a/net/can/j1939/transport.c
7198 +++ b/net/can/j1939/transport.c
7199 @@ -1803,7 +1803,20 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
7200 }
7201
7202 tpdat = se_skb->data;
7203 - memcpy(&tpdat[offset], &dat[1], nbytes);
7204 + if (!session->transmission) {
7205 + memcpy(&tpdat[offset], &dat[1], nbytes);
7206 + } else {
7207 + int err;
7208 +
7209 + err = memcmp(&tpdat[offset], &dat[1], nbytes);
7210 + if (err)
7211 + netdev_err_once(priv->ndev,
7212 + "%s: 0x%p: Data of RX-looped back packet (%*ph) doesn't match TX data (%*ph)!\n",
7213 + __func__, session,
7214 + nbytes, &dat[1],
7215 + nbytes, &tpdat[offset]);
7216 + }
7217 +
7218 if (packet == session->pkt.rx)
7219 session->pkt.rx++;
7220
7221 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
7222 index 973a71f4bc898..f80b6999ca1cb 100644
7223 --- a/net/core/skbuff.c
7224 +++ b/net/core/skbuff.c
7225 @@ -5317,8 +5317,8 @@ struct sk_buff *skb_vlan_untag(struct sk_buff *skb)
7226 skb = skb_share_check(skb, GFP_ATOMIC);
7227 if (unlikely(!skb))
7228 goto err_free;
7229 -
7230 - if (unlikely(!pskb_may_pull(skb, VLAN_HLEN)))
7231 + /* We may access the two bytes after vlan_hdr in vlan_set_encap_proto(). */
7232 + if (unlikely(!pskb_may_pull(skb, VLAN_HLEN + sizeof(unsigned short))))
7233 goto err_free;
7234
7235 vhdr = (struct vlan_hdr *)skb->data;
7236 diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c
7237 index a01f500d6a6ba..afa2c5049845f 100644
7238 --- a/net/ipv4/nexthop.c
7239 +++ b/net/ipv4/nexthop.c
7240 @@ -403,7 +403,7 @@ static int nh_check_attr_group(struct net *net, struct nlattr *tb[],
7241 struct nexthop_grp *nhg;
7242 unsigned int i, j;
7243
7244 - if (len & (sizeof(struct nexthop_grp) - 1)) {
7245 + if (!len || len & (sizeof(struct nexthop_grp) - 1)) {
7246 NL_SET_ERR_MSG(extack,
7247 "Invalid length for nexthop group attribute");
7248 return -EINVAL;
7249 @@ -1105,6 +1105,9 @@ static struct nexthop *nexthop_create_group(struct net *net,
7250 struct nexthop *nh;
7251 int i;
7252
7253 + if (WARN_ON(!num_nh))
7254 + return ERR_PTR(-EINVAL);
7255 +
7256 nh = nexthop_alloc();
7257 if (!nh)
7258 return ERR_PTR(-ENOMEM);
7259 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
7260 index b5dd20c4599bb..8dcf7bacc99a6 100644
7261 --- a/net/ipv6/ip6_tunnel.c
7262 +++ b/net/ipv6/ip6_tunnel.c
7263 @@ -860,7 +860,15 @@ int ip6_tnl_rcv(struct ip6_tnl *t, struct sk_buff *skb,
7264 struct metadata_dst *tun_dst,
7265 bool log_ecn_err)
7266 {
7267 - return __ip6_tnl_rcv(t, skb, tpi, tun_dst, ip6ip6_dscp_ecn_decapsulate,
7268 + int (*dscp_ecn_decapsulate)(const struct ip6_tnl *t,
7269 + const struct ipv6hdr *ipv6h,
7270 + struct sk_buff *skb);
7271 +
7272 + dscp_ecn_decapsulate = ip6ip6_dscp_ecn_decapsulate;
7273 + if (tpi->proto == htons(ETH_P_IP))
7274 + dscp_ecn_decapsulate = ip4ip6_dscp_ecn_decapsulate;
7275 +
7276 + return __ip6_tnl_rcv(t, skb, tpi, tun_dst, dscp_ecn_decapsulate,
7277 log_ecn_err);
7278 }
7279 EXPORT_SYMBOL(ip6_tnl_rcv);
7280 diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
7281 index 409e79b84a830..6d0e942d082d4 100644
7282 --- a/net/ipv6/netfilter.c
7283 +++ b/net/ipv6/netfilter.c
7284 @@ -245,9 +245,6 @@ static const struct nf_ipv6_ops ipv6ops = {
7285 .route_input = ip6_route_input,
7286 .fragment = ip6_fragment,
7287 .reroute = nf_ip6_reroute,
7288 -#if IS_MODULE(CONFIG_IPV6) && IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
7289 - .br_defrag = nf_ct_frag6_gather,
7290 -#endif
7291 #if IS_MODULE(CONFIG_IPV6)
7292 .br_fragment = br_ip6_fragment,
7293 #endif
7294 diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
7295 index 1ce1e710d0252..a699e318b9a01 100644
7296 --- a/net/qrtr/qrtr.c
7297 +++ b/net/qrtr/qrtr.c
7298 @@ -547,23 +547,25 @@ static void qrtr_port_remove(struct qrtr_sock *ipc)
7299 */
7300 static int qrtr_port_assign(struct qrtr_sock *ipc, int *port)
7301 {
7302 + u32 min_port;
7303 int rc;
7304
7305 mutex_lock(&qrtr_port_lock);
7306 if (!*port) {
7307 - rc = idr_alloc(&qrtr_ports, ipc,
7308 - QRTR_MIN_EPH_SOCKET, QRTR_MAX_EPH_SOCKET + 1,
7309 - GFP_ATOMIC);
7310 - if (rc >= 0)
7311 - *port = rc;
7312 + min_port = QRTR_MIN_EPH_SOCKET;
7313 + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, QRTR_MAX_EPH_SOCKET, GFP_ATOMIC);
7314 + if (!rc)
7315 + *port = min_port;
7316 } else if (*port < QRTR_MIN_EPH_SOCKET && !capable(CAP_NET_ADMIN)) {
7317 rc = -EACCES;
7318 } else if (*port == QRTR_PORT_CTRL) {
7319 - rc = idr_alloc(&qrtr_ports, ipc, 0, 1, GFP_ATOMIC);
7320 + min_port = 0;
7321 + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, 0, GFP_ATOMIC);
7322 } else {
7323 - rc = idr_alloc(&qrtr_ports, ipc, *port, *port + 1, GFP_ATOMIC);
7324 - if (rc >= 0)
7325 - *port = rc;
7326 + min_port = *port;
7327 + rc = idr_alloc_u32(&qrtr_ports, ipc, &min_port, *port, GFP_ATOMIC);
7328 + if (!rc)
7329 + *port = min_port;
7330 }
7331 mutex_unlock(&qrtr_port_lock);
7332
7333 diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
7334 index e0060aefbf9d8..e32c4732ddf83 100644
7335 --- a/net/sched/act_ct.c
7336 +++ b/net/sched/act_ct.c
7337 @@ -186,7 +186,7 @@ static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
7338 memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
7339 err = nf_ct_frag6_gather(net, skb, user);
7340 if (err && err != -EINPROGRESS)
7341 - goto out_free;
7342 + return err;
7343 #else
7344 err = -EOPNOTSUPP;
7345 goto out_free;
7346 diff --git a/net/sctp/stream.c b/net/sctp/stream.c
7347 index e13cbd5c01932..cd20638b61514 100644
7348 --- a/net/sctp/stream.c
7349 +++ b/net/sctp/stream.c
7350 @@ -88,12 +88,13 @@ static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
7351 int ret;
7352
7353 if (outcnt <= stream->outcnt)
7354 - return 0;
7355 + goto out;
7356
7357 ret = genradix_prealloc(&stream->out, outcnt, gfp);
7358 if (ret)
7359 return ret;
7360
7361 +out:
7362 stream->outcnt = outcnt;
7363 return 0;
7364 }
7365 @@ -104,12 +105,13 @@ static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
7366 int ret;
7367
7368 if (incnt <= stream->incnt)
7369 - return 0;
7370 + goto out;
7371
7372 ret = genradix_prealloc(&stream->in, incnt, gfp);
7373 if (ret)
7374 return ret;
7375
7376 +out:
7377 stream->incnt = incnt;
7378 return 0;
7379 }
7380 diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
7381 index e1f64f4ba2361..da9ba6d1679b7 100644
7382 --- a/net/smc/smc_diag.c
7383 +++ b/net/smc/smc_diag.c
7384 @@ -170,13 +170,15 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
7385 (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
7386 !list_empty(&smc->conn.lgr->list)) {
7387 struct smc_connection *conn = &smc->conn;
7388 - struct smcd_diag_dmbinfo dinfo = {
7389 - .linkid = *((u32 *)conn->lgr->id),
7390 - .peer_gid = conn->lgr->peer_gid,
7391 - .my_gid = conn->lgr->smcd->local_gid,
7392 - .token = conn->rmb_desc->token,
7393 - .peer_token = conn->peer_token
7394 - };
7395 + struct smcd_diag_dmbinfo dinfo;
7396 +
7397 + memset(&dinfo, 0, sizeof(dinfo));
7398 +
7399 + dinfo.linkid = *((u32 *)conn->lgr->id);
7400 + dinfo.peer_gid = conn->lgr->peer_gid;
7401 + dinfo.my_gid = conn->lgr->smcd->local_gid;
7402 + dinfo.token = conn->rmb_desc->token;
7403 + dinfo.peer_token = conn->peer_token;
7404
7405 if (nla_put(skb, SMC_DIAG_DMBINFO, sizeof(dinfo), &dinfo) < 0)
7406 goto errout;
7407 diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
7408 index d4d2928424e2f..11be9a84f8de9 100644
7409 --- a/net/tipc/netlink_compat.c
7410 +++ b/net/tipc/netlink_compat.c
7411 @@ -255,8 +255,9 @@ err_out:
7412 static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
7413 struct tipc_nl_compat_msg *msg)
7414 {
7415 - int err;
7416 + struct nlmsghdr *nlh;
7417 struct sk_buff *arg;
7418 + int err;
7419
7420 if (msg->req_type && (!msg->req_size ||
7421 !TLV_CHECK_TYPE(msg->req, msg->req_type)))
7422 @@ -285,6 +286,15 @@ static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
7423 return -ENOMEM;
7424 }
7425
7426 + nlh = nlmsg_put(arg, 0, 0, tipc_genl_family.id, 0, NLM_F_MULTI);
7427 + if (!nlh) {
7428 + kfree_skb(arg);
7429 + kfree_skb(msg->rep);
7430 + msg->rep = NULL;
7431 + return -EMSGSIZE;
7432 + }
7433 + nlmsg_end(arg, nlh);
7434 +
7435 err = __tipc_nl_compat_dumpit(cmd, msg, arg);
7436 if (err) {
7437 kfree_skb(msg->rep);
7438 diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
7439 index 342618a2bccb4..a6d0044328b1f 100644
7440 --- a/scripts/Makefile.lib
7441 +++ b/scripts/Makefile.lib
7442 @@ -230,7 +230,7 @@ cmd_objcopy = $(OBJCOPY) $(OBJCOPYFLAGS) $(OBJCOPYFLAGS_$(@F)) $< $@
7443 # ---------------------------------------------------------------------------
7444
7445 quiet_cmd_gzip = GZIP $@
7446 - cmd_gzip = cat $(real-prereqs) | gzip -n -f -9 > $@
7447 + cmd_gzip = cat $(real-prereqs) | $(KGZIP) -n -f -9 > $@
7448
7449 # DTC
7450 # ---------------------------------------------------------------------------
7451 @@ -322,19 +322,19 @@ printf "%08x\n" $$dec_size | \
7452 )
7453
7454 quiet_cmd_bzip2 = BZIP2 $@
7455 - cmd_bzip2 = { cat $(real-prereqs) | bzip2 -9; $(size_append); } > $@
7456 + cmd_bzip2 = { cat $(real-prereqs) | $(KBZIP2) -9; $(size_append); } > $@
7457
7458 # Lzma
7459 # ---------------------------------------------------------------------------
7460
7461 quiet_cmd_lzma = LZMA $@
7462 - cmd_lzma = { cat $(real-prereqs) | lzma -9; $(size_append); } > $@
7463 + cmd_lzma = { cat $(real-prereqs) | $(LZMA) -9; $(size_append); } > $@
7464
7465 quiet_cmd_lzo = LZO $@
7466 - cmd_lzo = { cat $(real-prereqs) | lzop -9; $(size_append); } > $@
7467 + cmd_lzo = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
7468
7469 quiet_cmd_lz4 = LZ4 $@
7470 - cmd_lz4 = { cat $(real-prereqs) | lz4c -l -c1 stdin stdout; \
7471 + cmd_lz4 = { cat $(real-prereqs) | $(LZ4) -l -c1 stdin stdout; \
7472 $(size_append); } > $@
7473
7474 # U-Boot mkimage
7475 @@ -381,7 +381,7 @@ quiet_cmd_xzkern = XZKERN $@
7476 $(size_append); } > $@
7477
7478 quiet_cmd_xzmisc = XZMISC $@
7479 - cmd_xzmisc = cat $(real-prereqs) | xz --check=crc32 --lzma2=dict=1MiB > $@
7480 + cmd_xzmisc = cat $(real-prereqs) | $(XZ) --check=crc32 --lzma2=dict=1MiB > $@
7481
7482 # ASM offsets
7483 # ---------------------------------------------------------------------------
7484 diff --git a/scripts/Makefile.package b/scripts/Makefile.package
7485 index 56eadcc48d46d..35a617c296115 100644
7486 --- a/scripts/Makefile.package
7487 +++ b/scripts/Makefile.package
7488 @@ -45,7 +45,7 @@ if test "$(objtree)" != "$(srctree)"; then \
7489 false; \
7490 fi ; \
7491 $(srctree)/scripts/setlocalversion --save-scmversion; \
7492 -tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
7493 +tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
7494 --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
7495 rm -f $(objtree)/.scmversion
7496
7497 @@ -127,9 +127,9 @@ util/PERF-VERSION-GEN $(CURDIR)/$(perf-tar)/); \
7498 tar rf $(perf-tar).tar $(perf-tar)/HEAD $(perf-tar)/PERF-VERSION-FILE; \
7499 rm -r $(perf-tar); \
7500 $(if $(findstring tar-src,$@),, \
7501 -$(if $(findstring bz2,$@),bzip2, \
7502 -$(if $(findstring gz,$@),gzip, \
7503 -$(if $(findstring xz,$@),xz, \
7504 +$(if $(findstring bz2,$@),$(KBZIP2), \
7505 +$(if $(findstring gz,$@),$(KGZIP), \
7506 +$(if $(findstring xz,$@),$(XZ), \
7507 $(error unknown target $@)))) \
7508 -f -9 $(perf-tar).tar)
7509
7510 diff --git a/scripts/package/buildtar b/scripts/package/buildtar
7511 index 2f66c81e4021b..3d541cee16ed0 100755
7512 --- a/scripts/package/buildtar
7513 +++ b/scripts/package/buildtar
7514 @@ -28,15 +28,15 @@ case "${1}" in
7515 opts=
7516 ;;
7517 targz-pkg)
7518 - opts=--gzip
7519 + opts="-I ${KGZIP}"
7520 tarball=${tarball}.gz
7521 ;;
7522 tarbz2-pkg)
7523 - opts=--bzip2
7524 + opts="-I ${KBZIP2}"
7525 tarball=${tarball}.bz2
7526 ;;
7527 tarxz-pkg)
7528 - opts=--xz
7529 + opts="-I ${XZ}"
7530 tarball=${tarball}.xz
7531 ;;
7532 *)
7533 diff --git a/scripts/xz_wrap.sh b/scripts/xz_wrap.sh
7534 index 7a2d372f4885a..76e9cbcfbeab4 100755
7535 --- a/scripts/xz_wrap.sh
7536 +++ b/scripts/xz_wrap.sh
7537 @@ -20,4 +20,4 @@ case $SRCARCH in
7538 sparc) BCJ=--sparc ;;
7539 esac
7540
7541 -exec xz --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
7542 +exec $XZ --check=crc32 $BCJ --lzma2=$LZMA2OPTS,dict=32MiB
7543 diff --git a/sound/pci/cs46xx/cs46xx_lib.c b/sound/pci/cs46xx/cs46xx_lib.c
7544 index 5b888b795f7ee..c07a9e735733a 100644
7545 --- a/sound/pci/cs46xx/cs46xx_lib.c
7546 +++ b/sound/pci/cs46xx/cs46xx_lib.c
7547 @@ -766,7 +766,7 @@ static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned
7548 rate = 48000 / 9;
7549
7550 /*
7551 - * We can not capture at at rate greater than the Input Rate (48000).
7552 + * We can not capture at a rate greater than the Input Rate (48000).
7553 * Return an error if an attempt is made to stray outside that limit.
7554 */
7555 if (rate > 48000)
7556 diff --git a/sound/pci/cs46xx/dsp_spos_scb_lib.c b/sound/pci/cs46xx/dsp_spos_scb_lib.c
7557 index 715ead59613da..0bef823c5f61f 100644
7558 --- a/sound/pci/cs46xx/dsp_spos_scb_lib.c
7559 +++ b/sound/pci/cs46xx/dsp_spos_scb_lib.c
7560 @@ -1716,7 +1716,7 @@ int cs46xx_iec958_pre_open (struct snd_cs46xx *chip)
7561 struct dsp_spos_instance * ins = chip->dsp_spos_instance;
7562
7563 if ( ins->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED ) {
7564 - /* remove AsynchFGTxSCB and and PCMSerialInput_II */
7565 + /* remove AsynchFGTxSCB and PCMSerialInput_II */
7566 cs46xx_dsp_disable_spdif_out (chip);
7567
7568 /* save state */
7569 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
7570 index 801abf0fc98b3..103011e7285a3 100644
7571 --- a/sound/pci/hda/hda_codec.c
7572 +++ b/sound/pci/hda/hda_codec.c
7573 @@ -3420,7 +3420,7 @@ EXPORT_SYMBOL_GPL(snd_hda_set_power_save);
7574 * @nid: NID to check / update
7575 *
7576 * Check whether the given NID is in the amp list. If it's in the list,
7577 - * check the current AMP status, and update the the power-status according
7578 + * check the current AMP status, and update the power-status according
7579 * to the mute status.
7580 *
7581 * This function is supposed to be set or called from the check_power_status
7582 diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
7583 index 6815f9dc8545d..e1750bdbe51f6 100644
7584 --- a/sound/pci/hda/hda_generic.c
7585 +++ b/sound/pci/hda/hda_generic.c
7586 @@ -813,7 +813,7 @@ static void activate_amp_in(struct hda_codec *codec, struct nid_path *path,
7587 }
7588 }
7589
7590 -/* sync power of each widget in the the given path */
7591 +/* sync power of each widget in the given path */
7592 static hda_nid_t path_power_update(struct hda_codec *codec,
7593 struct nid_path *path,
7594 bool allow_powerdown)
7595 diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
7596 index 7353d2ec359ae..3a456410937b5 100644
7597 --- a/sound/pci/hda/hda_intel.c
7598 +++ b/sound/pci/hda/hda_intel.c
7599 @@ -2671,6 +2671,8 @@ static const struct pci_device_id azx_ids[] = {
7600 .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI },
7601 /* Zhaoxin */
7602 { PCI_DEVICE(0x1d17, 0x3288), .driver_data = AZX_DRIVER_ZHAOXIN },
7603 + /* Loongson */
7604 + { PCI_DEVICE(0x0014, 0x7a07), .driver_data = AZX_DRIVER_GENERIC },
7605 { 0, }
7606 };
7607 MODULE_DEVICE_TABLE(pci, azx_ids);
7608 diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
7609 index 908b68fda24c9..ec9460f3a288e 100644
7610 --- a/sound/pci/hda/patch_hdmi.c
7611 +++ b/sound/pci/hda/patch_hdmi.c
7612 @@ -176,6 +176,7 @@ struct hdmi_spec {
7613 bool use_jack_detect; /* jack detection enabled */
7614 bool use_acomp_notifier; /* use eld_notify callback for hotplug */
7615 bool acomp_registered; /* audio component registered in this driver */
7616 + bool force_connect; /* force connectivity */
7617 struct drm_audio_component_audio_ops drm_audio_ops;
7618 int (*port2pin)(struct hda_codec *, int); /* reverse port/pin mapping */
7619
7620 @@ -1711,7 +1712,8 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid)
7621 * all device entries on the same pin
7622 */
7623 config = snd_hda_codec_get_pincfg(codec, pin_nid);
7624 - if (get_defcfg_connect(config) == AC_JACK_PORT_NONE)
7625 + if (get_defcfg_connect(config) == AC_JACK_PORT_NONE &&
7626 + !spec->force_connect)
7627 return 0;
7628
7629 /*
7630 @@ -1815,11 +1817,19 @@ static int hdmi_add_cvt(struct hda_codec *codec, hda_nid_t cvt_nid)
7631 return 0;
7632 }
7633
7634 +static const struct snd_pci_quirk force_connect_list[] = {
7635 + SND_PCI_QUIRK(0x103c, 0x870f, "HP", 1),
7636 + SND_PCI_QUIRK(0x103c, 0x871a, "HP", 1),
7637 + {}
7638 +};
7639 +
7640 static int hdmi_parse_codec(struct hda_codec *codec)
7641 {
7642 + struct hdmi_spec *spec = codec->spec;
7643 hda_nid_t start_nid;
7644 unsigned int caps;
7645 int i, nodes;
7646 + const struct snd_pci_quirk *q;
7647
7648 nodes = snd_hda_get_sub_nodes(codec, codec->core.afg, &start_nid);
7649 if (!start_nid || nodes < 0) {
7650 @@ -1827,6 +1837,11 @@ static int hdmi_parse_codec(struct hda_codec *codec)
7651 return -EINVAL;
7652 }
7653
7654 + q = snd_pci_quirk_lookup(codec->bus->pci, force_connect_list);
7655 +
7656 + if (q && q->value)
7657 + spec->force_connect = true;
7658 +
7659 /*
7660 * hdmi_add_pin() assumes total amount of converters to
7661 * be known, so first discover all converters
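
The force_connect fixup above is wired up through the usual snd_pci_quirk table: match the codec's PCI subsystem IDs against a list and flip a flag when a known machine is found. A compilable miniature of that lookup (the IDs are the HP entries from the hunk; everything else is simplified, not the ALSA API):

#include <stdbool.h>
#include <stdio.h>

/* Simplified version of the SND_PCI_QUIRK() table/lookup used above. */
struct quirk {
	unsigned short vendor, device;
	int value;
};

static const struct quirk force_connect_list[] = {
	{ 0x103c, 0x870f, 1 },          /* "HP" entries from the hunk */
	{ 0x103c, 0x871a, 1 },
	{ 0 }
};

static const struct quirk *quirk_lookup(unsigned short v, unsigned short d)
{
	const struct quirk *q;

	for (q = force_connect_list; q->vendor; q++)
		if (q->vendor == v && q->device == d)
			return q;
	return NULL;
}

int main(void)
{
	const struct quirk *q = quirk_lookup(0x103c, 0x870f);
	bool force_connect = q && q->value;

	printf("force_connect=%d\n", force_connect);
	return 0;
}
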
7662 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
7663 index 06bbcfbb28153..d1b74c7cacd76 100644
7664 --- a/sound/pci/hda/patch_realtek.c
7665 +++ b/sound/pci/hda/patch_realtek.c
7666 @@ -6137,6 +6137,7 @@ enum {
7667 ALC269_FIXUP_CZC_L101,
7668 ALC269_FIXUP_LEMOTE_A1802,
7669 ALC269_FIXUP_LEMOTE_A190X,
7670 + ALC256_FIXUP_INTEL_NUC8_RUGGED,
7671 };
7672
7673 static const struct hda_fixup alc269_fixups[] = {
7674 @@ -7458,6 +7459,15 @@ static const struct hda_fixup alc269_fixups[] = {
7675 },
7676 .chain_id = ALC269_FIXUP_DMIC,
7677 },
7678 + [ALC256_FIXUP_INTEL_NUC8_RUGGED] = {
7679 + .type = HDA_FIXUP_PINS,
7680 + .v.pins = (const struct hda_pintbl[]) {
7681 + { 0x1b, 0x01a1913c }, /* use as headset mic, without its own jack detect */
7682 + { }
7683 + },
7684 + .chained = true,
7685 + .chain_id = ALC269_FIXUP_HEADSET_MODE
7686 + },
7687 };
7688
7689 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7690 @@ -7757,6 +7767,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
7691 SND_PCI_QUIRK(0x10ec, 0x118c, "Medion EE4254 MD62100", ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE),
7692 SND_PCI_QUIRK(0x1c06, 0x2013, "Lemote A1802", ALC269_FIXUP_LEMOTE_A1802),
7693 SND_PCI_QUIRK(0x1c06, 0x2015, "Lemote A190X", ALC269_FIXUP_LEMOTE_A190X),
7694 + SND_PCI_QUIRK(0x8086, 0x2080, "Intel NUC 8 Rugged", ALC256_FIXUP_INTEL_NUC8_RUGGED),
7695
7696 #if 0
7697 /* Below is a quirk table taken from the old code.
7698 @@ -7928,6 +7939,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
7699 {.id = ALC299_FIXUP_PREDATOR_SPK, .name = "predator-spk"},
7700 {.id = ALC298_FIXUP_HUAWEI_MBX_STEREO, .name = "huawei-mbx-stereo"},
7701 {.id = ALC256_FIXUP_MEDION_HEADSET_NO_PRESENCE, .name = "alc256-medion-headset"},
7702 + {.id = ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET, .name = "alc298-samsung-headphone"},
7703 {}
7704 };
7705 #define ALC225_STANDARD_PINS \
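
The Intel NUC 8 Rugged fixup above retasks pin 0x1b by overriding its pin-default dword with 0x01a1913c. For reference, a rough field-by-field decode of that value, following the HDA specification's pin default configuration layout:

    /* 0x01a1913c decoded (HDA pin default configuration):
     *   [31:30] 0x0  port connectivity: jack
     *   [29:24] 0x01 location: external, rear panel
     *   [23:20] 0xa  default device: microphone
     *   [19:16] 0x1  connection type: 3.5 mm jack
     *   [15:12] 0x9  color: pink
     *   [11:8]  0x1  misc: jack-detect override set
     *   [7:4]   0x3  default association
     *   [3:0]   0xc  sequence
     */

The jack-detect override bit is what the in-line comment refers to: the pin is used as a headset mic without its own jack detection.
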
7706 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
7707 index 4b9300babc7d0..bfd3fe5eff31c 100644
7708 --- a/sound/pci/hda/patch_sigmatel.c
7709 +++ b/sound/pci/hda/patch_sigmatel.c
7710 @@ -832,7 +832,7 @@ static int stac_auto_create_beep_ctls(struct hda_codec *codec,
7711 static struct snd_kcontrol_new beep_vol_ctl =
7712 HDA_CODEC_VOLUME(NULL, 0, 0, 0);
7713
7714 - /* check for mute support for the the amp */
7715 + /* check for mute support for the amp */
7716 if ((caps & AC_AMPCAP_MUTE) >> AC_AMPCAP_MUTE_SHIFT) {
7717 const struct snd_kcontrol_new *temp;
7718 if (spec->anabeep_nid == nid)
7719 diff --git a/sound/pci/ice1712/prodigy192.c b/sound/pci/ice1712/prodigy192.c
7720 index 98f8ac6587962..243f757da3edb 100644
7721 --- a/sound/pci/ice1712/prodigy192.c
7722 +++ b/sound/pci/ice1712/prodigy192.c
7723 @@ -32,7 +32,7 @@
7724 * Experimentally I found out that only a combination of
7725 * OCKS0=1, OCKS1=1 (128fs, 64fs output) and ice1724 -
7726 * VT1724_MT_I2S_MCLK_128X=0 (256fs input) yields correct
7727 - * sampling rate. That means the the FPGA doubles the
7728 + * sampling rate. That means that the FPGA doubles the
7729 * MCK01 rate.
7730 *
7731 * Copyright (c) 2003 Takashi Iwai <tiwai@suse.de>
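
Worked numbers for the clocking comment above, assuming a 48 kHz stream:

    /* codec output, OCKS0=1/OCKS1=1: 128 * 48000 = 6.144 MHz
     * ice1724 input, MCLK_128X=0:    256 * 48000 = 12.288 MHz
     * The two agree only if the FPGA doubles MCK01, which is the
     * conclusion the comment draws. */
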
7732 diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
7733 index c3f8721624cd4..b90421a1d909a 100644
7734 --- a/sound/pci/oxygen/xonar_dg.c
7735 +++ b/sound/pci/oxygen/xonar_dg.c
7736 @@ -29,7 +29,7 @@
7737 * GPIO 4 <- headphone detect
7738 * GPIO 5 -> enable ADC analog circuit for the left channel
7739 * GPIO 6 -> enable ADC analog circuit for the right channel
7740 - * GPIO 7 -> switch green rear output jack between CS4245 and and the first
7741 + * GPIO 7 -> switch green rear output jack between CS4245 and the first
7742 * channel of CS4361 (mechanical relay)
7743 * GPIO 8 -> enable output to speakers
7744 *
7745 diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
7746 index 18535b326680a..04f23477039a5 100644
7747 --- a/sound/soc/codecs/wm8958-dsp2.c
7748 +++ b/sound/soc/codecs/wm8958-dsp2.c
7749 @@ -416,8 +416,12 @@ int wm8958_aif_ev(struct snd_soc_dapm_widget *w,
7750 struct snd_kcontrol *kcontrol, int event)
7751 {
7752 struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
7753 + struct wm8994 *control = dev_get_drvdata(component->dev->parent);
7754 int i;
7755
7756 + if (control->type != WM8958)
7757 + return 0;
7758 +
7759 switch (event) {
7760 case SND_SOC_DAPM_POST_PMU:
7761 case SND_SOC_DAPM_PRE_PMU:
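
The guard added to wm8958_aif_ev() matters because this codec driver is shared across the wm8994 family (WM8994, WM8958, WM1811), and only the WM8958 has the DSP2 block this file drives. The chip type lives in the MFD parent's driver data, a common pattern for such shared drivers. A sketch with a hypothetical handler name (the real handler takes DAPM widget/kcontrol/event arguments):

    #include <sound/soc.h>
    #include <linux/mfd/wm8994/core.h>

    /* Hypothetical handler showing the family-type guard: the MFD
     * parent device owns the struct wm8994 identifying the chip. */
    static int example_aif_ev(struct snd_soc_component *component)
    {
            struct wm8994 *control = dev_get_drvdata(component->dev->parent);

            if (control->type != WM8958)
                    return 0;       /* no DSP2 on WM8994/WM1811 */

            /* ... WM8958-only DSP2 sequencing ... */
            return 0;
    }
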
7762 diff --git a/sound/soc/img/img-i2s-in.c b/sound/soc/img/img-i2s-in.c
7763 index 869fe0068cbd3..bb668551dd4b2 100644
7764 --- a/sound/soc/img/img-i2s-in.c
7765 +++ b/sound/soc/img/img-i2s-in.c
7766 @@ -343,8 +343,10 @@ static int img_i2s_in_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7767 chan_control_mask = IMG_I2S_IN_CH_CTL_CLK_TRANS_MASK;
7768
7769 ret = pm_runtime_get_sync(i2s->dev);
7770 - if (ret < 0)
7771 + if (ret < 0) {
7772 + pm_runtime_put_noidle(i2s->dev);
7773 return ret;
7774 + }
7775
7776 for (i = 0; i < i2s->active_channels; i++)
7777 img_i2s_in_ch_disable(i2s, i);
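
This hunk, and the three that follow for img-parallel-out and the two Tegra drivers, all fix the same reference leak: pm_runtime_get_sync() increments the device usage counter even when it returns an error, so bailing out with a bare return leaves the counter elevated and the device can never runtime-suspend again. A sketch of the corrected idiom:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static int example_hw_access(struct device *dev)
    {
            int ret = pm_runtime_get_sync(dev);

            if (ret < 0) {
                    /* drop the counter bump from the failed get;
                     * pm_runtime_put() is also correct and additionally
                     * allows an idle transition, which is the variant
                     * the Tegra hunks below use */
                    pm_runtime_put_noidle(dev);
                    return ret;
            }

            /* ... access the now-resumed hardware ... */

            pm_runtime_put(dev);
            return 0;
    }

Later kernels add pm_runtime_resume_and_get(), which wraps exactly this get-or-put_noidle pairing; it does not exist in 5.4, hence the open-coded form here.
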
7778 diff --git a/sound/soc/img/img-parallel-out.c b/sound/soc/img/img-parallel-out.c
7779 index 5ddbe3a31c2e9..4da49a42e8547 100644
7780 --- a/sound/soc/img/img-parallel-out.c
7781 +++ b/sound/soc/img/img-parallel-out.c
7782 @@ -163,8 +163,10 @@ static int img_prl_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
7783 }
7784
7785 ret = pm_runtime_get_sync(prl->dev);
7786 - if (ret < 0)
7787 + if (ret < 0) {
7788 + pm_runtime_put_noidle(prl->dev);
7789 return ret;
7790 + }
7791
7792 reg = img_prl_out_readl(prl, IMG_PRL_OUT_CTL);
7793 reg = (reg & ~IMG_PRL_OUT_CTL_EDGE_MASK) | control_set;
7794 diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
7795 index 635eacbd28d47..156e3b9d613c6 100644
7796 --- a/sound/soc/tegra/tegra30_ahub.c
7797 +++ b/sound/soc/tegra/tegra30_ahub.c
7798 @@ -643,8 +643,10 @@ static int tegra30_ahub_resume(struct device *dev)
7799 int ret;
7800
7801 ret = pm_runtime_get_sync(dev);
7802 - if (ret < 0)
7803 + if (ret < 0) {
7804 + pm_runtime_put(dev);
7805 return ret;
7806 + }
7807 ret = regcache_sync(ahub->regmap_ahub);
7808 ret |= regcache_sync(ahub->regmap_apbif);
7809 pm_runtime_put(dev);
7810 diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
7811 index e6d548fa980b6..8894b7c16a01a 100644
7812 --- a/sound/soc/tegra/tegra30_i2s.c
7813 +++ b/sound/soc/tegra/tegra30_i2s.c
7814 @@ -538,8 +538,10 @@ static int tegra30_i2s_resume(struct device *dev)
7815 int ret;
7816
7817 ret = pm_runtime_get_sync(dev);
7818 - if (ret < 0)
7819 + if (ret < 0) {
7820 + pm_runtime_put(dev);
7821 return ret;
7822 + }
7823 ret = regcache_sync(i2s->regmap);
7824 pm_runtime_put(dev);
7825
7826 diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
7827 index 1573229d8cf4c..8c3b3a291ddbf 100644
7828 --- a/sound/usb/quirks-table.h
7829 +++ b/sound/usb/quirks-table.h
7830 @@ -2695,6 +2695,10 @@ YAMAHA_DEVICE(0x7010, "UB99"),
7831 .ifnum = QUIRK_ANY_INTERFACE,
7832 .type = QUIRK_COMPOSITE,
7833 .data = (const struct snd_usb_audio_quirk[]) {
7834 + {
7835 + .ifnum = 0,
7836 + .type = QUIRK_AUDIO_STANDARD_MIXER,
7837 + },
7838 {
7839 .ifnum = 0,
7840 .type = QUIRK_AUDIO_FIXED_ENDPOINT,
7841 @@ -2707,6 +2711,32 @@ YAMAHA_DEVICE(0x7010, "UB99"),
7842 .attributes = UAC_EP_CS_ATTR_SAMPLE_RATE,
7843 .endpoint = 0x01,
7844 .ep_attr = USB_ENDPOINT_XFER_ISOC,
7845 + .datainterval = 1,
7846 + .maxpacksize = 0x024c,
7847 + .rates = SNDRV_PCM_RATE_44100 |
7848 + SNDRV_PCM_RATE_48000,
7849 + .rate_min = 44100,
7850 + .rate_max = 48000,
7851 + .nr_rates = 2,
7852 + .rate_table = (unsigned int[]) {
7853 + 44100, 48000
7854 + }
7855 + }
7856 + },
7857 + {
7858 + .ifnum = 0,
7859 + .type = QUIRK_AUDIO_FIXED_ENDPOINT,
7860 + .data = &(const struct audioformat) {
7861 + .formats = SNDRV_PCM_FMTBIT_S24_3LE,
7862 + .channels = 2,
7863 + .iface = 0,
7864 + .altsetting = 1,
7865 + .altset_idx = 1,
7866 + .attributes = 0,
7867 + .endpoint = 0x82,
7868 + .ep_attr = USB_ENDPOINT_XFER_ISOC,
7869 + .datainterval = 1,
7870 + .maxpacksize = 0x0126,
7871 .rates = SNDRV_PCM_RATE_44100 |
7872 SNDRV_PCM_RATE_48000,
7873 .rate_min = 44100,
7874 @@ -3675,8 +3705,8 @@ ALC1220_VB_DESKTOP(0x26ce, 0x0a01), /* Asrock TRX40 Creator */
7875 * they pretend to be 96kHz mono as a workaround for stereo being broken
7876 * by that...
7877 *
7878 - * They also have swapped L-R channels, but that's for userspace to deal
7879 - * with.
7880 + * They also have an issue with initial stream alignment that causes the
7881 + * channels to be swapped and out of phase, which is dealt with in quirks.c.
7882 */
7883 {
7884 .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
7885 diff --git a/tools/testing/selftests/net/icmp_redirect.sh b/tools/testing/selftests/net/icmp_redirect.sh
7886 index 18c5de53558af..bf361f30d6ef9 100755
7887 --- a/tools/testing/selftests/net/icmp_redirect.sh
7888 +++ b/tools/testing/selftests/net/icmp_redirect.sh
7889 @@ -180,6 +180,8 @@ setup()
7890 ;;
7891 r[12]) ip netns exec $ns sysctl -q -w net.ipv4.ip_forward=1
7892 ip netns exec $ns sysctl -q -w net.ipv4.conf.all.send_redirects=1
7893 + ip netns exec $ns sysctl -q -w net.ipv4.conf.default.rp_filter=0
7894 + ip netns exec $ns sysctl -q -w net.ipv4.conf.all.rp_filter=0
7895
7896 ip netns exec $ns sysctl -q -w net.ipv6.conf.all.forwarding=1
7897 ip netns exec $ns sysctl -q -w net.ipv6.route.mtu_expires=10
7898 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
7899 index a2d7b0e3dca97..a26ac122c759f 100644
7900 --- a/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
7901 +++ b/tools/testing/selftests/powerpc/pmu/ebb/back_to_back_ebbs_test.c
7902 @@ -91,8 +91,6 @@ int back_to_back_ebbs(void)
7903 ebb_global_disable();
7904 ebb_freeze_pmcs();
7905
7906 - count_pmc(1, sample_period);
7907 -
7908 dump_ebb_state();
7909
7910 event_close(&event);
7911 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
7912 index bc893813483ee..bb9f587fa76e8 100644
7913 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
7914 +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_test.c
7915 @@ -42,8 +42,6 @@ int cycles(void)
7916 ebb_global_disable();
7917 ebb_freeze_pmcs();
7918
7919 - count_pmc(1, sample_period);
7920 -
7921 dump_ebb_state();
7922
7923 event_close(&event);
7924 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
7925 index dcd351d203289..9ae795ce314e6 100644
7926 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
7927 +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_freeze_test.c
7928 @@ -99,8 +99,6 @@ int cycles_with_freeze(void)
7929 ebb_global_disable();
7930 ebb_freeze_pmcs();
7931
7932 - count_pmc(1, sample_period);
7933 -
7934 dump_ebb_state();
7935
7936 printf("EBBs while frozen %d\n", ebbs_while_frozen);
7937 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
7938 index 94c99c12c0f23..4b45a2e70f62b 100644
7939 --- a/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
7940 +++ b/tools/testing/selftests/powerpc/pmu/ebb/cycles_with_mmcr2_test.c
7941 @@ -71,8 +71,6 @@ int cycles_with_mmcr2(void)
7942 ebb_global_disable();
7943 ebb_freeze_pmcs();
7944
7945 - count_pmc(1, sample_period);
7946 -
7947 dump_ebb_state();
7948
7949 event_close(&event);
7950 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
7951 index dfbc5c3ad52d7..21537d6eb6b7d 100644
7952 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
7953 +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb.c
7954 @@ -396,8 +396,6 @@ int ebb_child(union pipe read_pipe, union pipe write_pipe)
7955 ebb_global_disable();
7956 ebb_freeze_pmcs();
7957
7958 - count_pmc(1, sample_period);
7959 -
7960 dump_ebb_state();
7961
7962 event_close(&event);
7963 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
7964 index ca2f7d729155b..b208bf6ad58d3 100644
7965 --- a/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
7966 +++ b/tools/testing/selftests/powerpc/pmu/ebb/ebb_on_willing_child_test.c
7967 @@ -38,8 +38,6 @@ static int victim_child(union pipe read_pipe, union pipe write_pipe)
7968 ebb_global_disable();
7969 ebb_freeze_pmcs();
7970
7971 - count_pmc(1, sample_period);
7972 -
7973 dump_ebb_state();
7974
7975 FAIL_IF(ebb_state.stats.ebb_count == 0);
7976 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
7977 index ac3e6e182614a..ba2681a12cc7b 100644
7978 --- a/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
7979 +++ b/tools/testing/selftests/powerpc/pmu/ebb/lost_exception_test.c
7980 @@ -75,7 +75,6 @@ static int test_body(void)
7981 ebb_freeze_pmcs();
7982 ebb_global_disable();
7983
7984 - count_pmc(4, sample_period);
7985 mtspr(SPRN_PMC4, 0xdead);
7986
7987 dump_summary_ebb_state();
7988 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
7989 index b8242e9d97d2d..791d37ba327b5 100644
7990 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
7991 +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_counter_test.c
7992 @@ -70,13 +70,6 @@ int multi_counter(void)
7993 ebb_global_disable();
7994 ebb_freeze_pmcs();
7995
7996 - count_pmc(1, sample_period);
7997 - count_pmc(2, sample_period);
7998 - count_pmc(3, sample_period);
7999 - count_pmc(4, sample_period);
8000 - count_pmc(5, sample_period);
8001 - count_pmc(6, sample_period);
8002 -
8003 dump_ebb_state();
8004
8005 for (i = 0; i < 6; i++)
8006 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
8007 index a05c0e18ded63..9b0f70d597020 100644
8008 --- a/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
8009 +++ b/tools/testing/selftests/powerpc/pmu/ebb/multi_ebb_procs_test.c
8010 @@ -61,8 +61,6 @@ static int cycles_child(void)
8011 ebb_global_disable();
8012 ebb_freeze_pmcs();
8013
8014 - count_pmc(1, sample_period);
8015 -
8016 dump_summary_ebb_state();
8017
8018 event_close(&event);
8019 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
8020 index 153ebc92234fd..2904c741e04e5 100644
8021 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
8022 +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmae_handling_test.c
8023 @@ -82,8 +82,6 @@ static int test_body(void)
8024 ebb_global_disable();
8025 ebb_freeze_pmcs();
8026
8027 - count_pmc(1, sample_period);
8028 -
8029 dump_ebb_state();
8030
8031 if (mmcr0_mismatch)
8032 diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
8033 index eadad75ed7e6f..b29f8ba22d1e6 100644
8034 --- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
8035 +++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
8036 @@ -76,8 +76,6 @@ int pmc56_overflow(void)
8037 ebb_global_disable();
8038 ebb_freeze_pmcs();
8039
8040 - count_pmc(2, sample_period);
8041 -
8042 dump_ebb_state();
8043
8044 printf("PMC5/6 overflow %d\n", pmc56_overflowed);