Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0160-5.4.61-all-fixes.patch



Revision 3619
Wed Aug 26 13:11:12 2020 UTC by niro
File size: 122475 bytes
-linux-5.4.61
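
The patch below is the incremental diff from 5.4.60 to 5.4.61. As a quick usage sketch (the source-tree path and patch location are assumptions for illustration), it would normally be applied from the top of a 5.4.60 tree:

    cd linux-5.4.60
    patch -p1 < /path/to/0160-5.4.61-all-fixes.patch
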
1 diff --git a/Documentation/kbuild/index.rst b/Documentation/kbuild/index.rst
2 index 0f144fad99a6a..3882bd5f7728c 100644
3 --- a/Documentation/kbuild/index.rst
4 +++ b/Documentation/kbuild/index.rst
5 @@ -19,6 +19,7 @@ Kernel Build System
6
7 issues
8 reproducible-builds
9 + llvm
10
11 .. only:: subproject and html
12
13 diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
14 index f1e5dce86af7c..852ccc551bb3a 100644
15 --- a/Documentation/kbuild/kbuild.rst
16 +++ b/Documentation/kbuild/kbuild.rst
17 @@ -262,3 +262,8 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST
18 These two variables allow to override the user@host string displayed during
19 boot and in /proc/version. The default value is the output of the commands
20 whoami and host, respectively.
21 +
22 +LLVM
23 +----
24 +If this variable is set to 1, Kbuild will use Clang and LLVM utilities instead
25 +of GCC and GNU binutils to build the kernel.
26 diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst
27 new file mode 100644
28 index 0000000000000..c776b6eee969f
29 --- /dev/null
30 +++ b/Documentation/kbuild/llvm.rst
31 @@ -0,0 +1,87 @@
32 +==============================
33 +Building Linux with Clang/LLVM
34 +==============================
35 +
36 +This document covers how to build the Linux kernel with Clang and LLVM
37 +utilities.
38 +
39 +About
40 +-----
41 +
42 +The Linux kernel has always traditionally been compiled with GNU toolchains
43 +such as GCC and binutils. Ongoing work has allowed for `Clang
44 +<https://clang.llvm.org/>`_ and `LLVM <https://llvm.org/>`_ utilities to be
45 +used as viable substitutes. Distributions such as `Android
46 +<https://www.android.com/>`_, `ChromeOS
47 +<https://www.chromium.org/chromium-os>`_, and `OpenMandriva
48 +<https://www.openmandriva.org/>`_ use Clang built kernels. `LLVM is a
49 +collection of toolchain components implemented in terms of C++ objects
50 +<https://www.aosabook.org/en/llvm.html>`_. Clang is a front-end to LLVM that
51 +supports C and the GNU C extensions required by the kernel, and is pronounced
52 +"klang," not "see-lang."
53 +
54 +Clang
55 +-----
56 +
57 +The compiler used can be swapped out via `CC=` command line argument to `make`.
58 +`CC=` should be set when selecting a config and during a build.
59 +
60 + make CC=clang defconfig
61 +
62 + make CC=clang
63 +
64 +Cross Compiling
65 +---------------
66 +
67 +A single Clang compiler binary will typically contain all supported backends,
68 +which can help simplify cross compiling.
69 +
70 + ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang
71 +
72 +`CROSS_COMPILE` is not used to prefix the Clang compiler binary, instead
73 +`CROSS_COMPILE` is used to set a command line flag: `--target <triple>`. For
74 +example:
75 +
76 + clang --target aarch64-linux-gnu foo.c
77 +
78 +LLVM Utilities
79 +--------------
80 +
81 +LLVM has substitutes for GNU binutils utilities. Kbuild supports `LLVM=1`
82 +to enable them.
83 +
84 + make LLVM=1
85 +
86 +They can be enabled individually. The full list of the parameters:
87 +
88 + make CC=clang LD=ld.lld AR=llvm-ar NM=llvm-nm STRIP=llvm-strip \\
89 + OBJCOPY=llvm-objcopy OBJDUMP=llvm-objdump OBJSIZE=llvm-size \\
90 + READELF=llvm-readelf HOSTCC=clang HOSTCXX=clang++ HOSTAR=llvm-ar \\
91 + HOSTLD=ld.lld
92 +
93 +Currently, the integrated assembler is disabled by default. You can pass
94 +`LLVM_IAS=1` to enable it.
95 +
96 +Getting Help
97 +------------
98 +
99 +- `Website <https://clangbuiltlinux.github.io/>`_
100 +- `Mailing List <https://groups.google.com/forum/#!forum/clang-built-linux>`_: <clang-built-linux@googlegroups.com>
101 +- `Issue Tracker <https://github.com/ClangBuiltLinux/linux/issues>`_
102 +- IRC: #clangbuiltlinux on chat.freenode.net
103 +- `Telegram <https://t.me/ClangBuiltLinux>`_: @ClangBuiltLinux
104 +- `Wiki <https://github.com/ClangBuiltLinux/linux/wiki>`_
105 +- `Beginner Bugs <https://github.com/ClangBuiltLinux/linux/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22>`_
106 +
107 +Getting LLVM
108 +-------------
109 +
110 +- http://releases.llvm.org/download.html
111 +- https://github.com/llvm/llvm-project
112 +- https://llvm.org/docs/GettingStarted.html
113 +- https://llvm.org/docs/CMake.html
114 +- https://apt.llvm.org/
115 +- https://www.archlinux.org/packages/extra/x86_64/llvm/
116 +- https://github.com/ClangBuiltLinux/tc-build
117 +- https://github.com/ClangBuiltLinux/linux/wiki/Building-Clang-from-source
118 +- https://android.googlesource.com/platform/prebuilts/clang/host/linux-x86/
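
Taken together, the llvm.rst text added above amounts to a short build workflow. A minimal sketch, assuming clang, ld.lld and the llvm-* binutils are installed and on PATH:

    make LLVM=1 defconfig
    make LLVM=1 -j"$(nproc)"
    # optionally add LLVM_IAS=1 to both commands to use Clang's integrated assembler
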
119 diff --git a/MAINTAINERS b/MAINTAINERS
120 index fe6fa5d3a63e5..1407008df7491 100644
121 --- a/MAINTAINERS
122 +++ b/MAINTAINERS
123 @@ -4028,6 +4028,7 @@ B: https://github.com/ClangBuiltLinux/linux/issues
124 C: irc://chat.freenode.net/clangbuiltlinux
125 S: Supported
126 K: \b(?i:clang|llvm)\b
127 +F: Documentation/kbuild/llvm.rst
128
129 CLEANCACHE API
130 M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
131 diff --git a/Makefile b/Makefile
132 index 7c001e21e28e7..2c21b922644d7 100644
133 --- a/Makefile
134 +++ b/Makefile
135 @@ -1,7 +1,7 @@
136 # SPDX-License-Identifier: GPL-2.0
137 VERSION = 5
138 PATCHLEVEL = 4
139 -SUBLEVEL = 60
140 +SUBLEVEL = 61
141 EXTRAVERSION =
142 NAME = Kleptomaniac Octopus
143
144 @@ -394,8 +394,13 @@ HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
145 HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
146 HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
147
148 -HOSTCC = gcc
149 -HOSTCXX = g++
150 +ifneq ($(LLVM),)
151 +HOSTCC = clang
152 +HOSTCXX = clang++
153 +else
154 +HOSTCC = gcc
155 +HOSTCXX = g++
156 +endif
157 KBUILD_HOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 \
158 -fomit-frame-pointer -std=gnu89 $(HOST_LFS_CFLAGS) \
159 $(HOSTCFLAGS)
160 @@ -404,16 +409,28 @@ KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
161 KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
162
163 # Make variables (CC, etc...)
164 -AS = $(CROSS_COMPILE)as
165 -LD = $(CROSS_COMPILE)ld
166 -CC = $(CROSS_COMPILE)gcc
167 CPP = $(CC) -E
168 +ifneq ($(LLVM),)
169 +CC = clang
170 +LD = ld.lld
171 +AR = llvm-ar
172 +NM = llvm-nm
173 +OBJCOPY = llvm-objcopy
174 +OBJDUMP = llvm-objdump
175 +READELF = llvm-readelf
176 +OBJSIZE = llvm-size
177 +STRIP = llvm-strip
178 +else
179 +CC = $(CROSS_COMPILE)gcc
180 +LD = $(CROSS_COMPILE)ld
181 AR = $(CROSS_COMPILE)ar
182 NM = $(CROSS_COMPILE)nm
183 -STRIP = $(CROSS_COMPILE)strip
184 OBJCOPY = $(CROSS_COMPILE)objcopy
185 OBJDUMP = $(CROSS_COMPILE)objdump
186 +READELF = $(CROSS_COMPILE)readelf
187 OBJSIZE = $(CROSS_COMPILE)size
188 +STRIP = $(CROSS_COMPILE)strip
189 +endif
190 PAHOLE = pahole
191 LEX = flex
192 YACC = bison
193 @@ -422,7 +439,6 @@ INSTALLKERNEL := installkernel
194 DEPMOD = /sbin/depmod
195 PERL = perl
196 PYTHON = python
197 -PYTHON2 = python2
198 PYTHON3 = python3
199 CHECK = sparse
200 BASH = bash
201 @@ -471,9 +487,9 @@ KBUILD_LDFLAGS :=
202 GCC_PLUGINS_CFLAGS :=
203 CLANG_FLAGS :=
204
205 -export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE AS LD CC
206 -export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE PAHOLE LEX YACC AWK INSTALLKERNEL
207 -export PERL PYTHON PYTHON2 PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
208 +export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC
209 +export CPP AR NM STRIP OBJCOPY OBJDUMP OBJSIZE READELF PAHOLE LEX YACC AWK INSTALLKERNEL
210 +export PERL PYTHON PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
211 export KBUILD_HOSTCXXFLAGS KBUILD_HOSTLDFLAGS KBUILD_HOSTLDLIBS LDFLAGS_MODULE
212
213 export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
214 @@ -534,7 +550,7 @@ endif
215 ifneq ($(GCC_TOOLCHAIN),)
216 CLANG_FLAGS += --gcc-toolchain=$(GCC_TOOLCHAIN)
217 endif
218 -ifeq ($(shell $(AS) --version 2>&1 | head -n 1 | grep clang),)
219 +ifneq ($(LLVM_IAS),1)
220 CLANG_FLAGS += -no-integrated-as
221 endif
222 CLANG_FLAGS += -Werror=unknown-warning-option
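
The Makefile hunks above are what the new LLVM=1 and LLVM_IAS=1 switches drive: LLVM=1 swaps HOSTCC/HOSTCXX and the CC/LD/binutils variables to their LLVM counterparts, and LLVM_IAS=1 suppresses -no-integrated-as. Combined with the cross-compilation flow from llvm.rst, an invocation would look roughly like this (the aarch64 triple and the use of LLVM_IAS=1 are illustrative assumptions):

    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- LLVM=1 defconfig
    make ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- LLVM=1 LLVM_IAS=1 -j"$(nproc)"
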
223 diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
224 index b771bf1b53523..103270d5a9fc6 100644
225 --- a/arch/alpha/include/asm/io.h
226 +++ b/arch/alpha/include/asm/io.h
227 @@ -502,10 +502,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
228 }
229 #endif
230
231 -#define ioread16be(p) be16_to_cpu(ioread16(p))
232 -#define ioread32be(p) be32_to_cpu(ioread32(p))
233 -#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
234 -#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
235 +#define ioread16be(p) swab16(ioread16(p))
236 +#define ioread32be(p) swab32(ioread32(p))
237 +#define iowrite16be(v,p) iowrite16(swab16(v), (p))
238 +#define iowrite32be(v,p) iowrite32(swab32(v), (p))
239
240 #define inb_p inb
241 #define inw_p inw
242 diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
243 index 1b179b1f46bc5..dd03d5e01a946 100644
244 --- a/arch/arm/include/asm/kvm_host.h
245 +++ b/arch/arm/include/asm/kvm_host.h
246 @@ -266,7 +266,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
247
248 #define KVM_ARCH_WANT_MMU_NOTIFIER
249 int kvm_unmap_hva_range(struct kvm *kvm,
250 - unsigned long start, unsigned long end);
251 + unsigned long start, unsigned long end, unsigned flags);
252 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
253
254 unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
255 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
256 index d65aef47ece3b..11a7d6208087f 100644
257 --- a/arch/arm64/Makefile
258 +++ b/arch/arm64/Makefile
259 @@ -146,6 +146,7 @@ zinstall install:
260 PHONY += vdso_install
261 vdso_install:
262 $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso $@
263 + $(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso32 $@
264
265 # We use MRPROPER_FILES and CLEAN_FILES now
266 archclean:
267 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
268 index 0c3bd6aff6e91..d719c6b4dd81c 100644
269 --- a/arch/arm64/include/asm/kvm_host.h
270 +++ b/arch/arm64/include/asm/kvm_host.h
271 @@ -427,7 +427,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
272
273 #define KVM_ARCH_WANT_MMU_NOTIFIER
274 int kvm_unmap_hva_range(struct kvm *kvm,
275 - unsigned long start, unsigned long end);
276 + unsigned long start, unsigned long end, unsigned flags);
277 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
278 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
279 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
280 diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
281 index 76b327f88fbb1..40dffe60b8454 100644
282 --- a/arch/arm64/kernel/vdso32/Makefile
283 +++ b/arch/arm64/kernel/vdso32/Makefile
284 @@ -190,7 +190,7 @@ quiet_cmd_vdsosym = VDSOSYM $@
285 cmd_vdsosym = $(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
286
287 # Install commands for the unstripped file
288 -quiet_cmd_vdso_install = INSTALL $@
289 +quiet_cmd_vdso_install = INSTALL32 $@
290 cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/vdso32.so
291
292 vdso.so: $(obj)/vdso.so.dbg
293 diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
294 index 9138a624c5c81..692f90e7fecc1 100644
295 --- a/arch/m68k/include/asm/m53xxacr.h
296 +++ b/arch/m68k/include/asm/m53xxacr.h
297 @@ -89,9 +89,9 @@
298 * coherency though in all cases. And for copyback caches we will need
299 * to push cached data as well.
300 */
301 -#define CACHE_INIT CACR_CINVA
302 -#define CACHE_INVALIDATE CACR_CINVA
303 -#define CACHE_INVALIDATED CACR_CINVA
304 +#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
305 +#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
306 +#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
307
308 #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
309 (0x000f0000) + \
310 diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
311 index 7b47a323dc23e..356c61074d136 100644
312 --- a/arch/mips/include/asm/kvm_host.h
313 +++ b/arch/mips/include/asm/kvm_host.h
314 @@ -939,7 +939,7 @@ enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
315
316 #define KVM_ARCH_WANT_MMU_NOTIFIER
317 int kvm_unmap_hva_range(struct kvm *kvm,
318 - unsigned long start, unsigned long end);
319 + unsigned long start, unsigned long end, unsigned flags);
320 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
321 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
322 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
323 diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
324 index 7b06e6ee6817d..b8884de89c81e 100644
325 --- a/arch/mips/kernel/setup.c
326 +++ b/arch/mips/kernel/setup.c
327 @@ -494,7 +494,7 @@ static void __init mips_parse_crashkernel(void)
328 if (ret != 0 || crash_size <= 0)
329 return;
330
331 - if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
332 + if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
333 pr_warn("Invalid memory region reserved for crash kernel\n");
334 return;
335 }
336 diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
337 index 97e538a8c1be2..97f63a84aa51f 100644
338 --- a/arch/mips/kvm/mmu.c
339 +++ b/arch/mips/kvm/mmu.c
340 @@ -512,7 +512,8 @@ static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
341 return 1;
342 }
343
344 -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
345 +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
346 + unsigned flags)
347 {
348 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
349
350 diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
351 index 6fe6ad64cba57..740b52ec35097 100644
352 --- a/arch/powerpc/include/asm/kvm_host.h
353 +++ b/arch/powerpc/include/asm/kvm_host.h
354 @@ -58,7 +58,8 @@
355 #define KVM_ARCH_WANT_MMU_NOTIFIER
356
357 extern int kvm_unmap_hva_range(struct kvm *kvm,
358 - unsigned long start, unsigned long end);
359 + unsigned long start, unsigned long end,
360 + unsigned flags);
361 extern int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
362 extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
363 extern int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
364 diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
365 index ec2547cc5ecbe..1ff971f3b06f9 100644
366 --- a/arch/powerpc/kvm/book3s.c
367 +++ b/arch/powerpc/kvm/book3s.c
368 @@ -867,7 +867,8 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
369 kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new, change);
370 }
371
372 -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
373 +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
374 + unsigned flags)
375 {
376 return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
377 }
378 diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
379 index 321db0fdb9db8..7154bd424d243 100644
380 --- a/arch/powerpc/kvm/e500_mmu_host.c
381 +++ b/arch/powerpc/kvm/e500_mmu_host.c
382 @@ -734,7 +734,8 @@ static int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
383 return 0;
384 }
385
386 -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
387 +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
388 + unsigned flags)
389 {
390 /* kvm_unmap_hva flushes everything anyways */
391 kvm_unmap_hva(kvm, start);
392 diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
393 index 13ef77fd648f4..b3c4848869e52 100644
394 --- a/arch/powerpc/platforms/pseries/ras.c
395 +++ b/arch/powerpc/platforms/pseries/ras.c
396 @@ -184,7 +184,6 @@ static void handle_system_shutdown(char event_modifier)
397 case EPOW_SHUTDOWN_ON_UPS:
398 pr_emerg("Loss of system power detected. System is running on"
399 " UPS/battery. Check RTAS error log for details\n");
400 - orderly_poweroff(true);
401 break;
402
403 case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
404 diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
405 index 5aa786063eb3e..c6aef2ecf2890 100644
406 --- a/arch/s390/kernel/ptrace.c
407 +++ b/arch/s390/kernel/ptrace.c
408 @@ -1283,7 +1283,6 @@ static bool is_ri_cb_valid(struct runtime_instr_cb *cb)
409 cb->pc == 1 &&
410 cb->qc == 0 &&
411 cb->reserved2 == 0 &&
412 - cb->key == PAGE_DEFAULT_KEY &&
413 cb->reserved3 == 0 &&
414 cb->reserved4 == 0 &&
415 cb->reserved5 == 0 &&
416 @@ -1347,7 +1346,11 @@ static int s390_runtime_instr_set(struct task_struct *target,
417 kfree(data);
418 return -EINVAL;
419 }
420 -
421 + /*
422 + * Override access key in any case, since user space should
423 + * not be able to set it, nor should it care about it.
424 + */
425 + ri_cb.key = PAGE_DEFAULT_KEY >> 4;
426 preempt_disable();
427 if (!target->thread.ri_cb)
428 target->thread.ri_cb = data;
429 diff --git a/arch/s390/kernel/runtime_instr.c b/arch/s390/kernel/runtime_instr.c
430 index 125c7f6e87150..1788a5454b6fc 100644
431 --- a/arch/s390/kernel/runtime_instr.c
432 +++ b/arch/s390/kernel/runtime_instr.c
433 @@ -57,7 +57,7 @@ static void init_runtime_instr_cb(struct runtime_instr_cb *cb)
434 cb->k = 1;
435 cb->ps = 1;
436 cb->pc = 1;
437 - cb->key = PAGE_DEFAULT_KEY;
438 + cb->key = PAGE_DEFAULT_KEY >> 4;
439 cb->v = 1;
440 }
441
442 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
443 index 6b84afdd75382..98aac5b4bdb7e 100644
444 --- a/arch/x86/boot/compressed/Makefile
445 +++ b/arch/x86/boot/compressed/Makefile
446 @@ -102,7 +102,7 @@ vmlinux-objs-$(CONFIG_EFI_MIXED) += $(obj)/efi_thunk_$(BITS).o
447 quiet_cmd_check_data_rel = DATAREL $@
448 define cmd_check_data_rel
449 for obj in $(filter %.o,$^); do \
450 - ${CROSS_COMPILE}readelf -S $$obj | grep -qF .rel.local && { \
451 + $(READELF) -S $$obj | grep -qF .rel.local && { \
452 echo "error: $$obj has data relocations!" >&2; \
453 exit 1; \
454 } || true; \
455 diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
456 index 742de9d97ba14..c41686641c3fb 100644
457 --- a/arch/x86/include/asm/kvm_host.h
458 +++ b/arch/x86/include/asm/kvm_host.h
459 @@ -1553,7 +1553,8 @@ asmlinkage void kvm_spurious_fault(void);
460 _ASM_EXTABLE(666b, 667b)
461
462 #define KVM_ARCH_WANT_MMU_NOTIFIER
463 -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end);
464 +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
465 + unsigned flags);
466 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
467 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
468 int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
469 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
470 index 342d9ddf35c3a..bb743f956c232 100644
471 --- a/arch/x86/kvm/mmu.c
472 +++ b/arch/x86/kvm/mmu.c
473 @@ -2045,7 +2045,8 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
474 return kvm_handle_hva_range(kvm, hva, hva + 1, data, handler);
475 }
476
477 -int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
478 +int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
479 + unsigned flags)
480 {
481 return kvm_handle_hva_range(kvm, start, end, 0, kvm_unmap_rmapp);
482 }
483 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
484 index 38b2df0e71096..8920ee7b28811 100644
485 --- a/arch/x86/kvm/x86.c
486 +++ b/arch/x86/kvm/x86.c
487 @@ -972,7 +972,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
488 {
489 unsigned long old_cr4 = kvm_read_cr4(vcpu);
490 unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
491 - X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE;
492 + X86_CR4_SMEP;
493
494 if (kvm_valid_cr4(vcpu, cr4))
495 return 1;
496 diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
497 index 91220cc258547..5c11ae66b5d8e 100644
498 --- a/arch/x86/pci/xen.c
499 +++ b/arch/x86/pci/xen.c
500 @@ -26,6 +26,7 @@
501 #include <asm/xen/pci.h>
502 #include <asm/xen/cpuid.h>
503 #include <asm/apic.h>
504 +#include <asm/acpi.h>
505 #include <asm/i8259.h>
506
507 static int xen_pcifront_enable_irq(struct pci_dev *dev)
508 diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
509 index d3d7c4ef7d045..53dc0fd6f6d3c 100644
510 --- a/drivers/cpufreq/intel_pstate.c
511 +++ b/drivers/cpufreq/intel_pstate.c
512 @@ -1571,6 +1571,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
513
514 intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
515 cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
516 + cpu->pstate.turbo_pstate = phy_max;
517 } else {
518 cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
519 }
520 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
521 index c1167ef5d2b35..b299e22b7532a 100644
522 --- a/drivers/firmware/efi/efi.c
523 +++ b/drivers/firmware/efi/efi.c
524 @@ -345,6 +345,7 @@ static int __init efisubsys_init(void)
525 efi_kobj = kobject_create_and_add("efi", firmware_kobj);
526 if (!efi_kobj) {
527 pr_err("efi: Firmware registration failed.\n");
528 + destroy_workqueue(efi_rts_wq);
529 return -ENOMEM;
530 }
531
532 @@ -381,6 +382,7 @@ err_unregister:
533 generic_ops_unregister();
534 err_put:
535 kobject_put(efi_kobj);
536 + destroy_workqueue(efi_rts_wq);
537 return error;
538 }
539
540 diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
541 index 6091194a3955c..2c0eb7140ca0e 100644
542 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
543 +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
544 @@ -1434,6 +1434,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
545
546 drm_connector_update_edid_property(connector,
547 aconnector->edid);
548 + drm_add_edid_modes(connector, aconnector->edid);
549
550 if (aconnector->dc_link->aux_mode)
551 drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
552 diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
553 index c13dce760098c..05b98eadc2899 100644
554 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
555 +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
556 @@ -2845,7 +2845,7 @@ static bool dcn20_validate_bandwidth_internal(struct dc *dc, struct dc_state *co
557 int vlevel = 0;
558 int pipe_split_from[MAX_PIPES];
559 int pipe_cnt = 0;
560 - display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_KERNEL);
561 + display_e2e_pipe_params_st *pipes = kzalloc(dc->res_pool->pipe_count * sizeof(display_e2e_pipe_params_st), GFP_ATOMIC);
562 DC_LOGGER_INIT(dc->ctx->logger);
563
564 BW_VAL_TRACE_COUNT();
565 diff --git a/drivers/gpu/drm/amd/display/include/fixed31_32.h b/drivers/gpu/drm/amd/display/include/fixed31_32.h
566 index 89ef9f6860e5b..16df2a485dd0d 100644
567 --- a/drivers/gpu/drm/amd/display/include/fixed31_32.h
568 +++ b/drivers/gpu/drm/amd/display/include/fixed31_32.h
569 @@ -431,6 +431,9 @@ struct fixed31_32 dc_fixpt_log(struct fixed31_32 arg);
570 */
571 static inline struct fixed31_32 dc_fixpt_pow(struct fixed31_32 arg1, struct fixed31_32 arg2)
572 {
573 + if (arg1.value == 0)
574 + return arg2.value == 0 ? dc_fixpt_one : dc_fixpt_zero;
575 +
576 return dc_fixpt_exp(
577 dc_fixpt_mul(
578 dc_fixpt_log(arg1),
579 diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
580 index 46dc3de7e81bf..f2bad14ac04ab 100644
581 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
582 +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
583 @@ -358,8 +358,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
584 static int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
585 void *buf, int len, int write)
586 {
587 - unsigned long offset = (addr) - vma->vm_start;
588 struct ttm_buffer_object *bo = vma->vm_private_data;
589 + unsigned long offset = (addr) - vma->vm_start +
590 + ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
591 + << PAGE_SHIFT);
592 int ret;
593
594 if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
595 diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
596 index 909eba43664a2..204d1df5a21d1 100644
597 --- a/drivers/gpu/drm/vgem/vgem_drv.c
598 +++ b/drivers/gpu/drm/vgem/vgem_drv.c
599 @@ -229,32 +229,6 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
600 return 0;
601 }
602
603 -static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev,
604 - uint32_t handle, uint64_t *offset)
605 -{
606 - struct drm_gem_object *obj;
607 - int ret;
608 -
609 - obj = drm_gem_object_lookup(file, handle);
610 - if (!obj)
611 - return -ENOENT;
612 -
613 - if (!obj->filp) {
614 - ret = -EINVAL;
615 - goto unref;
616 - }
617 -
618 - ret = drm_gem_create_mmap_offset(obj);
619 - if (ret)
620 - goto unref;
621 -
622 - *offset = drm_vma_node_offset_addr(&obj->vma_node);
623 -unref:
624 - drm_gem_object_put_unlocked(obj);
625 -
626 - return ret;
627 -}
628 -
629 static struct drm_ioctl_desc vgem_ioctls[] = {
630 DRM_IOCTL_DEF_DRV(VGEM_FENCE_ATTACH, vgem_fence_attach_ioctl, DRM_RENDER_ALLOW),
631 DRM_IOCTL_DEF_DRV(VGEM_FENCE_SIGNAL, vgem_fence_signal_ioctl, DRM_RENDER_ALLOW),
632 @@ -448,7 +422,6 @@ static struct drm_driver vgem_driver = {
633 .fops = &vgem_driver_fops,
634
635 .dumb_create = vgem_gem_dumb_create,
636 - .dumb_map_offset = vgem_gem_dumb_map,
637
638 .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
639 .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
640 diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c
641 index 27e2df44d043d..cfe5f47d9890e 100644
642 --- a/drivers/infiniband/hw/bnxt_re/main.c
643 +++ b/drivers/infiniband/hw/bnxt_re/main.c
644 @@ -789,7 +789,8 @@ static int bnxt_re_handle_qp_async_event(struct creq_qp_event *qp_event,
645 struct ib_event event;
646 unsigned int flags;
647
648 - if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR) {
649 + if (qp->qplib_qp.state == CMDQ_MODIFY_QP_NEW_STATE_ERR &&
650 + rdma_is_kernel_res(&qp->ib_qp.res)) {
651 flags = bnxt_re_lock_cqs(qp);
652 bnxt_qplib_add_flush_qp(&qp->qplib_qp);
653 bnxt_re_unlock_cqs(qp, flags);
654 diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
655 index 7c6fd720fb2ea..c018fc633cca3 100644
656 --- a/drivers/infiniband/hw/hfi1/tid_rdma.c
657 +++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
658 @@ -3215,6 +3215,7 @@ bool hfi1_tid_rdma_wqe_interlock(struct rvt_qp *qp, struct rvt_swqe *wqe)
659 case IB_WR_ATOMIC_CMP_AND_SWP:
660 case IB_WR_ATOMIC_FETCH_AND_ADD:
661 case IB_WR_RDMA_WRITE:
662 + case IB_WR_RDMA_WRITE_WITH_IMM:
663 switch (prev->wr.opcode) {
664 case IB_WR_TID_RDMA_WRITE:
665 req = wqe_to_tid_req(prev);
666 diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
667 index 527ae0b9a191e..0b4a3039f312f 100644
668 --- a/drivers/input/mouse/psmouse-base.c
669 +++ b/drivers/input/mouse/psmouse-base.c
670 @@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
671 {
672 int type = *((unsigned int *)kp->arg);
673
674 - return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
675 + return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
676 }
677
678 static int __init psmouse_init(void)
679 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
680 index 25ad64a3919f6..2cbfcd99b7ee7 100644
681 --- a/drivers/md/bcache/super.c
682 +++ b/drivers/md/bcache/super.c
683 @@ -816,19 +816,19 @@ static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
684 struct request_queue *q;
685 const size_t max_stripes = min_t(size_t, INT_MAX,
686 SIZE_MAX / sizeof(atomic_t));
687 - size_t n;
688 + uint64_t n;
689 int idx;
690
691 if (!d->stripe_size)
692 d->stripe_size = 1 << 31;
693
694 - d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
695 -
696 - if (!d->nr_stripes || d->nr_stripes > max_stripes) {
697 - pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
698 - (unsigned int)d->nr_stripes);
699 + n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
700 + if (!n || n > max_stripes) {
701 + pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
702 + n);
703 return -ENOMEM;
704 }
705 + d->nr_stripes = n;
706
707 n = d->nr_stripes * sizeof(atomic_t);
708 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
709 diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
710 index fadbdeeb44955..293867b9e7961 100644
711 --- a/drivers/media/pci/ttpci/budget-core.c
712 +++ b/drivers/media/pci/ttpci/budget-core.c
713 @@ -369,20 +369,25 @@ static int budget_register(struct budget *budget)
714 ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
715
716 if (ret < 0)
717 - return ret;
718 + goto err_release_dmx;
719
720 budget->mem_frontend.source = DMX_MEMORY_FE;
721 ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
722 if (ret < 0)
723 - return ret;
724 + goto err_release_dmx;
725
726 ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
727 if (ret < 0)
728 - return ret;
729 + goto err_release_dmx;
730
731 dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
732
733 return 0;
734 +
735 +err_release_dmx:
736 + dvb_dmxdev_release(&budget->dmxdev);
737 + dvb_dmx_release(&budget->demux);
738 + return ret;
739 }
740
741 static void budget_unregister(struct budget *budget)
742 diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
743 index d38d2bbb6f0f8..7000f0bf0b353 100644
744 --- a/drivers/media/platform/davinci/vpss.c
745 +++ b/drivers/media/platform/davinci/vpss.c
746 @@ -505,19 +505,31 @@ static void vpss_exit(void)
747
748 static int __init vpss_init(void)
749 {
750 + int ret;
751 +
752 if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
753 return -EBUSY;
754
755 oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
756 if (unlikely(!oper_cfg.vpss_regs_base2)) {
757 - release_mem_region(VPSS_CLK_CTRL, 4);
758 - return -ENOMEM;
759 + ret = -ENOMEM;
760 + goto err_ioremap;
761 }
762
763 writel(VPSS_CLK_CTRL_VENCCLKEN |
764 - VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
765 + VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
766 +
767 + ret = platform_driver_register(&vpss_driver);
768 + if (ret)
769 + goto err_pd_register;
770 +
771 + return 0;
772
773 - return platform_driver_register(&vpss_driver);
774 +err_pd_register:
775 + iounmap(oper_cfg.vpss_regs_base2);
776 +err_ioremap:
777 + release_mem_region(VPSS_CLK_CTRL, 4);
778 + return ret;
779 }
780 subsys_initcall(vpss_init);
781 module_exit(vpss_exit);
782 diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
783 index 3fdc9f964a3c6..2483641799dfb 100644
784 --- a/drivers/media/platform/qcom/camss/camss.c
785 +++ b/drivers/media/platform/qcom/camss/camss.c
786 @@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss)
787 return num_subdevs;
788
789 err_cleanup:
790 - v4l2_async_notifier_cleanup(&camss->notifier);
791 of_node_put(node);
792 return ret;
793 }
794 @@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev)
795 camss->csid_num = 4;
796 camss->vfe_num = 2;
797 } else {
798 - return -EINVAL;
799 + ret = -EINVAL;
800 + goto err_free;
801 }
802
803 camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
804 sizeof(*camss->csiphy), GFP_KERNEL);
805 - if (!camss->csiphy)
806 - return -ENOMEM;
807 + if (!camss->csiphy) {
808 + ret = -ENOMEM;
809 + goto err_free;
810 + }
811
812 camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
813 GFP_KERNEL);
814 - if (!camss->csid)
815 - return -ENOMEM;
816 + if (!camss->csid) {
817 + ret = -ENOMEM;
818 + goto err_free;
819 + }
820
821 camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
822 GFP_KERNEL);
823 - if (!camss->vfe)
824 - return -ENOMEM;
825 + if (!camss->vfe) {
826 + ret = -ENOMEM;
827 + goto err_free;
828 + }
829
830 v4l2_async_notifier_init(&camss->notifier);
831
832 num_subdevs = camss_of_parse_ports(camss);
833 - if (num_subdevs < 0)
834 - return num_subdevs;
835 + if (num_subdevs < 0) {
836 + ret = num_subdevs;
837 + goto err_cleanup;
838 + }
839
840 ret = camss_init_subdevices(camss);
841 if (ret < 0)
842 @@ -936,6 +944,8 @@ err_register_entities:
843 v4l2_device_unregister(&camss->v4l2_dev);
844 err_cleanup:
845 v4l2_async_notifier_cleanup(&camss->notifier);
846 +err_free:
847 + kfree(camss);
848
849 return ret;
850 }
851 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
852 index 499845c32b1bc..0d7a173f8e61c 100644
853 --- a/drivers/net/bonding/bond_main.c
854 +++ b/drivers/net/bonding/bond_main.c
855 @@ -2037,7 +2037,8 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
856 int ret;
857
858 ret = __bond_release_one(bond_dev, slave_dev, false, true);
859 - if (ret == 0 && !bond_has_slaves(bond)) {
860 + if (ret == 0 && !bond_has_slaves(bond) &&
861 + bond_dev->reg_state != NETREG_UNREGISTERING) {
862 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
863 netdev_info(bond_dev, "Destroying bond\n");
864 bond_remove_proc_entry(bond);
865 @@ -2777,6 +2778,9 @@ static int bond_ab_arp_inspect(struct bonding *bond)
866 if (bond_time_in_interval(bond, last_rx, 1)) {
867 bond_propose_link_state(slave, BOND_LINK_UP);
868 commit++;
869 + } else if (slave->link == BOND_LINK_BACK) {
870 + bond_propose_link_state(slave, BOND_LINK_FAIL);
871 + commit++;
872 }
873 continue;
874 }
875 @@ -2885,6 +2889,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
876
877 continue;
878
879 + case BOND_LINK_FAIL:
880 + bond_set_slave_link_state(slave, BOND_LINK_FAIL,
881 + BOND_SLAVE_NOTIFY_NOW);
882 + bond_set_slave_inactive_flags(slave,
883 + BOND_SLAVE_NOTIFY_NOW);
884 +
885 + /* A slave has just been enslaved and has become
886 + * the current active slave.
887 + */
888 + if (rtnl_dereference(bond->curr_active_slave))
889 + RCU_INIT_POINTER(bond->current_arp_slave, NULL);
890 + continue;
891 +
892 default:
893 slave_err(bond->dev, slave->dev,
894 "impossible: link_new_state %d on slave\n",
895 @@ -2935,8 +2952,6 @@ static bool bond_ab_arp_probe(struct bonding *bond)
896 return should_notify_rtnl;
897 }
898
899 - bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
900 -
901 bond_for_each_slave_rcu(bond, slave, iter) {
902 if (!found && !before && bond_slave_is_up(slave))
903 before = slave;
904 @@ -4246,13 +4261,23 @@ static netdev_tx_t bond_start_xmit(struct sk_buff *skb, struct net_device *dev)
905 return ret;
906 }
907
908 +static u32 bond_mode_bcast_speed(struct slave *slave, u32 speed)
909 +{
910 + if (speed == 0 || speed == SPEED_UNKNOWN)
911 + speed = slave->speed;
912 + else
913 + speed = min(speed, slave->speed);
914 +
915 + return speed;
916 +}
917 +
918 static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
919 struct ethtool_link_ksettings *cmd)
920 {
921 struct bonding *bond = netdev_priv(bond_dev);
922 - unsigned long speed = 0;
923 struct list_head *iter;
924 struct slave *slave;
925 + u32 speed = 0;
926
927 cmd->base.duplex = DUPLEX_UNKNOWN;
928 cmd->base.port = PORT_OTHER;
929 @@ -4264,8 +4289,13 @@ static int bond_ethtool_get_link_ksettings(struct net_device *bond_dev,
930 */
931 bond_for_each_slave(bond, slave, iter) {
932 if (bond_slave_can_tx(slave)) {
933 - if (slave->speed != SPEED_UNKNOWN)
934 - speed += slave->speed;
935 + if (slave->speed != SPEED_UNKNOWN) {
936 + if (BOND_MODE(bond) == BOND_MODE_BROADCAST)
937 + speed = bond_mode_bcast_speed(slave,
938 + speed);
939 + else
940 + speed += slave->speed;
941 + }
942 if (cmd->base.duplex == DUPLEX_UNKNOWN &&
943 slave->duplex != DUPLEX_UNKNOWN)
944 cmd->base.duplex = slave->duplex;
945 diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
946 index 14850b7fe6d7f..4bd66ba72c03c 100644
947 --- a/drivers/net/dsa/b53/b53_common.c
948 +++ b/drivers/net/dsa/b53/b53_common.c
949 @@ -1523,6 +1523,8 @@ static int b53_arl_op(struct b53_device *dev, int op, int port,
950 return ret;
951
952 switch (ret) {
953 + case -ETIMEDOUT:
954 + return ret;
955 case -ENOSPC:
956 dev_dbg(dev->dev, "{%pM,%.4d} no space left in ARL\n",
957 addr, vid);
958 diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
959 index 26325f7b3c1fa..4d0d13d5d0998 100644
960 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
961 +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
962 @@ -2835,16 +2835,14 @@ static void ena_fw_reset_device(struct work_struct *work)
963 {
964 struct ena_adapter *adapter =
965 container_of(work, struct ena_adapter, reset_task);
966 - struct pci_dev *pdev = adapter->pdev;
967
968 - if (unlikely(!test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
969 - dev_err(&pdev->dev,
970 - "device reset schedule while reset bit is off\n");
971 - return;
972 - }
973 rtnl_lock();
974 - ena_destroy_device(adapter, false);
975 - ena_restore_device(adapter);
976 +
977 + if (likely(test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))) {
978 + ena_destroy_device(adapter, false);
979 + ena_restore_device(adapter);
980 + }
981 +
982 rtnl_unlock();
983 }
984
985 @@ -3675,8 +3673,11 @@ static void __ena_shutoff(struct pci_dev *pdev, bool shutdown)
986 netdev->rx_cpu_rmap = NULL;
987 }
988 #endif /* CONFIG_RFS_ACCEL */
989 - del_timer_sync(&adapter->timer_service);
990
991 + /* Make sure timer and reset routine won't be called after
992 + * freeing device resources.
993 + */
994 + del_timer_sync(&adapter->timer_service);
995 cancel_work_sync(&adapter->reset_task);
996
997 rtnl_lock(); /* lock released inside the below if-else block */
998 diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c
999 index 01ae113f122a0..28d4c54505f9a 100644
1000 --- a/drivers/net/ethernet/cortina/gemini.c
1001 +++ b/drivers/net/ethernet/cortina/gemini.c
1002 @@ -2388,7 +2388,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
1003
1004 dev_info(dev, "probe %s ID %d\n", dev_name(dev), id);
1005
1006 - netdev = alloc_etherdev_mq(sizeof(*port), TX_QUEUE_NUM);
1007 + netdev = devm_alloc_etherdev_mqs(dev, sizeof(*port), TX_QUEUE_NUM, TX_QUEUE_NUM);
1008 if (!netdev) {
1009 dev_err(dev, "Can't allocate ethernet device #%d\n", id);
1010 return -ENOMEM;
1011 @@ -2520,7 +2520,6 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
1012 }
1013
1014 port->netdev = NULL;
1015 - free_netdev(netdev);
1016 return ret;
1017 }
1018
1019 @@ -2529,7 +2528,6 @@ static int gemini_ethernet_port_remove(struct platform_device *pdev)
1020 struct gemini_ethernet_port *port = platform_get_drvdata(pdev);
1021
1022 gemini_port_remove(port);
1023 - free_netdev(port->netdev);
1024 return 0;
1025 }
1026
1027 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
1028 index 39c112f1543c1..a0e4b12ac4ea2 100644
1029 --- a/drivers/net/ethernet/freescale/fec_main.c
1030 +++ b/drivers/net/ethernet/freescale/fec_main.c
1031 @@ -3707,11 +3707,11 @@ failed_mii_init:
1032 failed_irq:
1033 failed_init:
1034 fec_ptp_stop(pdev);
1035 - if (fep->reg_phy)
1036 - regulator_disable(fep->reg_phy);
1037 failed_reset:
1038 pm_runtime_put_noidle(&pdev->dev);
1039 pm_runtime_disable(&pdev->dev);
1040 + if (fep->reg_phy)
1041 + regulator_disable(fep->reg_phy);
1042 failed_regulator:
1043 clk_disable_unprepare(fep->clk_ahb);
1044 failed_clk_ahb:
1045 diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1046 index 69a2daaca5c56..d7684ac2522ef 100644
1047 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1048 +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
1049 @@ -1211,7 +1211,7 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
1050 #define I40E_AQC_SET_VSI_PROMISC_BROADCAST 0x04
1051 #define I40E_AQC_SET_VSI_DEFAULT 0x08
1052 #define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
1053 -#define I40E_AQC_SET_VSI_PROMISC_TX 0x8000
1054 +#define I40E_AQC_SET_VSI_PROMISC_RX_ONLY 0x8000
1055 __le16 seid;
1056 #define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
1057 __le16 vlan_tag;
1058 diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
1059 index 3160b5bbe6728..66f7deaf46ae2 100644
1060 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c
1061 +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
1062 @@ -1949,6 +1949,21 @@ i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
1063 return status;
1064 }
1065
1066 +/**
1067 + * i40e_is_aq_api_ver_ge
1068 + * @aq: pointer to AdminQ info containing HW API version to compare
1069 + * @maj: API major value
1070 + * @min: API minor value
1071 + *
1072 + * Assert whether current HW API version is greater/equal than provided.
1073 + **/
1074 +static bool i40e_is_aq_api_ver_ge(struct i40e_adminq_info *aq, u16 maj,
1075 + u16 min)
1076 +{
1077 + return (aq->api_maj_ver > maj ||
1078 + (aq->api_maj_ver == maj && aq->api_min_ver >= min));
1079 +}
1080 +
1081 /**
1082 * i40e_aq_add_vsi
1083 * @hw: pointer to the hw struct
1084 @@ -2074,18 +2089,16 @@ i40e_status i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
1085
1086 if (set) {
1087 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1088 - if (rx_only_promisc &&
1089 - (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
1090 - (hw->aq.api_maj_ver > 1)))
1091 - flags |= I40E_AQC_SET_VSI_PROMISC_TX;
1092 + if (rx_only_promisc && i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1093 + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1094 }
1095
1096 cmd->promiscuous_flags = cpu_to_le16(flags);
1097
1098 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1099 - if (((hw->aq.api_maj_ver >= 1) && (hw->aq.api_min_ver >= 5)) ||
1100 - (hw->aq.api_maj_ver > 1))
1101 - cmd->valid_flags |= cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_TX);
1102 + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1103 + cmd->valid_flags |=
1104 + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1105
1106 cmd->seid = cpu_to_le16(seid);
1107 status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
1108 @@ -2182,11 +2195,17 @@ enum i40e_status_code i40e_aq_set_vsi_uc_promisc_on_vlan(struct i40e_hw *hw,
1109 i40e_fill_default_direct_cmd_desc(&desc,
1110 i40e_aqc_opc_set_vsi_promiscuous_modes);
1111
1112 - if (enable)
1113 + if (enable) {
1114 flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
1115 + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1116 + flags |= I40E_AQC_SET_VSI_PROMISC_RX_ONLY;
1117 + }
1118
1119 cmd->promiscuous_flags = cpu_to_le16(flags);
1120 cmd->valid_flags = cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_UNICAST);
1121 + if (i40e_is_aq_api_ver_ge(&hw->aq, 1, 5))
1122 + cmd->valid_flags |=
1123 + cpu_to_le16(I40E_AQC_SET_VSI_PROMISC_RX_ONLY);
1124 cmd->seid = cpu_to_le16(seid);
1125 cmd->vlan_tag = cpu_to_le16(vid | I40E_AQC_SET_VSI_VLAN_VALID);
1126
1127 diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
1128 index 095ed81cc0ba4..b3c3911adfc2e 100644
1129 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c
1130 +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
1131 @@ -15342,6 +15342,9 @@ static void i40e_remove(struct pci_dev *pdev)
1132 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
1133 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
1134
1135 + while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state))
1136 + usleep_range(1000, 2000);
1137 +
1138 /* no more scheduling of any task */
1139 set_bit(__I40E_SUSPENDED, pf->state);
1140 set_bit(__I40E_DOWN, pf->state);
1141 diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
1142 index 24bb721a12bc0..42eb7a7ecd96b 100644
1143 --- a/drivers/net/hyperv/netvsc_drv.c
1144 +++ b/drivers/net/hyperv/netvsc_drv.c
1145 @@ -501,7 +501,7 @@ static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
1146 int rc;
1147
1148 skb->dev = vf_netdev;
1149 - skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;
1150 + skb_record_rx_queue(skb, qdisc_skb_cb(skb)->slave_dev_queue_mapping);
1151
1152 rc = dev_queue_xmit(skb);
1153 if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
1154 diff --git a/drivers/net/wan/Kconfig b/drivers/net/wan/Kconfig
1155 index dd1a147f29716..058d77d2e693d 100644
1156 --- a/drivers/net/wan/Kconfig
1157 +++ b/drivers/net/wan/Kconfig
1158 @@ -200,7 +200,7 @@ config WANXL_BUILD_FIRMWARE
1159 depends on WANXL && !PREVENT_FIRMWARE_BUILD
1160 help
1161 Allows you to rebuild firmware run by the QUICC processor.
1162 - It requires as68k, ld68k and hexdump programs.
1163 + It requires m68k toolchains and hexdump programs.
1164
1165 You should never need this option, say N.
1166
1167 diff --git a/drivers/net/wan/Makefile b/drivers/net/wan/Makefile
1168 index 701f5d2fe3b61..cf7a0a65aae8d 100644
1169 --- a/drivers/net/wan/Makefile
1170 +++ b/drivers/net/wan/Makefile
1171 @@ -40,17 +40,17 @@ $(obj)/wanxl.o: $(obj)/wanxlfw.inc
1172
1173 ifeq ($(CONFIG_WANXL_BUILD_FIRMWARE),y)
1174 ifeq ($(ARCH),m68k)
1175 - AS68K = $(AS)
1176 - LD68K = $(LD)
1177 + M68KCC = $(CC)
1178 + M68KLD = $(LD)
1179 else
1180 - AS68K = as68k
1181 - LD68K = ld68k
1182 + M68KCC = $(CROSS_COMPILE_M68K)gcc
1183 + M68KLD = $(CROSS_COMPILE_M68K)ld
1184 endif
1185
1186 quiet_cmd_build_wanxlfw = BLD FW $@
1187 cmd_build_wanxlfw = \
1188 - $(CPP) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi $< | $(AS68K) -m68360 -o $(obj)/wanxlfw.o; \
1189 - $(LD68K) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
1190 + $(M68KCC) -D__ASSEMBLY__ -Wp,-MD,$(depfile) -I$(srctree)/include/uapi -c -o $(obj)/wanxlfw.o $<; \
1191 + $(M68KLD) --oformat binary -Ttext 0x1000 $(obj)/wanxlfw.o -o $(obj)/wanxlfw.bin; \
1192 hexdump -ve '"\n" 16/1 "0x%02X,"' $(obj)/wanxlfw.bin | sed 's/0x ,//g;1s/^/static const u8 firmware[]={/;$$s/,$$/\n};\n/' >$(obj)/wanxlfw.inc; \
1193 rm -f $(obj)/wanxlfw.bin $(obj)/wanxlfw.o
1194
1195 diff --git a/drivers/opp/core.c b/drivers/opp/core.c
1196 index 9ff0538ee83a0..7b057c32e11b1 100644
1197 --- a/drivers/opp/core.c
1198 +++ b/drivers/opp/core.c
1199 @@ -843,10 +843,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
1200
1201 /* Return early if nothing to do */
1202 if (old_freq == freq) {
1203 - dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
1204 - __func__, freq);
1205 - ret = 0;
1206 - goto put_opp_table;
1207 + if (!opp_table->required_opp_tables && !opp_table->regulators) {
1208 + dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
1209 + __func__, freq);
1210 + ret = 0;
1211 + goto put_opp_table;
1212 + }
1213 }
1214
1215 temp_freq = old_freq;
1216 diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
1217 index 1a3420ee6a4d9..d5083b013fbce 100644
1218 --- a/drivers/rtc/rtc-goldfish.c
1219 +++ b/drivers/rtc/rtc-goldfish.c
1220 @@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
1221 rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
1222 writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
1223 writel(rtc_alarm64, base + TIMER_ALARM_LOW);
1224 + writel(1, base + TIMER_IRQ_ENABLED);
1225 } else {
1226 /*
1227 * if this function was called with enabled=0
1228 diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
1229 index cf63916814cca..5c652deb6fed4 100644
1230 --- a/drivers/s390/scsi/zfcp_fsf.c
1231 +++ b/drivers/s390/scsi/zfcp_fsf.c
1232 @@ -409,7 +409,7 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
1233 return;
1234 }
1235
1236 - del_timer(&req->timer);
1237 + del_timer_sync(&req->timer);
1238 zfcp_fsf_protstatus_eval(req);
1239 zfcp_fsf_fsfstatus_eval(req);
1240 req->handler(req);
1241 @@ -762,7 +762,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
1242 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free);
1243 req->issued = get_tod_clock();
1244 if (zfcp_qdio_send(qdio, &req->qdio_req)) {
1245 - del_timer(&req->timer);
1246 + del_timer_sync(&req->timer);
1247 /* lookup request again, list might have changed */
1248 zfcp_reqlist_find_rm(adapter->req_list, req_id);
1249 zfcp_erp_adapter_reopen(adapter, 0, "fsrs__1");
1250 diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
1251 index 2b865c6423e29..e00dc4693fcbd 100644
1252 --- a/drivers/scsi/libfc/fc_disc.c
1253 +++ b/drivers/scsi/libfc/fc_disc.c
1254 @@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1255
1256 if (PTR_ERR(fp) == -FC_EX_CLOSED)
1257 goto out;
1258 - if (IS_ERR(fp))
1259 - goto redisc;
1260 + if (IS_ERR(fp)) {
1261 + mutex_lock(&disc->disc_mutex);
1262 + fc_disc_restart(disc);
1263 + mutex_unlock(&disc->disc_mutex);
1264 + goto out;
1265 + }
1266
1267 cp = fc_frame_payload_get(fp, sizeof(*cp));
1268 if (!cp)
1269 @@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
1270 new_rdata->disc_id = disc->disc_id;
1271 fc_rport_login(new_rdata);
1272 }
1273 - goto out;
1274 + goto free_fp;
1275 }
1276 rdata->disc_id = disc->disc_id;
1277 mutex_unlock(&rdata->rp_mutex);
1278 @@ -626,6 +630,8 @@ redisc:
1279 fc_disc_restart(disc);
1280 mutex_unlock(&disc->disc_mutex);
1281 }
1282 +free_fp:
1283 + fc_frame_free(fp);
1284 out:
1285 kref_put(&rdata->kref, fc_rport_destroy);
1286 if (!IS_ERR(fp))
1287 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
1288 index d7ec4083a0911..d91c95d9981ac 100644
1289 --- a/drivers/scsi/qla2xxx/qla_os.c
1290 +++ b/drivers/scsi/qla2xxx/qla_os.c
1291 @@ -2804,10 +2804,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
1292 /* This may fail but that's ok */
1293 pci_enable_pcie_error_reporting(pdev);
1294
1295 - /* Turn off T10-DIF when FC-NVMe is enabled */
1296 - if (ql2xnvmeenable)
1297 - ql2xenabledif = 0;
1298 -
1299 ha = kzalloc(sizeof(struct qla_hw_data), GFP_KERNEL);
1300 if (!ha) {
1301 ql_log_pci(ql_log_fatal, pdev, 0x0009,
1302 diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
1303 index fe6cad9b2a0d2..03985919150b9 100644
1304 --- a/drivers/scsi/ufs/ufs_quirks.h
1305 +++ b/drivers/scsi/ufs/ufs_quirks.h
1306 @@ -12,6 +12,7 @@
1307 #define UFS_ANY_VENDOR 0xFFFF
1308 #define UFS_ANY_MODEL "ANY_MODEL"
1309
1310 +#define UFS_VENDOR_MICRON 0x12C
1311 #define UFS_VENDOR_TOSHIBA 0x198
1312 #define UFS_VENDOR_SAMSUNG 0x1CE
1313 #define UFS_VENDOR_SKHYNIX 0x1AD
1314 diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
1315 index 2b6853c7375c9..b41b88bcab3d9 100644
1316 --- a/drivers/scsi/ufs/ufshcd.c
1317 +++ b/drivers/scsi/ufs/ufshcd.c
1318 @@ -217,6 +217,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
1319
1320 static struct ufs_dev_fix ufs_fixups[] = {
1321 /* UFS cards deviations table */
1322 + UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
1323 + UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
1324 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
1325 UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
1326 UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
1327 diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
1328 index 6f7fdcbb9151f..5bf7542087776 100644
1329 --- a/drivers/spi/Kconfig
1330 +++ b/drivers/spi/Kconfig
1331 @@ -944,4 +944,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
1332
1333 endif # SPI_SLAVE
1334
1335 +config SPI_DYNAMIC
1336 + def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
1337 +
1338 endif # SPI
1339 diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
1340 index b222ce8d083ef..7e92ab0cc9920 100644
1341 --- a/drivers/spi/spi-stm32.c
1342 +++ b/drivers/spi/spi-stm32.c
1343 @@ -14,6 +14,7 @@
1344 #include <linux/iopoll.h>
1345 #include <linux/module.h>
1346 #include <linux/of_platform.h>
1347 +#include <linux/pinctrl/consumer.h>
1348 #include <linux/pm_runtime.h>
1349 #include <linux/reset.h>
1350 #include <linux/spi/spi.h>
1351 @@ -1986,6 +1987,8 @@ static int stm32_spi_remove(struct platform_device *pdev)
1352
1353 pm_runtime_disable(&pdev->dev);
1354
1355 + pinctrl_pm_select_sleep_state(&pdev->dev);
1356 +
1357 return 0;
1358 }
1359
1360 @@ -1997,13 +2000,18 @@ static int stm32_spi_runtime_suspend(struct device *dev)
1361
1362 clk_disable_unprepare(spi->clk);
1363
1364 - return 0;
1365 + return pinctrl_pm_select_sleep_state(dev);
1366 }
1367
1368 static int stm32_spi_runtime_resume(struct device *dev)
1369 {
1370 struct spi_master *master = dev_get_drvdata(dev);
1371 struct stm32_spi *spi = spi_master_get_devdata(master);
1372 + int ret;
1373 +
1374 + ret = pinctrl_pm_select_default_state(dev);
1375 + if (ret)
1376 + return ret;
1377
1378 return clk_prepare_enable(spi->clk);
1379 }
1380 @@ -2033,10 +2041,23 @@ static int stm32_spi_resume(struct device *dev)
1381 return ret;
1382
1383 ret = spi_master_resume(master);
1384 - if (ret)
1385 + if (ret) {
1386 clk_disable_unprepare(spi->clk);
1387 + return ret;
1388 + }
1389
1390 - return ret;
1391 + ret = pm_runtime_get_sync(dev);
1392 + if (ret) {
1393 + dev_err(dev, "Unable to power device:%d\n", ret);
1394 + return ret;
1395 + }
1396 +
1397 + spi->cfg->config(spi);
1398 +
1399 + pm_runtime_mark_last_busy(dev);
1400 + pm_runtime_put_autosuspend(dev);
1401 +
1402 + return 0;
1403 }
1404 #endif
1405
1406 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
1407 index c6242f0a307f9..6a81b2a33cb4b 100644
1408 --- a/drivers/spi/spi.c
1409 +++ b/drivers/spi/spi.c
1410 @@ -475,6 +475,12 @@ static LIST_HEAD(spi_controller_list);
1411 */
1412 static DEFINE_MUTEX(board_lock);
1413
1414 +/*
1415 + * Prevents addition of devices with same chip select and
1416 + * addition of devices below an unregistering controller.
1417 + */
1418 +static DEFINE_MUTEX(spi_add_lock);
1419 +
1420 /**
1421 * spi_alloc_device - Allocate a new SPI device
1422 * @ctlr: Controller to which device is connected
1423 @@ -553,7 +559,6 @@ static int spi_dev_check(struct device *dev, void *data)
1424 */
1425 int spi_add_device(struct spi_device *spi)
1426 {
1427 - static DEFINE_MUTEX(spi_add_lock);
1428 struct spi_controller *ctlr = spi->controller;
1429 struct device *dev = ctlr->dev.parent;
1430 int status;
1431 @@ -581,6 +586,13 @@ int spi_add_device(struct spi_device *spi)
1432 goto done;
1433 }
1434
1435 + /* Controller may unregister concurrently */
1436 + if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
1437 + !device_is_registered(&ctlr->dev)) {
1438 + status = -ENODEV;
1439 + goto done;
1440 + }
1441 +
1442 /* Descriptors take precedence */
1443 if (ctlr->cs_gpiods)
1444 spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
1445 @@ -2582,6 +2594,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
1446 struct spi_controller *found;
1447 int id = ctlr->bus_num;
1448
1449 + /* Prevent addition of new devices, unregister existing ones */
1450 + if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1451 + mutex_lock(&spi_add_lock);
1452 +
1453 device_for_each_child(&ctlr->dev, NULL, __unregister);
1454
1455 /* First make sure that this controller was ever added */
1456 @@ -2602,6 +2618,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
1457 if (found == ctlr)
1458 idr_remove(&spi_master_idr, id);
1459 mutex_unlock(&board_lock);
1460 +
1461 + if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
1462 + mutex_unlock(&spi_add_lock);
1463 }
1464 EXPORT_SYMBOL_GPL(spi_unregister_controller);
1465
1466 diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
1467 index a497e7c1f4fcc..d766fb14942b3 100644
1468 --- a/drivers/target/target_core_user.c
1469 +++ b/drivers/target/target_core_user.c
1470 @@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
1471 size = round_up(size+offset, PAGE_SIZE);
1472
1473 while (size) {
1474 - flush_dcache_page(virt_to_page(start));
1475 + flush_dcache_page(vmalloc_to_page(start));
1476 start += PAGE_SIZE;
1477 size -= PAGE_SIZE;
1478 }
1479 diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
1480 index 6cc47af1f06d3..ca8c10aa4a4bc 100644
1481 --- a/drivers/vfio/vfio_iommu_type1.c
1482 +++ b/drivers/vfio/vfio_iommu_type1.c
1483 @@ -1187,13 +1187,16 @@ static int vfio_bus_type(struct device *dev, void *data)
1484 static int vfio_iommu_replay(struct vfio_iommu *iommu,
1485 struct vfio_domain *domain)
1486 {
1487 - struct vfio_domain *d;
1488 + struct vfio_domain *d = NULL;
1489 struct rb_node *n;
1490 unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1491 int ret;
1492
1493 /* Arbitrarily pick the first domain in the list for lookups */
1494 - d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
1495 + if (!list_empty(&iommu->domain_list))
1496 + d = list_first_entry(&iommu->domain_list,
1497 + struct vfio_domain, next);
1498 +
1499 n = rb_first(&iommu->dma_list);
1500
1501 for (; n; n = rb_next(n)) {
1502 @@ -1211,6 +1214,11 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
1503 phys_addr_t p;
1504 dma_addr_t i;
1505
1506 + if (WARN_ON(!d)) { /* mapped w/o a domain?! */
1507 + ret = -EINVAL;
1508 + goto unwind;
1509 + }
1510 +
1511 phys = iommu_iova_to_phys(d->domain, iova);
1512
1513 if (WARN_ON(!phys)) {
1514 @@ -1240,7 +1248,7 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
1515 if (npage <= 0) {
1516 WARN_ON(!npage);
1517 ret = (int)npage;
1518 - return ret;
1519 + goto unwind;
1520 }
1521
1522 phys = pfn << PAGE_SHIFT;
1523 @@ -1249,14 +1257,67 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
1524
1525 ret = iommu_map(domain->domain, iova, phys,
1526 size, dma->prot | domain->prot);
1527 - if (ret)
1528 - return ret;
1529 + if (ret) {
1530 + if (!dma->iommu_mapped)
1531 + vfio_unpin_pages_remote(dma, iova,
1532 + phys >> PAGE_SHIFT,
1533 + size >> PAGE_SHIFT,
1534 + true);
1535 + goto unwind;
1536 + }
1537
1538 iova += size;
1539 }
1540 + }
1541 +
1542 + /* All dmas are now mapped, defer to second tree walk for unwind */
1543 + for (n = rb_first(&iommu->dma_list); n; n = rb_next(n)) {
1544 + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1545 +
1546 dma->iommu_mapped = true;
1547 }
1548 +
1549 return 0;
1550 +
1551 +unwind:
1552 + for (; n; n = rb_prev(n)) {
1553 + struct vfio_dma *dma = rb_entry(n, struct vfio_dma, node);
1554 + dma_addr_t iova;
1555 +
1556 + if (dma->iommu_mapped) {
1557 + iommu_unmap(domain->domain, dma->iova, dma->size);
1558 + continue;
1559 + }
1560 +
1561 + iova = dma->iova;
1562 + while (iova < dma->iova + dma->size) {
1563 + phys_addr_t phys, p;
1564 + size_t size;
1565 + dma_addr_t i;
1566 +
1567 + phys = iommu_iova_to_phys(domain->domain, iova);
1568 + if (!phys) {
1569 + iova += PAGE_SIZE;
1570 + continue;
1571 + }
1572 +
1573 + size = PAGE_SIZE;
1574 + p = phys + size;
1575 + i = iova + size;
1576 + while (i < dma->iova + dma->size &&
1577 + p == iommu_iova_to_phys(domain->domain, i)) {
1578 + size += PAGE_SIZE;
1579 + p += PAGE_SIZE;
1580 + i += PAGE_SIZE;
1581 + }
1582 +
1583 + iommu_unmap(domain->domain, iova, size);
1584 + vfio_unpin_pages_remote(dma, iova, phys >> PAGE_SHIFT,
1585 + size >> PAGE_SHIFT, true);
1586 + }
1587 + }
1588 +
1589 + return ret;
1590 }
1591
1592 /*
1593 diff --git a/drivers/video/fbdev/efifb.c b/drivers/video/fbdev/efifb.c
1594 index 51d97ec4f58f9..e0cbf5b3d2174 100644
1595 --- a/drivers/video/fbdev/efifb.c
1596 +++ b/drivers/video/fbdev/efifb.c
1597 @@ -453,7 +453,7 @@ static int efifb_probe(struct platform_device *dev)
1598 info->apertures->ranges[0].base = efifb_fix.smem_start;
1599 info->apertures->ranges[0].size = size_remap;
1600
1601 - if (efi_enabled(EFI_BOOT) &&
1602 + if (efi_enabled(EFI_MEMMAP) &&
1603 !efi_mem_desc_lookup(efifb_fix.smem_start, &md)) {
1604 if ((efifb_fix.smem_start + efifb_fix.smem_len) >
1605 (md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT))) {
1606 diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
1607 index 58b96baa8d488..4f7c73e6052f6 100644
1608 --- a/drivers/virtio/virtio_ring.c
1609 +++ b/drivers/virtio/virtio_ring.c
1610 @@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
1611 {
1612 struct vring_virtqueue *vq = to_vvq(_vq);
1613
1614 + if (unlikely(vq->broken))
1615 + return false;
1616 +
1617 virtio_mb(vq->weak_barriers);
1618 return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
1619 virtqueue_poll_split(_vq, last_used_idx);
1620 diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
1621 index 456a164364a22..98a9d6892d989 100644
1622 --- a/drivers/xen/preempt.c
1623 +++ b/drivers/xen/preempt.c
1624 @@ -27,7 +27,7 @@ EXPORT_SYMBOL_GPL(xen_in_preemptible_hcall);
1625 asmlinkage __visible void xen_maybe_preempt_hcall(void)
1626 {
1627 if (unlikely(__this_cpu_read(xen_in_preemptible_hcall)
1628 - && need_resched())) {
1629 + && need_resched() && !preempt_count())) {
1630 /*
1631 * Clear flag as we may be rescheduled on a different
1632 * cpu.
1633 diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
1634 index bd3a10dfac157..06346422f7432 100644
1635 --- a/drivers/xen/swiotlb-xen.c
1636 +++ b/drivers/xen/swiotlb-xen.c
1637 @@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
1638 int order = get_order(size);
1639 phys_addr_t phys;
1640 u64 dma_mask = DMA_BIT_MASK(32);
1641 + struct page *page;
1642
1643 if (hwdev && hwdev->coherent_dma_mask)
1644 dma_mask = hwdev->coherent_dma_mask;
1645 @@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
1646 /* Convert the size to actually allocated. */
1647 size = 1UL << (order + XEN_PAGE_SHIFT);
1648
1649 + if (is_vmalloc_addr(vaddr))
1650 + page = vmalloc_to_page(vaddr);
1651 + else
1652 + page = virt_to_page(vaddr);
1653 +
1654 if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
1655 range_straddles_page_boundary(phys, size)) &&
1656 - TestClearPageXenRemapped(virt_to_page(vaddr)))
1657 + TestClearPageXenRemapped(page))
1658 xen_destroy_contiguous_region(phys, order);
1659
1660 xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
1661 diff --git a/fs/afs/dynroot.c b/fs/afs/dynroot.c
1662 index 7503899c0a1b5..f07e53ab808e3 100644
1663 --- a/fs/afs/dynroot.c
1664 +++ b/fs/afs/dynroot.c
1665 @@ -289,15 +289,17 @@ void afs_dynroot_depopulate(struct super_block *sb)
1666 net->dynroot_sb = NULL;
1667 mutex_unlock(&net->proc_cells_lock);
1668
1669 - inode_lock(root->d_inode);
1670 -
1671 - /* Remove all the pins for dirs created for manually added cells */
1672 - list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
1673 - if (subdir->d_fsdata) {
1674 - subdir->d_fsdata = NULL;
1675 - dput(subdir);
1676 + if (root) {
1677 + inode_lock(root->d_inode);
1678 +
1679 + /* Remove all the pins for dirs created for manually added cells */
1680 + list_for_each_entry_safe(subdir, tmp, &root->d_subdirs, d_child) {
1681 + if (subdir->d_fsdata) {
1682 + subdir->d_fsdata = NULL;
1683 + dput(subdir);
1684 + }
1685 }
1686 - }
1687
1688 - inode_unlock(root->d_inode);
1689 + inode_unlock(root->d_inode);
1690 + }
1691 }
1692 diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
1693 index 42d69e77f89d9..b167649f5f5de 100644
1694 --- a/fs/btrfs/block-group.c
1695 +++ b/fs/btrfs/block-group.c
1696 @@ -2168,7 +2168,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
1697 return 0;
1698 }
1699
1700 - if (trans->aborted)
1701 + if (TRANS_ABORTED(trans))
1702 return 0;
1703 again:
1704 inode = lookup_free_space_inode(block_group, path);
1705 diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
1706 index 2374f3f6f3b70..18357b054a91e 100644
1707 --- a/fs/btrfs/ctree.h
1708 +++ b/fs/btrfs/ctree.h
1709 @@ -2965,6 +2965,8 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
1710 int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
1711 unsigned long new_flags);
1712 int btrfs_sync_fs(struct super_block *sb, int wait);
1713 +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1714 + u64 subvol_objectid);
1715
1716 static inline __printf(2, 3) __cold
1717 void btrfs_no_printk(const struct btrfs_fs_info *fs_info, const char *fmt, ...)
1718 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
1719 index 5bcccfbcc7c15..a34ee9c2f3151 100644
1720 --- a/fs/btrfs/delayed-inode.c
1721 +++ b/fs/btrfs/delayed-inode.c
1722 @@ -1151,7 +1151,7 @@ static int __btrfs_run_delayed_items(struct btrfs_trans_handle *trans, int nr)
1723 int ret = 0;
1724 bool count = (nr > 0);
1725
1726 - if (trans->aborted)
1727 + if (TRANS_ABORTED(trans))
1728 return -EIO;
1729
1730 path = btrfs_alloc_path();
1731 diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
1732 index ddf28ecf17f93..93cceeba484cc 100644
1733 --- a/fs/btrfs/export.c
1734 +++ b/fs/btrfs/export.c
1735 @@ -57,9 +57,9 @@ static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
1736 return type;
1737 }
1738
1739 -static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
1740 - u64 root_objectid, u32 generation,
1741 - int check_generation)
1742 +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
1743 + u64 root_objectid, u32 generation,
1744 + int check_generation)
1745 {
1746 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
1747 struct btrfs_root *root;
1748 @@ -152,7 +152,7 @@ static struct dentry *btrfs_fh_to_dentry(struct super_block *sb, struct fid *fh,
1749 return btrfs_get_dentry(sb, objectid, root_objectid, generation, 1);
1750 }
1751
1752 -static struct dentry *btrfs_get_parent(struct dentry *child)
1753 +struct dentry *btrfs_get_parent(struct dentry *child)
1754 {
1755 struct inode *dir = d_inode(child);
1756 struct btrfs_fs_info *fs_info = btrfs_sb(dir->i_sb);
1757 diff --git a/fs/btrfs/export.h b/fs/btrfs/export.h
1758 index 57488ecd7d4ef..f32f4113c976a 100644
1759 --- a/fs/btrfs/export.h
1760 +++ b/fs/btrfs/export.h
1761 @@ -18,4 +18,9 @@ struct btrfs_fid {
1762 u64 parent_root_objectid;
1763 } __attribute__ ((packed));
1764
1765 +struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
1766 + u64 root_objectid, u32 generation,
1767 + int check_generation);
1768 +struct dentry *btrfs_get_parent(struct dentry *child);
1769 +
1770 #endif
1771 diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
1772 index 739332b462059..a36bd4507bacd 100644
1773 --- a/fs/btrfs/extent-tree.c
1774 +++ b/fs/btrfs/extent-tree.c
1775 @@ -1561,7 +1561,7 @@ static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1776 int err = 0;
1777 int metadata = !extent_op->is_data;
1778
1779 - if (trans->aborted)
1780 + if (TRANS_ABORTED(trans))
1781 return 0;
1782
1783 if (metadata && !btrfs_fs_incompat(fs_info, SKINNY_METADATA))
1784 @@ -1681,7 +1681,7 @@ static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1785 {
1786 int ret = 0;
1787
1788 - if (trans->aborted) {
1789 + if (TRANS_ABORTED(trans)) {
1790 if (insert_reserved)
1791 btrfs_pin_extent(trans->fs_info, node->bytenr,
1792 node->num_bytes, 1);
1793 @@ -2169,7 +2169,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
1794 int run_all = count == (unsigned long)-1;
1795
1796 /* We'll clean this up in btrfs_cleanup_transaction */
1797 - if (trans->aborted)
1798 + if (TRANS_ABORTED(trans))
1799 return 0;
1800
1801 if (test_bit(BTRFS_FS_CREATING_FREE_SPACE_TREE, &fs_info->flags))
1802 @@ -2892,7 +2892,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
1803 else
1804 unpin = &fs_info->freed_extents[0];
1805
1806 - while (!trans->aborted) {
1807 + while (!TRANS_ABORTED(trans)) {
1808 struct extent_state *cached_state = NULL;
1809
1810 mutex_lock(&fs_info->unused_bg_unpin_mutex);
1811 @@ -2924,7 +2924,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans)
1812 u64 trimmed = 0;
1813
1814 ret = -EROFS;
1815 - if (!trans->aborted)
1816 + if (!TRANS_ABORTED(trans))
1817 ret = btrfs_discard_extent(fs_info,
1818 block_group->key.objectid,
1819 block_group->key.offset,
1820 diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
1821 index 035ea5bc692ad..5707bf0575d43 100644
1822 --- a/fs/btrfs/extent_io.c
1823 +++ b/fs/btrfs/extent_io.c
1824 @@ -4073,7 +4073,7 @@ retry:
1825 if (!test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
1826 ret = flush_write_bio(&epd);
1827 } else {
1828 - ret = -EUCLEAN;
1829 + ret = -EROFS;
1830 end_write_bio(&epd, ret);
1831 }
1832 return ret;
1833 diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
1834 index a7b043fd7a572..498b824148187 100644
1835 --- a/fs/btrfs/scrub.c
1836 +++ b/fs/btrfs/scrub.c
1837 @@ -3717,7 +3717,7 @@ static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
1838 struct btrfs_fs_info *fs_info = sctx->fs_info;
1839
1840 if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
1841 - return -EIO;
1842 + return -EROFS;
1843
1844 /* Seed devices of a new filesystem has their own generation. */
1845 if (scrub_dev->fs_devices != fs_info->fs_devices)
1846 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
1847 index 4b0ee34aa65d5..a1498df419b4f 100644
1848 --- a/fs/btrfs/super.c
1849 +++ b/fs/btrfs/super.c
1850 @@ -241,7 +241,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
1851 {
1852 struct btrfs_fs_info *fs_info = trans->fs_info;
1853
1854 - trans->aborted = errno;
1855 + WRITE_ONCE(trans->aborted, errno);
1856 /* Nothing used. The other threads that have joined this
1857 * transaction may be able to continue. */
1858 if (!trans->dirty && list_empty(&trans->new_bgs)) {
1859 @@ -1009,8 +1009,8 @@ out:
1860 return error;
1861 }
1862
1863 -static char *get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1864 - u64 subvol_objectid)
1865 +char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info,
1866 + u64 subvol_objectid)
1867 {
1868 struct btrfs_root *root = fs_info->tree_root;
1869 struct btrfs_root *fs_root;
1870 @@ -1291,6 +1291,7 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1871 {
1872 struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
1873 const char *compress_type;
1874 + const char *subvol_name;
1875
1876 if (btrfs_test_opt(info, DEGRADED))
1877 seq_puts(seq, ",degraded");
1878 @@ -1375,8 +1376,13 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
1879 seq_puts(seq, ",ref_verify");
1880 seq_printf(seq, ",subvolid=%llu",
1881 BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1882 - seq_puts(seq, ",subvol=");
1883 - seq_dentry(seq, dentry, " \t\n\\");
1884 + subvol_name = btrfs_get_subvol_name_from_objectid(info,
1885 + BTRFS_I(d_inode(dentry))->root->root_key.objectid);
1886 + if (!IS_ERR(subvol_name)) {
1887 + seq_puts(seq, ",subvol=");
1888 + seq_escape(seq, subvol_name, " \t\n\\");
1889 + kfree(subvol_name);
1890 + }
1891 return 0;
1892 }
1893
1894 @@ -1421,8 +1427,8 @@ static struct dentry *mount_subvol(const char *subvol_name, u64 subvol_objectid,
1895 goto out;
1896 }
1897 }
1898 - subvol_name = get_subvol_name_from_objectid(btrfs_sb(mnt->mnt_sb),
1899 - subvol_objectid);
1900 + subvol_name = btrfs_get_subvol_name_from_objectid(
1901 + btrfs_sb(mnt->mnt_sb), subvol_objectid);
1902 if (IS_ERR(subvol_name)) {
1903 root = ERR_CAST(subvol_name);
1904 subvol_name = NULL;
1905 diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
1906 index 54589e940f9af..c346ee7ec18d4 100644
1907 --- a/fs/btrfs/transaction.c
1908 +++ b/fs/btrfs/transaction.c
1909 @@ -174,7 +174,7 @@ loop:
1910
1911 cur_trans = fs_info->running_transaction;
1912 if (cur_trans) {
1913 - if (cur_trans->aborted) {
1914 + if (TRANS_ABORTED(cur_trans)) {
1915 spin_unlock(&fs_info->trans_lock);
1916 return cur_trans->aborted;
1917 }
1918 @@ -390,7 +390,7 @@ static inline int is_transaction_blocked(struct btrfs_transaction *trans)
1919 {
1920 return (trans->state >= TRANS_STATE_BLOCKED &&
1921 trans->state < TRANS_STATE_UNBLOCKED &&
1922 - !trans->aborted);
1923 + !TRANS_ABORTED(trans));
1924 }
1925
1926 /* wait for commit against the current transaction to become unblocked
1927 @@ -409,7 +409,7 @@ static void wait_current_trans(struct btrfs_fs_info *fs_info)
1928
1929 wait_event(fs_info->transaction_wait,
1930 cur_trans->state >= TRANS_STATE_UNBLOCKED ||
1931 - cur_trans->aborted);
1932 + TRANS_ABORTED(cur_trans));
1933 btrfs_put_transaction(cur_trans);
1934 } else {
1935 spin_unlock(&fs_info->trans_lock);
1936 @@ -870,10 +870,13 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
1937 if (throttle)
1938 btrfs_run_delayed_iputs(info);
1939
1940 - if (trans->aborted ||
1941 + if (TRANS_ABORTED(trans) ||
1942 test_bit(BTRFS_FS_STATE_ERROR, &info->fs_state)) {
1943 wake_up_process(info->transaction_kthread);
1944 - err = -EIO;
1945 + if (TRANS_ABORTED(trans))
1946 + err = trans->aborted;
1947 + else
1948 + err = -EROFS;
1949 }
1950
1951 kmem_cache_free(btrfs_trans_handle_cachep, trans);
1952 @@ -1727,7 +1730,8 @@ static void wait_current_trans_commit_start(struct btrfs_fs_info *fs_info,
1953 struct btrfs_transaction *trans)
1954 {
1955 wait_event(fs_info->transaction_blocked_wait,
1956 - trans->state >= TRANS_STATE_COMMIT_START || trans->aborted);
1957 + trans->state >= TRANS_STATE_COMMIT_START ||
1958 + TRANS_ABORTED(trans));
1959 }
1960
1961 /*
1962 @@ -1739,7 +1743,8 @@ static void wait_current_trans_commit_start_and_unblock(
1963 struct btrfs_transaction *trans)
1964 {
1965 wait_event(fs_info->transaction_wait,
1966 - trans->state >= TRANS_STATE_UNBLOCKED || trans->aborted);
1967 + trans->state >= TRANS_STATE_UNBLOCKED ||
1968 + TRANS_ABORTED(trans));
1969 }
1970
1971 /*
1972 @@ -1957,7 +1962,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1973 trans->dirty = true;
1974
1975 /* Stop the commit early if ->aborted is set */
1976 - if (unlikely(READ_ONCE(cur_trans->aborted))) {
1977 + if (TRANS_ABORTED(cur_trans)) {
1978 ret = cur_trans->aborted;
1979 btrfs_end_transaction(trans);
1980 return ret;
1981 @@ -2031,7 +2036,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1982
1983 wait_for_commit(cur_trans);
1984
1985 - if (unlikely(cur_trans->aborted))
1986 + if (TRANS_ABORTED(cur_trans))
1987 ret = cur_trans->aborted;
1988
1989 btrfs_put_transaction(cur_trans);
1990 @@ -2050,7 +2055,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
1991 spin_unlock(&fs_info->trans_lock);
1992
1993 wait_for_commit(prev_trans);
1994 - ret = prev_trans->aborted;
1995 + ret = READ_ONCE(prev_trans->aborted);
1996
1997 btrfs_put_transaction(prev_trans);
1998 if (ret)
1999 @@ -2104,8 +2109,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2000 wait_event(cur_trans->writer_wait,
2001 atomic_read(&cur_trans->num_writers) == 1);
2002
2003 - /* ->aborted might be set after the previous check, so check it */
2004 - if (unlikely(READ_ONCE(cur_trans->aborted))) {
2005 + if (TRANS_ABORTED(cur_trans)) {
2006 ret = cur_trans->aborted;
2007 goto scrub_continue;
2008 }
2009 @@ -2223,7 +2227,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
2010 * The tasks which save the space cache and inode cache may also
2011 * update ->aborted, check it.
2012 */
2013 - if (unlikely(READ_ONCE(cur_trans->aborted))) {
2014 + if (TRANS_ABORTED(cur_trans)) {
2015 ret = cur_trans->aborted;
2016 mutex_unlock(&fs_info->tree_log_mutex);
2017 mutex_unlock(&fs_info->reloc_mutex);
2018 diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
2019 index b15c31d231488..7291a2a930751 100644
2020 --- a/fs/btrfs/transaction.h
2021 +++ b/fs/btrfs/transaction.h
2022 @@ -116,6 +116,10 @@ struct btrfs_trans_handle {
2023 struct btrfs_block_rsv *orig_rsv;
2024 refcount_t use_count;
2025 unsigned int type;
2026 + /*
2027 +	 * Error code of transaction abort; it is set outside of locks and must be
2028 +	 * accessed with READ_ONCE/WRITE_ONCE
2029 + */
2030 short aborted;
2031 bool adding_csums;
2032 bool allocating_chunk;
2033 @@ -127,6 +131,14 @@ struct btrfs_trans_handle {
2034 struct list_head new_bgs;
2035 };
2036
2037 +/*
2038 + * The abort status can be changed between calls and is not protected by locks.
2039 + * This accepts btrfs_transaction and btrfs_trans_handle as types. Once it's
2040 + * set to a non-zero value it does not change, so the macro should be used in
2041 + * checks but is not necessary for subsequent reads of the value.
2042 + */
2043 +#define TRANS_ABORTED(trans) (unlikely(READ_ONCE((trans)->aborted)))
2044 +
2045 struct btrfs_pending_snapshot {
2046 struct dentry *dentry;
2047 struct inode *dir;
2048 diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
2049 index 701bc3f4d4ba1..b0077f5a31688 100644
2050 --- a/fs/ceph/mds_client.c
2051 +++ b/fs/ceph/mds_client.c
2052 @@ -4143,7 +4143,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
2053 return -ENOMEM;
2054 }
2055
2056 - fsc->mdsc = mdsc;
2057 init_completion(&mdsc->safe_umount_waiters);
2058 init_waitqueue_head(&mdsc->session_close_wq);
2059 INIT_LIST_HEAD(&mdsc->waiting_for_map);
2060 @@ -4195,6 +4194,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
2061
2062 strscpy(mdsc->nodename, utsname()->nodename,
2063 sizeof(mdsc->nodename));
2064 +
2065 + fsc->mdsc = mdsc;
2066 return 0;
2067 }
2068
2069 diff --git a/fs/eventpoll.c b/fs/eventpoll.c
2070 index 6307c1d883e0a..0d9b1e2b9da72 100644
2071 --- a/fs/eventpoll.c
2072 +++ b/fs/eventpoll.c
2073 @@ -1991,9 +1991,11 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests)
2074 * not already there, and calling reverse_path_check()
2075 * during ep_insert().
2076 */
2077 - if (list_empty(&epi->ffd.file->f_tfile_llink))
2078 + if (list_empty(&epi->ffd.file->f_tfile_llink)) {
2079 + get_file(epi->ffd.file);
2080 list_add(&epi->ffd.file->f_tfile_llink,
2081 &tfile_check_list);
2082 + }
2083 }
2084 }
2085 mutex_unlock(&ep->mtx);
2086 @@ -2037,6 +2039,7 @@ static void clear_tfile_check_list(void)
2087 file = list_first_entry(&tfile_check_list, struct file,
2088 f_tfile_llink);
2089 list_del_init(&file->f_tfile_llink);
2090 + fput(file);
2091 }
2092 INIT_LIST_HEAD(&tfile_check_list);
2093 }
2094 @@ -2192,13 +2195,13 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2095 mutex_lock(&epmutex);
2096 if (is_file_epoll(tf.file)) {
2097 error = -ELOOP;
2098 - if (ep_loop_check(ep, tf.file) != 0) {
2099 - clear_tfile_check_list();
2100 + if (ep_loop_check(ep, tf.file) != 0)
2101 goto error_tgt_fput;
2102 - }
2103 - } else
2104 + } else {
2105 + get_file(tf.file);
2106 list_add(&tf.file->f_tfile_llink,
2107 &tfile_check_list);
2108 + }
2109 mutex_lock_nested(&ep->mtx, 0);
2110 if (is_file_epoll(tf.file)) {
2111 tep = tf.file->private_data;
2112 @@ -2222,8 +2225,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2113 error = ep_insert(ep, &epds, tf.file, fd, full_check);
2114 } else
2115 error = -EEXIST;
2116 - if (full_check)
2117 - clear_tfile_check_list();
2118 break;
2119 case EPOLL_CTL_DEL:
2120 if (epi)
2121 @@ -2246,8 +2247,10 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
2122 mutex_unlock(&ep->mtx);
2123
2124 error_tgt_fput:
2125 - if (full_check)
2126 + if (full_check) {
2127 + clear_tfile_check_list();
2128 mutex_unlock(&epmutex);
2129 + }
2130
2131 fdput(tf);
2132 error_fput:
2133 diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
2134 index ff8e1205127ee..ceb54ccc937e9 100644
2135 --- a/fs/ext4/block_validity.c
2136 +++ b/fs/ext4/block_validity.c
2137 @@ -68,7 +68,7 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
2138 ext4_fsblk_t start_blk,
2139 unsigned int count)
2140 {
2141 - struct ext4_system_zone *new_entry = NULL, *entry;
2142 + struct ext4_system_zone *new_entry, *entry;
2143 struct rb_node **n = &system_blks->root.rb_node, *node;
2144 struct rb_node *parent = NULL, *new_node = NULL;
2145
2146 @@ -79,30 +79,20 @@ static int add_system_zone(struct ext4_system_blocks *system_blks,
2147 n = &(*n)->rb_left;
2148 else if (start_blk >= (entry->start_blk + entry->count))
2149 n = &(*n)->rb_right;
2150 - else {
2151 - if (start_blk + count > (entry->start_blk +
2152 - entry->count))
2153 - entry->count = (start_blk + count -
2154 - entry->start_blk);
2155 - new_node = *n;
2156 - new_entry = rb_entry(new_node, struct ext4_system_zone,
2157 - node);
2158 - break;
2159 - }
2160 + else /* Unexpected overlap of system zones. */
2161 + return -EFSCORRUPTED;
2162 }
2163
2164 - if (!new_entry) {
2165 - new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
2166 - GFP_KERNEL);
2167 - if (!new_entry)
2168 - return -ENOMEM;
2169 - new_entry->start_blk = start_blk;
2170 - new_entry->count = count;
2171 - new_node = &new_entry->node;
2172 -
2173 - rb_link_node(new_node, parent, n);
2174 - rb_insert_color(new_node, &system_blks->root);
2175 - }
2176 + new_entry = kmem_cache_alloc(ext4_system_zone_cachep,
2177 + GFP_KERNEL);
2178 + if (!new_entry)
2179 + return -ENOMEM;
2180 + new_entry->start_blk = start_blk;
2181 + new_entry->count = count;
2182 + new_node = &new_entry->node;
2183 +
2184 + rb_link_node(new_node, parent, n);
2185 + rb_insert_color(new_node, &system_blks->root);
2186
2187 /* Can we merge to the left? */
2188 node = rb_prev(new_node);
2189 diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
2190 index a564d0289a70a..36a81b57012a5 100644
2191 --- a/fs/ext4/namei.c
2192 +++ b/fs/ext4/namei.c
2193 @@ -1392,8 +1392,8 @@ int ext4_search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
2194 ext4_match(dir, fname, de)) {
2195 /* found a match - just to be sure, do
2196 * a full check */
2197 - if (ext4_check_dir_entry(dir, NULL, de, bh, bh->b_data,
2198 - bh->b_size, offset))
2199 + if (ext4_check_dir_entry(dir, NULL, de, bh, search_buf,
2200 + buf_size, offset))
2201 return -1;
2202 *res_dir = de;
2203 return 1;
2204 @@ -1852,7 +1852,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
2205 blocksize, hinfo, map);
2206 map -= count;
2207 dx_sort_map(map, count);
2208 - /* Split the existing block in the middle, size-wise */
2209 + /* Ensure that neither split block is over half full */
2210 size = 0;
2211 move = 0;
2212 for (i = count-1; i >= 0; i--) {
2213 @@ -1862,8 +1862,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
2214 size += map[i].size;
2215 move++;
2216 }
2217 - /* map index at which we will split */
2218 - split = count - move;
2219 + /*
2220 + * map index at which we will split
2221 + *
2222 + * If the sum of active entries didn't exceed half the block size, just
2223 + * split it in half by count; each resulting block will have at least
2224 + * half the space free.
2225 + */
2226 + if (i > 0)
2227 + split = count - move;
2228 + else
2229 + split = count/2;
2230 +
2231 hash2 = map[split].hash;
2232 continued = hash2 == map[split - 1].hash;
2233 dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",
2234 @@ -2462,7 +2472,7 @@ int ext4_generic_delete_entry(handle_t *handle,
2235 de = (struct ext4_dir_entry_2 *)entry_buf;
2236 while (i < buf_size - csum_size) {
2237 if (ext4_check_dir_entry(dir, NULL, de, bh,
2238 - bh->b_data, bh->b_size, i))
2239 + entry_buf, buf_size, i))
2240 return -EFSCORRUPTED;
2241 if (de == de_del) {
2242 if (pde)
2243 diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
2244 index adbb8fef22162..50fa3e08c02f3 100644
2245 --- a/fs/gfs2/bmap.c
2246 +++ b/fs/gfs2/bmap.c
2247 @@ -1350,9 +1350,15 @@ int gfs2_extent_map(struct inode *inode, u64 lblock, int *new, u64 *dblock, unsi
2248 return ret;
2249 }
2250
2251 +/*
2252 + * NOTE: Never call gfs2_block_zero_range with an open transaction because it
2253 + * uses iomap write to perform its actions, which begin their own transactions
2254 + * (iomap_begin, page_prepare, etc.)
2255 + */
2256 static int gfs2_block_zero_range(struct inode *inode, loff_t from,
2257 unsigned int length)
2258 {
2259 + BUG_ON(current->journal_info);
2260 return iomap_zero_range(inode, from, length, NULL, &gfs2_iomap_ops);
2261 }
2262
2263 @@ -1413,6 +1419,16 @@ static int trunc_start(struct inode *inode, u64 newsize)
2264 u64 oldsize = inode->i_size;
2265 int error;
2266
2267 + if (!gfs2_is_stuffed(ip)) {
2268 + unsigned int blocksize = i_blocksize(inode);
2269 + unsigned int offs = newsize & (blocksize - 1);
2270 + if (offs) {
2271 + error = gfs2_block_zero_range(inode, newsize,
2272 + blocksize - offs);
2273 + if (error)
2274 + return error;
2275 + }
2276 + }
2277 if (journaled)
2278 error = gfs2_trans_begin(sdp, RES_DINODE + RES_JDATA, GFS2_JTRUNC_REVOKES);
2279 else
2280 @@ -1426,19 +1442,10 @@ static int trunc_start(struct inode *inode, u64 newsize)
2281
2282 gfs2_trans_add_meta(ip->i_gl, dibh);
2283
2284 - if (gfs2_is_stuffed(ip)) {
2285 + if (gfs2_is_stuffed(ip))
2286 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode) + newsize);
2287 - } else {
2288 - unsigned int blocksize = i_blocksize(inode);
2289 - unsigned int offs = newsize & (blocksize - 1);
2290 - if (offs) {
2291 - error = gfs2_block_zero_range(inode, newsize,
2292 - blocksize - offs);
2293 - if (error)
2294 - goto out;
2295 - }
2296 + else
2297 ip->i_diskflags |= GFS2_DIF_TRUNC_IN_PROG;
2298 - }
2299
2300 i_size_write(inode, newsize);
2301 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode);
2302 @@ -2442,24 +2449,13 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2303 struct inode *inode = file_inode(file);
2304 struct gfs2_inode *ip = GFS2_I(inode);
2305 struct gfs2_sbd *sdp = GFS2_SB(inode);
2306 + unsigned int blocksize = i_blocksize(inode);
2307 + loff_t start, end;
2308 int error;
2309
2310 - if (gfs2_is_jdata(ip))
2311 - error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2312 - GFS2_JTRUNC_REVOKES);
2313 - else
2314 - error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2315 - if (error)
2316 - return error;
2317 + if (!gfs2_is_stuffed(ip)) {
2318 + unsigned int start_off, end_len;
2319
2320 - if (gfs2_is_stuffed(ip)) {
2321 - error = stuffed_zero_range(inode, offset, length);
2322 - if (error)
2323 - goto out;
2324 - } else {
2325 - unsigned int start_off, end_len, blocksize;
2326 -
2327 - blocksize = i_blocksize(inode);
2328 start_off = offset & (blocksize - 1);
2329 end_len = (offset + length) & (blocksize - 1);
2330 if (start_off) {
2331 @@ -2480,6 +2476,26 @@ int __gfs2_punch_hole(struct file *file, loff_t offset, loff_t length)
2332 }
2333 }
2334
2335 + start = round_down(offset, blocksize);
2336 + end = round_up(offset + length, blocksize) - 1;
2337 + error = filemap_write_and_wait_range(inode->i_mapping, start, end);
2338 + if (error)
2339 + return error;
2340 +
2341 + if (gfs2_is_jdata(ip))
2342 + error = gfs2_trans_begin(sdp, RES_DINODE + 2 * RES_JDATA,
2343 + GFS2_JTRUNC_REVOKES);
2344 + else
2345 + error = gfs2_trans_begin(sdp, RES_DINODE, 0);
2346 + if (error)
2347 + return error;
2348 +
2349 + if (gfs2_is_stuffed(ip)) {
2350 + error = stuffed_zero_range(inode, offset, length);
2351 + if (error)
2352 + goto out;
2353 + }
2354 +
2355 if (gfs2_is_jdata(ip)) {
2356 BUG_ON(!current->journal_info);
2357 gfs2_journaled_truncate_range(inode, offset, length);
2358 diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
2359 index fa58835668a62..b7c5819bfc411 100644
2360 --- a/fs/jbd2/journal.c
2361 +++ b/fs/jbd2/journal.c
2362 @@ -1348,8 +1348,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
2363 int ret;
2364
2365 /* Buffer got discarded which means block device got invalidated */
2366 - if (!buffer_mapped(bh))
2367 + if (!buffer_mapped(bh)) {
2368 + unlock_buffer(bh);
2369 return -EIO;
2370 + }
2371
2372 trace_jbd2_write_superblock(journal, write_flags);
2373 if (!(journal->j_flags & JBD2_BARRIER))
2374 diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
2375 index f20cff1194bb6..776493713153f 100644
2376 --- a/fs/jffs2/dir.c
2377 +++ b/fs/jffs2/dir.c
2378 @@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
2379 int ret;
2380 uint32_t now = JFFS2_NOW();
2381
2382 + mutex_lock(&f->sem);
2383 for (fd = f->dents ; fd; fd = fd->next) {
2384 - if (fd->ino)
2385 + if (fd->ino) {
2386 + mutex_unlock(&f->sem);
2387 return -ENOTEMPTY;
2388 + }
2389 }
2390 + mutex_unlock(&f->sem);
2391
2392 ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
2393 dentry->d_name.len, f, now);
2394 diff --git a/fs/romfs/storage.c b/fs/romfs/storage.c
2395 index 6b2b4362089e6..b57b3ffcbc327 100644
2396 --- a/fs/romfs/storage.c
2397 +++ b/fs/romfs/storage.c
2398 @@ -217,10 +217,8 @@ int romfs_dev_read(struct super_block *sb, unsigned long pos,
2399 size_t limit;
2400
2401 limit = romfs_maxsize(sb);
2402 - if (pos >= limit)
2403 + if (pos >= limit || buflen > limit - pos)
2404 return -EIO;
2405 - if (buflen > limit - pos)
2406 - buflen = limit - pos;
2407
2408 #ifdef CONFIG_ROMFS_ON_MTD
2409 if (sb->s_mtd)
2410 diff --git a/fs/signalfd.c b/fs/signalfd.c
2411 index 44b6845b071c3..5b78719be4455 100644
2412 --- a/fs/signalfd.c
2413 +++ b/fs/signalfd.c
2414 @@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
2415 {
2416 sigset_t mask;
2417
2418 - if (sizemask != sizeof(sigset_t) ||
2419 - copy_from_user(&mask, user_mask, sizeof(mask)))
2420 + if (sizemask != sizeof(sigset_t))
2421 return -EINVAL;
2422 + if (copy_from_user(&mask, user_mask, sizeof(mask)))
2423 + return -EFAULT;
2424 return do_signalfd4(ufd, &mask, flags);
2425 }
2426
2427 @@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
2428 {
2429 sigset_t mask;
2430
2431 - if (sizemask != sizeof(sigset_t) ||
2432 - copy_from_user(&mask, user_mask, sizeof(mask)))
2433 + if (sizemask != sizeof(sigset_t))
2434 return -EINVAL;
2435 + if (copy_from_user(&mask, user_mask, sizeof(mask)))
2436 + return -EFAULT;
2437 return do_signalfd4(ufd, &mask, 0);
2438 }
2439
2440 diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
2441 index e9f810fc67317..43585850f1546 100644
2442 --- a/fs/xfs/xfs_sysfs.h
2443 +++ b/fs/xfs/xfs_sysfs.h
2444 @@ -32,9 +32,11 @@ xfs_sysfs_init(
2445 struct xfs_kobj *parent_kobj,
2446 const char *name)
2447 {
2448 + struct kobject *parent;
2449 +
2450 + parent = parent_kobj ? &parent_kobj->kobject : NULL;
2451 init_completion(&kobj->complete);
2452 - return kobject_init_and_add(&kobj->kobject, ktype,
2453 - &parent_kobj->kobject, "%s", name);
2454 + return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
2455 }
2456
2457 static inline void
2458 diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
2459 index 16457465833ba..904780dd74aa3 100644
2460 --- a/fs/xfs/xfs_trans_dquot.c
2461 +++ b/fs/xfs/xfs_trans_dquot.c
2462 @@ -646,7 +646,7 @@ xfs_trans_dqresv(
2463 }
2464 }
2465 if (ninos > 0) {
2466 - total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
2467 + total_count = dqp->q_res_icount + ninos;
2468 timer = be32_to_cpu(dqp->q_core.d_itimer);
2469 warns = be16_to_cpu(dqp->q_core.d_iwarns);
2470 warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
2471 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
2472 index aa83538efc238..a793bd23fe56c 100644
2473 --- a/kernel/events/uprobes.c
2474 +++ b/kernel/events/uprobes.c
2475 @@ -211,7 +211,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
2476 try_to_free_swap(old_page);
2477 page_vma_mapped_walk_done(&pvmw);
2478
2479 - if (vma->vm_flags & VM_LOCKED)
2480 + if ((vma->vm_flags & VM_LOCKED) && !PageCompound(old_page))
2481 munlock_vma_page(old_page);
2482 put_page(old_page);
2483
2484 diff --git a/kernel/kthread.c b/kernel/kthread.c
2485 index b262f47046ca4..bfbfa481be3a5 100644
2486 --- a/kernel/kthread.c
2487 +++ b/kernel/kthread.c
2488 @@ -199,8 +199,15 @@ static void __kthread_parkme(struct kthread *self)
2489 if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
2490 break;
2491
2492 + /*
2493 + * Thread is going to call schedule(), do not preempt it,
2494 + * or the caller of kthread_park() may spend more time in
2495 + * wait_task_inactive().
2496 + */
2497 + preempt_disable();
2498 complete(&self->parked);
2499 - schedule();
2500 + schedule_preempt_disabled();
2501 + preempt_enable();
2502 }
2503 __set_current_state(TASK_RUNNING);
2504 }
2505 @@ -245,8 +252,14 @@ static int kthread(void *_create)
2506 /* OK, tell user we're spawned, wait for stop or wakeup */
2507 __set_current_state(TASK_UNINTERRUPTIBLE);
2508 create->result = current;
2509 + /*
2510 + * Thread is going to call schedule(), do not preempt it,
2511 + * or the creator may spend more time in wait_task_inactive().
2512 + */
2513 + preempt_disable();
2514 complete(done);
2515 - schedule();
2516 + schedule_preempt_disabled();
2517 + preempt_enable();
2518
2519 ret = -EINTR;
2520 if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
2521 diff --git a/kernel/relay.c b/kernel/relay.c
2522 index 4b760ec163426..d3940becf2fc3 100644
2523 --- a/kernel/relay.c
2524 +++ b/kernel/relay.c
2525 @@ -197,6 +197,7 @@ free_buf:
2526 static void relay_destroy_channel(struct kref *kref)
2527 {
2528 struct rchan *chan = container_of(kref, struct rchan, kref);
2529 + free_percpu(chan->buf);
2530 kfree(chan);
2531 }
2532
2533 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
2534 index 2af1831596f22..2a83b03c54a69 100644
2535 --- a/mm/hugetlb.c
2536 +++ b/mm/hugetlb.c
2537 @@ -4846,25 +4846,21 @@ static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
2538 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
2539 unsigned long *start, unsigned long *end)
2540 {
2541 - unsigned long check_addr = *start;
2542 + unsigned long a_start, a_end;
2543
2544 if (!(vma->vm_flags & VM_MAYSHARE))
2545 return;
2546
2547 - for (check_addr = *start; check_addr < *end; check_addr += PUD_SIZE) {
2548 - unsigned long a_start = check_addr & PUD_MASK;
2549 - unsigned long a_end = a_start + PUD_SIZE;
2550 + /* Extend the range to be PUD aligned for a worst case scenario */
2551 + a_start = ALIGN_DOWN(*start, PUD_SIZE);
2552 + a_end = ALIGN(*end, PUD_SIZE);
2553
2554 - /*
2555 - * If sharing is possible, adjust start/end if necessary.
2556 - */
2557 - if (range_in_vma(vma, a_start, a_end)) {
2558 - if (a_start < *start)
2559 - *start = a_start;
2560 - if (a_end > *end)
2561 - *end = a_end;
2562 - }
2563 - }
2564 + /*
2565 +	 * Intersect the range with the vma range, since pmd sharing won't be
2566 +	 * possible across vma boundaries after all
2567 + */
2568 + *start = max(vma->vm_start, a_start);
2569 + *end = min(vma->vm_end, a_end);
2570 }
2571
2572 /*
2573 diff --git a/mm/khugepaged.c b/mm/khugepaged.c
2574 index 719f49d1fba2f..3623d1c5343f2 100644
2575 --- a/mm/khugepaged.c
2576 +++ b/mm/khugepaged.c
2577 @@ -401,7 +401,7 @@ static void insert_to_mm_slots_hash(struct mm_struct *mm,
2578
2579 static inline int khugepaged_test_exit(struct mm_struct *mm)
2580 {
2581 - return atomic_read(&mm->mm_users) == 0;
2582 + return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
2583 }
2584
2585 static bool hugepage_vma_check(struct vm_area_struct *vma,
2586 @@ -438,7 +438,7 @@ int __khugepaged_enter(struct mm_struct *mm)
2587 return -ENOMEM;
2588
2589 /* __khugepaged_exit() must not run from under us */
2590 - VM_BUG_ON_MM(khugepaged_test_exit(mm), mm);
2591 + VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
2592 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
2593 free_mm_slot(mm_slot);
2594 return 0;
2595 @@ -1019,9 +1019,6 @@ static void collapse_huge_page(struct mm_struct *mm,
2596 * handled by the anon_vma lock + PG_lock.
2597 */
2598 down_write(&mm->mmap_sem);
2599 - result = SCAN_ANY_PROCESS;
2600 - if (!mmget_still_valid(mm))
2601 - goto out;
2602 result = hugepage_vma_revalidate(mm, address, &vma);
2603 if (result)
2604 goto out;
2605 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
2606 index 8686fe760f34c..67a9943aa595f 100644
2607 --- a/mm/page_alloc.c
2608 +++ b/mm/page_alloc.c
2609 @@ -1256,6 +1256,11 @@ static void free_pcppages_bulk(struct zone *zone, int count,
2610 struct page *page, *tmp;
2611 LIST_HEAD(head);
2612
2613 + /*
2614 +	 * Ensure a proper count is passed; otherwise we would get stuck in the
2615 +	 * while (list_empty(list)) loop below.
2616 + */
2617 + count = min(pcp->count, count);
2618 while (count) {
2619 struct list_head *list;
2620
2621 @@ -7867,7 +7872,7 @@ int __meminit init_per_zone_wmark_min(void)
2622
2623 return 0;
2624 }
2625 -core_initcall(init_per_zone_wmark_min)
2626 +postcore_initcall(init_per_zone_wmark_min)
2627
2628 /*
2629 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
2630 diff --git a/net/can/j1939/socket.c b/net/can/j1939/socket.c
2631 index f7587428febdd..bf9fd6ee88fe0 100644
2632 --- a/net/can/j1939/socket.c
2633 +++ b/net/can/j1939/socket.c
2634 @@ -398,6 +398,7 @@ static int j1939_sk_init(struct sock *sk)
2635 spin_lock_init(&jsk->sk_session_queue_lock);
2636 INIT_LIST_HEAD(&jsk->sk_session_queue);
2637 sk->sk_destruct = j1939_sk_sock_destruct;
2638 + sk->sk_protocol = CAN_J1939;
2639
2640 return 0;
2641 }
2642 @@ -466,6 +467,14 @@ static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
2643 goto out_release_sock;
2644 }
2645
2646 + if (!ndev->ml_priv) {
2647 + netdev_warn_once(ndev,
2648 + "No CAN mid layer private allocated, please fix your driver and use alloc_candev()!\n");
2649 + dev_put(ndev);
2650 + ret = -ENODEV;
2651 + goto out_release_sock;
2652 + }
2653 +
2654 priv = j1939_netdev_start(ndev);
2655 dev_put(ndev);
2656 if (IS_ERR(priv)) {
2657 @@ -553,6 +562,11 @@ static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
2658 static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
2659 const struct j1939_sock *jsk, int peer)
2660 {
2661 + /* There are two holes (2 bytes and 3 bytes) to clear to avoid
2662 + * leaking kernel information to user space.
2663 + */
2664 + memset(addr, 0, J1939_MIN_NAMELEN);
2665 +
2666 addr->can_family = AF_CAN;
2667 addr->can_ifindex = jsk->ifindex;
2668 addr->can_addr.j1939.pgn = jsk->addr.pgn;
2669 diff --git a/net/can/j1939/transport.c b/net/can/j1939/transport.c
2670 index 9f99af5b0b11e..dbd215cbc53d8 100644
2671 --- a/net/can/j1939/transport.c
2672 +++ b/net/can/j1939/transport.c
2673 @@ -352,17 +352,16 @@ void j1939_session_skb_queue(struct j1939_session *session,
2674 skb_queue_tail(&session->skb_queue, skb);
2675 }
2676
2677 -static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
2678 +static struct
2679 +sk_buff *j1939_session_skb_find_by_offset(struct j1939_session *session,
2680 + unsigned int offset_start)
2681 {
2682 struct j1939_priv *priv = session->priv;
2683 + struct j1939_sk_buff_cb *do_skcb;
2684 struct sk_buff *skb = NULL;
2685 struct sk_buff *do_skb;
2686 - struct j1939_sk_buff_cb *do_skcb;
2687 - unsigned int offset_start;
2688 unsigned long flags;
2689
2690 - offset_start = session->pkt.dpo * 7;
2691 -
2692 spin_lock_irqsave(&session->skb_queue.lock, flags);
2693 skb_queue_walk(&session->skb_queue, do_skb) {
2694 do_skcb = j1939_skb_to_cb(do_skb);
2695 @@ -382,6 +381,14 @@ static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
2696 return skb;
2697 }
2698
2699 +static struct sk_buff *j1939_session_skb_find(struct j1939_session *session)
2700 +{
2701 + unsigned int offset_start;
2702 +
2703 + offset_start = session->pkt.dpo * 7;
2704 + return j1939_session_skb_find_by_offset(session, offset_start);
2705 +}
2706 +
2707 /* see if we are receiver
2708 * returns 0 for broadcasts, although we will receive them
2709 */
2710 @@ -716,10 +723,12 @@ static int j1939_session_tx_rts(struct j1939_session *session)
2711 return ret;
2712
2713 session->last_txcmd = dat[0];
2714 - if (dat[0] == J1939_TP_CMD_BAM)
2715 + if (dat[0] == J1939_TP_CMD_BAM) {
2716 j1939_tp_schedule_txtimer(session, 50);
2717 -
2718 - j1939_tp_set_rxtimeout(session, 1250);
2719 + j1939_tp_set_rxtimeout(session, 250);
2720 + } else {
2721 + j1939_tp_set_rxtimeout(session, 1250);
2722 + }
2723
2724 netdev_dbg(session->priv->ndev, "%s: 0x%p\n", __func__, session);
2725
2726 @@ -766,7 +775,7 @@ static int j1939_session_tx_dat(struct j1939_session *session)
2727 int ret = 0;
2728 u8 dat[8];
2729
2730 - se_skb = j1939_session_skb_find(session);
2731 + se_skb = j1939_session_skb_find_by_offset(session, session->pkt.tx * 7);
2732 if (!se_skb)
2733 return -ENOBUFS;
2734
2735 @@ -787,6 +796,18 @@ static int j1939_session_tx_dat(struct j1939_session *session)
2736 if (len > 7)
2737 len = 7;
2738
2739 + if (offset + len > se_skb->len) {
2740 + netdev_err_once(priv->ndev,
2741 + "%s: 0x%p: requested data outside of queued buffer: offset %i, len %i, pkt.tx: %i\n",
2742 +				__func__, session, skcb->offset, se_skb->len, session->pkt.tx);
2743 + return -EOVERFLOW;
2744 + }
2745 +
2746 + if (!len) {
2747 + ret = -ENOBUFS;
2748 + break;
2749 + }
2750 +
2751 memcpy(&dat[1], &tpdat[offset], len);
2752 ret = j1939_tp_tx_dat(session, dat, len + 1);
2753 if (ret < 0) {
2754 @@ -1055,9 +1076,9 @@ static void __j1939_session_cancel(struct j1939_session *session,
2755 lockdep_assert_held(&session->priv->active_session_list_lock);
2756
2757 session->err = j1939_xtp_abort_to_errno(priv, err);
2758 + session->state = J1939_SESSION_WAITING_ABORT;
2759 /* do not send aborts on incoming broadcasts */
2760 if (!j1939_cb_is_broadcast(&session->skcb)) {
2761 - session->state = J1939_SESSION_WAITING_ABORT;
2762 j1939_xtp_tx_abort(priv, &session->skcb,
2763 !session->transmission,
2764 err, session->skcb.addr.pgn);
2765 @@ -1120,6 +1141,9 @@ static enum hrtimer_restart j1939_tp_txtimer(struct hrtimer *hrtimer)
2766 * cleanup including propagation of the error to user space.
2767 */
2768 break;
2769 + case -EOVERFLOW:
2770 + j1939_session_cancel(session, J1939_XTP_ABORT_ECTS_TOO_BIG);
2771 + break;
2772 case 0:
2773 session->tx_retry = 0;
2774 break;
2775 @@ -1651,8 +1675,12 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
2776 return;
2777 }
2778 session = j1939_xtp_rx_rts_session_new(priv, skb);
2779 - if (!session)
2780 + if (!session) {
2781 + if (cmd == J1939_TP_CMD_BAM && j1939_sk_recv_match(priv, skcb))
2782 + netdev_info(priv->ndev, "%s: failed to create TP BAM session\n",
2783 + __func__);
2784 return;
2785 + }
2786 } else {
2787 if (j1939_xtp_rx_rts_session_active(session, skb)) {
2788 j1939_session_put(session);
2789 @@ -1661,11 +1689,15 @@ static void j1939_xtp_rx_rts(struct j1939_priv *priv, struct sk_buff *skb,
2790 }
2791 session->last_cmd = cmd;
2792
2793 - j1939_tp_set_rxtimeout(session, 1250);
2794 -
2795 - if (cmd != J1939_TP_CMD_BAM && !session->transmission) {
2796 - j1939_session_txtimer_cancel(session);
2797 - j1939_tp_schedule_txtimer(session, 0);
2798 + if (cmd == J1939_TP_CMD_BAM) {
2799 + if (!session->transmission)
2800 + j1939_tp_set_rxtimeout(session, 750);
2801 + } else {
2802 + if (!session->transmission) {
2803 + j1939_session_txtimer_cancel(session);
2804 + j1939_tp_schedule_txtimer(session, 0);
2805 + }
2806 + j1939_tp_set_rxtimeout(session, 1250);
2807 }
2808
2809 j1939_session_put(session);
2810 @@ -1716,6 +1748,7 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
2811 int offset;
2812 int nbytes;
2813 bool final = false;
2814 + bool remain = false;
2815 bool do_cts_eoma = false;
2816 int packet;
2817
2818 @@ -1750,7 +1783,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
2819 __func__, session);
2820 goto out_session_cancel;
2821 }
2822 - se_skb = j1939_session_skb_find(session);
2823 +
2824 + se_skb = j1939_session_skb_find_by_offset(session, packet * 7);
2825 if (!se_skb) {
2826 netdev_warn(priv->ndev, "%s: 0x%p: no skb found\n", __func__,
2827 session);
2828 @@ -1777,6 +1811,8 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
2829 j1939_cb_is_broadcast(&session->skcb)) {
2830 if (session->pkt.rx >= session->pkt.total)
2831 final = true;
2832 + else
2833 + remain = true;
2834 } else {
2835 /* never final, an EOMA must follow */
2836 if (session->pkt.rx >= session->pkt.last)
2837 @@ -1784,7 +1820,11 @@ static void j1939_xtp_rx_dat_one(struct j1939_session *session,
2838 }
2839
2840 if (final) {
2841 + j1939_session_timers_cancel(session);
2842 j1939_session_completed(session);
2843 + } else if (remain) {
2844 + if (!session->transmission)
2845 + j1939_tp_set_rxtimeout(session, 750);
2846 } else if (do_cts_eoma) {
2847 j1939_tp_set_rxtimeout(session, 1250);
2848 if (!session->transmission)
2849 @@ -1829,6 +1869,13 @@ static void j1939_xtp_rx_dat(struct j1939_priv *priv, struct sk_buff *skb)
2850 else
2851 j1939_xtp_rx_dat_one(session, skb);
2852 }
2853 +
2854 + if (j1939_cb_is_broadcast(skcb)) {
2855 + session = j1939_session_get_by_addr(priv, &skcb->addr, false,
2856 + false);
2857 + if (session)
2858 + j1939_xtp_rx_dat_one(session, skb);
2859 + }
2860 }
2861
2862 /* j1939 main intf */
2863 @@ -1920,7 +1967,7 @@ static void j1939_tp_cmd_recv(struct j1939_priv *priv, struct sk_buff *skb)
2864 if (j1939_tp_im_transmitter(skcb))
2865 j1939_xtp_rx_rts(priv, skb, true);
2866
2867 - if (j1939_tp_im_receiver(skcb))
2868 + if (j1939_tp_im_receiver(skcb) || j1939_cb_is_broadcast(skcb))
2869 j1939_xtp_rx_rts(priv, skb, false);
2870
2871 break;
2872 @@ -1984,7 +2031,7 @@ int j1939_tp_recv(struct j1939_priv *priv, struct sk_buff *skb)
2873 {
2874 struct j1939_sk_buff_cb *skcb = j1939_skb_to_cb(skb);
2875
2876 - if (!j1939_tp_im_involved_anydir(skcb))
2877 + if (!j1939_tp_im_involved_anydir(skcb) && !j1939_cb_is_broadcast(skcb))
2878 return 0;
2879
2880 switch (skcb->addr.pgn) {
2881 @@ -2017,6 +2064,10 @@ void j1939_simple_recv(struct j1939_priv *priv, struct sk_buff *skb)
2882 if (!skb->sk)
2883 return;
2884
2885 + if (skb->sk->sk_family != AF_CAN ||
2886 + skb->sk->sk_protocol != CAN_J1939)
2887 + return;
2888 +
2889 j1939_session_list_lock(priv);
2890 session = j1939_session_get_simple(priv, skb);
2891 j1939_session_list_unlock(priv);
2892 diff --git a/net/core/filter.c b/net/core/filter.c
2893 index bd1e46d61d8a1..5c490d473df1d 100644
2894 --- a/net/core/filter.c
2895 +++ b/net/core/filter.c
2896 @@ -8010,6 +8010,43 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
2897 offsetof(OBJ, OBJ_FIELD)); \
2898 } while (0)
2899
2900 +#define SOCK_OPS_GET_SK() \
2901 + do { \
2902 + int fullsock_reg = si->dst_reg, reg = BPF_REG_9, jmp = 1; \
2903 + if (si->dst_reg == reg || si->src_reg == reg) \
2904 + reg--; \
2905 + if (si->dst_reg == reg || si->src_reg == reg) \
2906 + reg--; \
2907 + if (si->dst_reg == si->src_reg) { \
2908 + *insn++ = BPF_STX_MEM(BPF_DW, si->src_reg, reg, \
2909 + offsetof(struct bpf_sock_ops_kern, \
2910 + temp)); \
2911 + fullsock_reg = reg; \
2912 + jmp += 2; \
2913 + } \
2914 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
2915 + struct bpf_sock_ops_kern, \
2916 + is_fullsock), \
2917 + fullsock_reg, si->src_reg, \
2918 + offsetof(struct bpf_sock_ops_kern, \
2919 + is_fullsock)); \
2920 + *insn++ = BPF_JMP_IMM(BPF_JEQ, fullsock_reg, 0, jmp); \
2921 + if (si->dst_reg == si->src_reg) \
2922 + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
2923 + offsetof(struct bpf_sock_ops_kern, \
2924 + temp)); \
2925 + *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF( \
2926 + struct bpf_sock_ops_kern, sk),\
2927 + si->dst_reg, si->src_reg, \
2928 + offsetof(struct bpf_sock_ops_kern, sk));\
2929 + if (si->dst_reg == si->src_reg) { \
2930 + *insn++ = BPF_JMP_A(1); \
2931 + *insn++ = BPF_LDX_MEM(BPF_DW, reg, si->src_reg, \
2932 + offsetof(struct bpf_sock_ops_kern, \
2933 + temp)); \
2934 + } \
2935 + } while (0)
2936 +
2937 #define SOCK_OPS_GET_TCP_SOCK_FIELD(FIELD) \
2938 SOCK_OPS_GET_FIELD(FIELD, FIELD, struct tcp_sock)
2939
2940 @@ -8294,17 +8331,7 @@ static u32 sock_ops_convert_ctx_access(enum bpf_access_type type,
2941 SOCK_OPS_GET_TCP_SOCK_FIELD(bytes_acked);
2942 break;
2943 case offsetof(struct bpf_sock_ops, sk):
2944 - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
2945 - struct bpf_sock_ops_kern,
2946 - is_fullsock),
2947 - si->dst_reg, si->src_reg,
2948 - offsetof(struct bpf_sock_ops_kern,
2949 - is_fullsock));
2950 - *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
2951 - *insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(
2952 - struct bpf_sock_ops_kern, sk),
2953 - si->dst_reg, si->src_reg,
2954 - offsetof(struct bpf_sock_ops_kern, sk));
2955 + SOCK_OPS_GET_SK();
2956 break;
2957 }
2958 return insn - insn_buf;
2959 diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
2960 index a5e8469859e39..427d77b111b17 100644
2961 --- a/net/netfilter/nft_exthdr.c
2962 +++ b/net/netfilter/nft_exthdr.c
2963 @@ -44,7 +44,7 @@ static void nft_exthdr_ipv6_eval(const struct nft_expr *expr,
2964
2965 err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
2966 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
2967 - *dest = (err >= 0);
2968 + nft_reg_store8(dest, err >= 0);
2969 return;
2970 } else if (err < 0) {
2971 goto err;
2972 @@ -141,7 +141,7 @@ static void nft_exthdr_ipv4_eval(const struct nft_expr *expr,
2973
2974 err = ipv4_find_option(nft_net(pkt), skb, &offset, priv->type);
2975 if (priv->flags & NFT_EXTHDR_F_PRESENT) {
2976 - *dest = (err >= 0);
2977 + nft_reg_store8(dest, err >= 0);
2978 return;
2979 } else if (err < 0) {
2980 goto err;
2981 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
2982 index 0ce4e75b29812..d803d814a03ad 100644
2983 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
2984 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
2985 @@ -265,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
2986 {
2987 struct svc_rdma_recv_ctxt *ctxt;
2988
2989 + if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
2990 + return 0;
2991 ctxt = svc_rdma_recv_ctxt_get(rdma);
2992 if (!ctxt)
2993 return -ENOMEM;
2994 diff --git a/scripts/kconfig/qconf.cc b/scripts/kconfig/qconf.cc
2995 index 0f8c77f847114..a94909ad9a53a 100644
2996 --- a/scripts/kconfig/qconf.cc
2997 +++ b/scripts/kconfig/qconf.cc
2998 @@ -869,40 +869,40 @@ void ConfigList::focusInEvent(QFocusEvent *e)
2999
3000 void ConfigList::contextMenuEvent(QContextMenuEvent *e)
3001 {
3002 - if (e->y() <= header()->geometry().bottom()) {
3003 - if (!headerPopup) {
3004 - QAction *action;
3005 -
3006 - headerPopup = new QMenu(this);
3007 - action = new QAction("Show Name", this);
3008 - action->setCheckable(true);
3009 - connect(action, SIGNAL(toggled(bool)),
3010 - parent(), SLOT(setShowName(bool)));
3011 - connect(parent(), SIGNAL(showNameChanged(bool)),
3012 - action, SLOT(setOn(bool)));
3013 - action->setChecked(showName);
3014 - headerPopup->addAction(action);
3015 - action = new QAction("Show Range", this);
3016 - action->setCheckable(true);
3017 - connect(action, SIGNAL(toggled(bool)),
3018 - parent(), SLOT(setShowRange(bool)));
3019 - connect(parent(), SIGNAL(showRangeChanged(bool)),
3020 - action, SLOT(setOn(bool)));
3021 - action->setChecked(showRange);
3022 - headerPopup->addAction(action);
3023 - action = new QAction("Show Data", this);
3024 - action->setCheckable(true);
3025 - connect(action, SIGNAL(toggled(bool)),
3026 - parent(), SLOT(setShowData(bool)));
3027 - connect(parent(), SIGNAL(showDataChanged(bool)),
3028 - action, SLOT(setOn(bool)));
3029 - action->setChecked(showData);
3030 - headerPopup->addAction(action);
3031 - }
3032 - headerPopup->exec(e->globalPos());
3033 - e->accept();
3034 - } else
3035 - e->ignore();
3036 + if (!headerPopup) {
3037 + QAction *action;
3038 +
3039 + headerPopup = new QMenu(this);
3040 + action = new QAction("Show Name", this);
3041 + action->setCheckable(true);
3042 + connect(action, SIGNAL(toggled(bool)),
3043 + parent(), SLOT(setShowName(bool)));
3044 + connect(parent(), SIGNAL(showNameChanged(bool)),
3045 + action, SLOT(setChecked(bool)));
3046 + action->setChecked(showName);
3047 + headerPopup->addAction(action);
3048 +
3049 + action = new QAction("Show Range", this);
3050 + action->setCheckable(true);
3051 + connect(action, SIGNAL(toggled(bool)),
3052 + parent(), SLOT(setShowRange(bool)));
3053 + connect(parent(), SIGNAL(showRangeChanged(bool)),
3054 + action, SLOT(setChecked(bool)));
3055 + action->setChecked(showRange);
3056 + headerPopup->addAction(action);
3057 +
3058 + action = new QAction("Show Data", this);
3059 + action->setCheckable(true);
3060 + connect(action, SIGNAL(toggled(bool)),
3061 + parent(), SLOT(setShowData(bool)));
3062 + connect(parent(), SIGNAL(showDataChanged(bool)),
3063 + action, SLOT(setChecked(bool)));
3064 + action->setChecked(showData);
3065 + headerPopup->addAction(action);
3066 + }
3067 +
3068 + headerPopup->exec(e->globalPos());
3069 + e->accept();
3070 }
3071
3072 ConfigView*ConfigView::viewList;
3073 @@ -1228,7 +1228,7 @@ QMenu* ConfigInfoView::createStandardContextMenu(const QPoint & pos)
3074
3075 action->setCheckable(true);
3076 connect(action, SIGNAL(toggled(bool)), SLOT(setShowDebug(bool)));
3077 - connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setOn(bool)));
3078 + connect(this, SIGNAL(showDebugChanged(bool)), action, SLOT(setChecked(bool)));
3079 action->setChecked(showDebug());
3080 popup->addSeparator();
3081 popup->addAction(action);
3082 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
3083 index 88629906f314c..06bbcfbb28153 100644
3084 --- a/sound/pci/hda/patch_realtek.c
3085 +++ b/sound/pci/hda/patch_realtek.c
3086 @@ -7666,6 +7666,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
3087 SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
3088 SND_PCI_QUIRK(0x144d, 0xc169, "Samsung Notebook 9 Pen (NP930SBE-K01US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3089 SND_PCI_QUIRK(0x144d, 0xc176, "Samsung Notebook 9 Pro (NP930MBE-K04US)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3090 + SND_PCI_QUIRK(0x144d, 0xc189, "Samsung Galaxy Flex Book (NT950QCG-X716)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3091 + SND_PCI_QUIRK(0x144d, 0xc18a, "Samsung Galaxy Book Ion (NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3092 SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
3093 SND_PCI_QUIRK(0x144d, 0xc812, "Samsung Notebook Pen S (NT950SBE-X58)", ALC298_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET),
3094 SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
3095 diff --git a/sound/soc/codecs/msm8916-wcd-analog.c b/sound/soc/codecs/msm8916-wcd-analog.c
3096 index 84289ebeae872..337bddb7c2a49 100644
3097 --- a/sound/soc/codecs/msm8916-wcd-analog.c
3098 +++ b/sound/soc/codecs/msm8916-wcd-analog.c
3099 @@ -19,8 +19,8 @@
3100
3101 #define CDC_D_REVISION1 (0xf000)
3102 #define CDC_D_PERPH_SUBTYPE (0xf005)
3103 -#define CDC_D_INT_EN_SET (0x015)
3104 -#define CDC_D_INT_EN_CLR (0x016)
3105 +#define CDC_D_INT_EN_SET (0xf015)
3106 +#define CDC_D_INT_EN_CLR (0xf016)
3107 #define MBHC_SWITCH_INT BIT(7)
3108 #define MBHC_MIC_ELECTRICAL_INS_REM_DET BIT(6)
3109 #define MBHC_BUTTON_PRESS_DET BIT(5)
3110 diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3111 index 8cc3cc363eb03..31f1dd6541aa1 100644
3112 --- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3113 +++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
3114 @@ -331,7 +331,7 @@ static int sst_media_open(struct snd_pcm_substream *substream,
3115
3116 ret_val = power_up_sst(stream);
3117 if (ret_val < 0)
3118 - return ret_val;
3119 + goto out_power_up;
3120
3121 /* Make sure, that the period size is always even */
3122 snd_pcm_hw_constraint_step(substream->runtime, 0,
3123 @@ -340,8 +340,9 @@ static int sst_media_open(struct snd_pcm_substream *substream,
3124 return snd_pcm_hw_constraint_integer(runtime,
3125 SNDRV_PCM_HW_PARAM_PERIODS);
3126 out_ops:
3127 - kfree(stream);
3128 mutex_unlock(&sst_lock);
3129 +out_power_up:
3130 + kfree(stream);
3131 return ret_val;
3132 }
3133
3134 diff --git a/sound/soc/qcom/qdsp6/q6afe-dai.c b/sound/soc/qcom/qdsp6/q6afe-dai.c
3135 index 2a5302f1db98a..0168af8492727 100644
3136 --- a/sound/soc/qcom/qdsp6/q6afe-dai.c
3137 +++ b/sound/soc/qcom/qdsp6/q6afe-dai.c
3138 @@ -1150,206 +1150,206 @@ static int q6afe_of_xlate_dai_name(struct snd_soc_component *component,
3139 }
3140
3141 static const struct snd_soc_dapm_widget q6afe_dai_widgets[] = {
3142 - SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, 0, 0, 0),
3143 - SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, 0, 0, 0),
3144 - SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, 0, 0, 0),
3145 - SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, 0, 0, 0),
3146 - SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, 0, 0, 0),
3147 - SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, 0, 0, 0),
3148 - SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, 0, 0, 0),
3149 - SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, 0, 0, 0),
3150 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, 0, 0, 0),
3151 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, 0, 0, 0),
3152 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, 0, 0, 0),
3153 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, 0, 0, 0),
3154 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, 0, 0, 0),
3155 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, 0, 0, 0),
3156 - SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, 0, 0, 0),
3157 + SND_SOC_DAPM_AIF_IN("HDMI_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3158 + SND_SOC_DAPM_AIF_IN("SLIMBUS_0_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3159 + SND_SOC_DAPM_AIF_IN("SLIMBUS_1_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3160 + SND_SOC_DAPM_AIF_IN("SLIMBUS_2_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3161 + SND_SOC_DAPM_AIF_IN("SLIMBUS_3_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3162 + SND_SOC_DAPM_AIF_IN("SLIMBUS_4_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3163 + SND_SOC_DAPM_AIF_IN("SLIMBUS_5_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3164 + SND_SOC_DAPM_AIF_IN("SLIMBUS_6_RX", NULL, 0, SND_SOC_NOPM, 0, 0),
3165 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_0_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3166 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_1_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3167 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_2_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3168 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_3_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3169 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_4_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3170 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_5_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3171 + SND_SOC_DAPM_AIF_OUT("SLIMBUS_6_TX", NULL, 0, SND_SOC_NOPM, 0, 0),
3172 SND_SOC_DAPM_AIF_IN("QUAT_MI2S_RX", NULL,
3173 - 0, 0, 0, 0),
3174 + 0, SND_SOC_NOPM, 0, 0),
3175 SND_SOC_DAPM_AIF_OUT("QUAT_MI2S_TX", NULL,
3176 - 0, 0, 0, 0),
3177 + 0, SND_SOC_NOPM, 0, 0),
3178 SND_SOC_DAPM_AIF_IN("TERT_MI2S_RX", NULL,
3179 - 0, 0, 0, 0),
3180 + 0, SND_SOC_NOPM, 0, 0),
3181 SND_SOC_DAPM_AIF_OUT("TERT_MI2S_TX", NULL,
3182 - 0, 0, 0, 0),
3183 + 0, SND_SOC_NOPM, 0, 0),
3184 SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX", NULL,
3185 - 0, 0, 0, 0),
3186 + 0, SND_SOC_NOPM, 0, 0),
3187 SND_SOC_DAPM_AIF_OUT("SEC_MI2S_TX", NULL,
3188 - 0, 0, 0, 0),
3189 + 0, SND_SOC_NOPM, 0, 0),
3190 SND_SOC_DAPM_AIF_IN("SEC_MI2S_RX_SD1",
3191 "Secondary MI2S Playback SD1",
3192 - 0, 0, 0, 0),
3193 + 0, SND_SOC_NOPM, 0, 0),
3194 SND_SOC_DAPM_AIF_IN("PRI_MI2S_RX", NULL,
3195 - 0, 0, 0, 0),
3196 + 0, SND_SOC_NOPM, 0, 0),
3197 SND_SOC_DAPM_AIF_OUT("PRI_MI2S_TX", NULL,
3198 - 0, 0, 0, 0),
3199 + 0, SND_SOC_NOPM, 0, 0),
3200
3201 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_0", NULL,
3202 - 0, 0, 0, 0),
3203 + 0, SND_SOC_NOPM, 0, 0),
3204 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_1", NULL,
3205 - 0, 0, 0, 0),
3206 + 0, SND_SOC_NOPM, 0, 0),
3207 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_2", NULL,
3208 - 0, 0, 0, 0),
3209 + 0, SND_SOC_NOPM, 0, 0),
3210 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_3", NULL,
3211 - 0, 0, 0, 0),
3212 + 0, SND_SOC_NOPM, 0, 0),
3213 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_4", NULL,
3214 - 0, 0, 0, 0),
3215 + 0, SND_SOC_NOPM, 0, 0),
3216 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_5", NULL,
3217 - 0, 0, 0, 0),
3218 + 0, SND_SOC_NOPM, 0, 0),
3219 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_6", NULL,
3220 - 0, 0, 0, 0),
3221 + 0, SND_SOC_NOPM, 0, 0),
3222 SND_SOC_DAPM_AIF_IN("PRIMARY_TDM_RX_7", NULL,
3223 - 0, 0, 0, 0),
3224 + 0, SND_SOC_NOPM, 0, 0),
3225 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_0", NULL,
3226 - 0, 0, 0, 0),
3227 + 0, SND_SOC_NOPM, 0, 0),
3228 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_1", NULL,
3229 - 0, 0, 0, 0),
3230 + 0, SND_SOC_NOPM, 0, 0),
3231 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_2", NULL,
3232 - 0, 0, 0, 0),
3233 + 0, SND_SOC_NOPM, 0, 0),
3234 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_3", NULL,
3235 - 0, 0, 0, 0),
3236 + 0, SND_SOC_NOPM, 0, 0),
3237 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_4", NULL,
3238 - 0, 0, 0, 0),
3239 + 0, SND_SOC_NOPM, 0, 0),
3240 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_5", NULL,
3241 - 0, 0, 0, 0),
3242 + 0, SND_SOC_NOPM, 0, 0),
3243 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_6", NULL,
3244 - 0, 0, 0, 0),
3245 + 0, SND_SOC_NOPM, 0, 0),
3246 SND_SOC_DAPM_AIF_OUT("PRIMARY_TDM_TX_7", NULL,
3247 - 0, 0, 0, 0),
3248 + 0, SND_SOC_NOPM, 0, 0),
3249
3250 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_0", NULL,
3251 - 0, 0, 0, 0),
3252 + 0, SND_SOC_NOPM, 0, 0),
3253 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_1", NULL,
3254 - 0, 0, 0, 0),
3255 + 0, SND_SOC_NOPM, 0, 0),
3256 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_2", NULL,
3257 - 0, 0, 0, 0),
3258 + 0, SND_SOC_NOPM, 0, 0),
3259 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_3", NULL,
3260 - 0, 0, 0, 0),
3261 + 0, SND_SOC_NOPM, 0, 0),
3262 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_4", NULL,
3263 - 0, 0, 0, 0),
3264 + 0, SND_SOC_NOPM, 0, 0),
3265 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_5", NULL,
3266 - 0, 0, 0, 0),
3267 + 0, SND_SOC_NOPM, 0, 0),
3268 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_6", NULL,
3269 - 0, 0, 0, 0),
3270 + 0, SND_SOC_NOPM, 0, 0),
3271 SND_SOC_DAPM_AIF_IN("SEC_TDM_RX_7", NULL,
3272 - 0, 0, 0, 0),
3273 + 0, SND_SOC_NOPM, 0, 0),
3274 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_0", NULL,
3275 - 0, 0, 0, 0),
3276 + 0, SND_SOC_NOPM, 0, 0),
3277 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_1", NULL,
3278 - 0, 0, 0, 0),
3279 + 0, SND_SOC_NOPM, 0, 0),
3280 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_2", NULL,
3281 - 0, 0, 0, 0),
3282 + 0, SND_SOC_NOPM, 0, 0),
3283 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_3", NULL,
3284 - 0, 0, 0, 0),
3285 + 0, SND_SOC_NOPM, 0, 0),
3286 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_4", NULL,
3287 - 0, 0, 0, 0),
3288 + 0, SND_SOC_NOPM, 0, 0),
3289 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_5", NULL,
3290 - 0, 0, 0, 0),
3291 + 0, SND_SOC_NOPM, 0, 0),
3292 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_6", NULL,
3293 - 0, 0, 0, 0),
3294 + 0, SND_SOC_NOPM, 0, 0),
3295 SND_SOC_DAPM_AIF_OUT("SEC_TDM_TX_7", NULL,
3296 - 0, 0, 0, 0),
3297 + 0, SND_SOC_NOPM, 0, 0),
3298
3299 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_0", NULL,
3300 - 0, 0, 0, 0),
3301 + 0, SND_SOC_NOPM, 0, 0),
3302 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_1", NULL,
3303 - 0, 0, 0, 0),
3304 + 0, SND_SOC_NOPM, 0, 0),
3305 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_2", NULL,
3306 - 0, 0, 0, 0),
3307 + 0, SND_SOC_NOPM, 0, 0),
3308 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_3", NULL,
3309 - 0, 0, 0, 0),
3310 + 0, SND_SOC_NOPM, 0, 0),
3311 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_4", NULL,
3312 - 0, 0, 0, 0),
3313 + 0, SND_SOC_NOPM, 0, 0),
3314 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_5", NULL,
3315 - 0, 0, 0, 0),
3316 + 0, SND_SOC_NOPM, 0, 0),
3317 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_6", NULL,
3318 - 0, 0, 0, 0),
3319 + 0, SND_SOC_NOPM, 0, 0),
3320 SND_SOC_DAPM_AIF_IN("TERT_TDM_RX_7", NULL,
3321 - 0, 0, 0, 0),
3322 + 0, SND_SOC_NOPM, 0, 0),
3323 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_0", NULL,
3324 - 0, 0, 0, 0),
3325 + 0, SND_SOC_NOPM, 0, 0),
3326 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_1", NULL,
3327 - 0, 0, 0, 0),
3328 + 0, SND_SOC_NOPM, 0, 0),
3329 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_2", NULL,
3330 - 0, 0, 0, 0),
3331 + 0, SND_SOC_NOPM, 0, 0),
3332 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_3", NULL,
3333 - 0, 0, 0, 0),
3334 + 0, SND_SOC_NOPM, 0, 0),
3335 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_4", NULL,
3336 - 0, 0, 0, 0),
3337 + 0, SND_SOC_NOPM, 0, 0),
3338 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_5", NULL,
3339 - 0, 0, 0, 0),
3340 + 0, SND_SOC_NOPM, 0, 0),
3341 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_6", NULL,
3342 - 0, 0, 0, 0),
3343 + 0, SND_SOC_NOPM, 0, 0),
3344 SND_SOC_DAPM_AIF_OUT("TERT_TDM_TX_7", NULL,
3345 - 0, 0, 0, 0),
3346 + 0, SND_SOC_NOPM, 0, 0),
3347
3348 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_0", NULL,
3349 - 0, 0, 0, 0),
3350 + 0, SND_SOC_NOPM, 0, 0),
3351 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_1", NULL,
3352 - 0, 0, 0, 0),
3353 + 0, SND_SOC_NOPM, 0, 0),
3354 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_2", NULL,
3355 - 0, 0, 0, 0),
3356 + 0, SND_SOC_NOPM, 0, 0),
3357 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_3", NULL,
3358 - 0, 0, 0, 0),
3359 + 0, SND_SOC_NOPM, 0, 0),
3360 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_4", NULL,
3361 - 0, 0, 0, 0),
3362 + 0, SND_SOC_NOPM, 0, 0),
3363 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_5", NULL,
3364 - 0, 0, 0, 0),
3365 + 0, SND_SOC_NOPM, 0, 0),
3366 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_6", NULL,
3367 - 0, 0, 0, 0),
3368 + 0, SND_SOC_NOPM, 0, 0),
3369 SND_SOC_DAPM_AIF_IN("QUAT_TDM_RX_7", NULL,
3370 - 0, 0, 0, 0),
3371 + 0, SND_SOC_NOPM, 0, 0),
3372 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_0", NULL,
3373 - 0, 0, 0, 0),
3374 + 0, SND_SOC_NOPM, 0, 0),
3375 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_1", NULL,
3376 - 0, 0, 0, 0),
3377 + 0, SND_SOC_NOPM, 0, 0),
3378 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_2", NULL,
3379 - 0, 0, 0, 0),
3380 + 0, SND_SOC_NOPM, 0, 0),
3381 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_3", NULL,
3382 - 0, 0, 0, 0),
3383 + 0, SND_SOC_NOPM, 0, 0),
3384 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_4", NULL,
3385 - 0, 0, 0, 0),
3386 + 0, SND_SOC_NOPM, 0, 0),
3387 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_5", NULL,
3388 - 0, 0, 0, 0),
3389 + 0, SND_SOC_NOPM, 0, 0),
3390 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_6", NULL,
3391 - 0, 0, 0, 0),
3392 + 0, SND_SOC_NOPM, 0, 0),
3393 SND_SOC_DAPM_AIF_OUT("QUAT_TDM_TX_7", NULL,
3394 - 0, 0, 0, 0),
3395 + 0, SND_SOC_NOPM, 0, 0),
3396
3397 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_0", NULL,
3398 - 0, 0, 0, 0),
3399 + 0, SND_SOC_NOPM, 0, 0),
3400 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_1", NULL,
3401 - 0, 0, 0, 0),
3402 + 0, SND_SOC_NOPM, 0, 0),
3403 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_2", NULL,
3404 - 0, 0, 0, 0),
3405 + 0, SND_SOC_NOPM, 0, 0),
3406 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_3", NULL,
3407 - 0, 0, 0, 0),
3408 + 0, SND_SOC_NOPM, 0, 0),
3409 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_4", NULL,
3410 - 0, 0, 0, 0),
3411 + 0, SND_SOC_NOPM, 0, 0),
3412 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_5", NULL,
3413 - 0, 0, 0, 0),
3414 + 0, SND_SOC_NOPM, 0, 0),
3415 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_6", NULL,
3416 - 0, 0, 0, 0),
3417 + 0, SND_SOC_NOPM, 0, 0),
3418 SND_SOC_DAPM_AIF_IN("QUIN_TDM_RX_7", NULL,
3419 - 0, 0, 0, 0),
3420 + 0, SND_SOC_NOPM, 0, 0),
3421 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_0", NULL,
3422 - 0, 0, 0, 0),
3423 + 0, SND_SOC_NOPM, 0, 0),
3424 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_1", NULL,
3425 - 0, 0, 0, 0),
3426 + 0, SND_SOC_NOPM, 0, 0),
3427 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_2", NULL,
3428 - 0, 0, 0, 0),
3429 + 0, SND_SOC_NOPM, 0, 0),
3430 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_3", NULL,
3431 - 0, 0, 0, 0),
3432 + 0, SND_SOC_NOPM, 0, 0),
3433 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_4", NULL,
3434 - 0, 0, 0, 0),
3435 + 0, SND_SOC_NOPM, 0, 0),
3436 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_5", NULL,
3437 - 0, 0, 0, 0),
3438 + 0, SND_SOC_NOPM, 0, 0),
3439 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_6", NULL,
3440 - 0, 0, 0, 0),
3441 + 0, SND_SOC_NOPM, 0, 0),
3442 SND_SOC_DAPM_AIF_OUT("QUIN_TDM_TX_7", NULL,
3443 - 0, 0, 0, 0),
3444 - SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, 0, 0, 0),
3445 + 0, SND_SOC_NOPM, 0, 0),
3446 + SND_SOC_DAPM_AIF_OUT("DISPLAY_PORT_RX", "NULL", 0, SND_SOC_NOPM, 0, 0),
3447 };
3448
3449 static const struct snd_soc_component_driver q6afe_dai_component = {
3450 diff --git a/sound/soc/qcom/qdsp6/q6routing.c b/sound/soc/qcom/qdsp6/q6routing.c
3451 index ddcd9978cf57b..745cc9dd14f38 100644
3452 --- a/sound/soc/qcom/qdsp6/q6routing.c
3453 +++ b/sound/soc/qcom/qdsp6/q6routing.c
3454 @@ -996,6 +996,20 @@ static int msm_routing_probe(struct snd_soc_component *c)
3455 return 0;
3456 }
3457
3458 +static unsigned int q6routing_reg_read(struct snd_soc_component *component,
3459 + unsigned int reg)
3460 +{
3461 + /* default value */
3462 + return 0;
3463 +}
3464 +
3465 +static int q6routing_reg_write(struct snd_soc_component *component,
3466 + unsigned int reg, unsigned int val)
3467 +{
3468 + /* dummy */
3469 + return 0;
3470 +}
3471 +
3472 static const struct snd_soc_component_driver msm_soc_routing_component = {
3473 .ops = &q6pcm_routing_ops,
3474 .probe = msm_routing_probe,
3475 @@ -1004,6 +1018,8 @@ static const struct snd_soc_component_driver msm_soc_routing_component = {
3476 .num_dapm_widgets = ARRAY_SIZE(msm_qdsp6_widgets),
3477 .dapm_routes = intercon,
3478 .num_dapm_routes = ARRAY_SIZE(intercon),
3479 + .read = q6routing_reg_read,
3480 + .write = q6routing_reg_write,
3481 };
3482
3483 static int q6pcm_routing_probe(struct platform_device *pdev)
3484 diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
3485 index ee08aeff30a19..f591c4d1b6fe2 100644
3486 --- a/tools/objtool/Makefile
3487 +++ b/tools/objtool/Makefile
3488 @@ -3,9 +3,15 @@ include ../scripts/Makefile.include
3489 include ../scripts/Makefile.arch
3490
3491 # always use the host compiler
3492 +ifneq ($(LLVM),)
3493 +HOSTAR ?= llvm-ar
3494 +HOSTCC ?= clang
3495 +HOSTLD ?= ld.lld
3496 +else
3497 HOSTAR ?= ar
3498 HOSTCC ?= gcc
3499 HOSTLD ?= ld
3500 +endif
3501 AR = $(HOSTAR)
3502 CC = $(HOSTCC)
3503 LD = $(HOSTLD)
3504 diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
3505 index dc9d495e3d6ab..849d8d2e5976b 100644
3506 --- a/tools/perf/util/probe-finder.c
3507 +++ b/tools/perf/util/probe-finder.c
3508 @@ -1362,7 +1362,7 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
3509 tf.ntevs = 0;
3510
3511 ret = debuginfo__find_probes(dbg, &tf.pf);
3512 - if (ret < 0) {
3513 + if (ret < 0 || tf.ntevs == 0) {
3514 for (i = 0; i < tf.ntevs; i++)
3515 clear_probe_trace_event(&tf.tevs[i]);
3516 zfree(tevs);
3517 diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
3518 index bdb69599c4bdc..5e939ff1e3f95 100644
3519 --- a/tools/testing/selftests/cgroup/cgroup_util.c
3520 +++ b/tools/testing/selftests/cgroup/cgroup_util.c
3521 @@ -105,7 +105,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
3522
3523 /* Handle the case of comparing against empty string */
3524 if (!expected)
3525 - size = 32;
3526 + return -1;
3527 else
3528 size = strlen(expected) + 1;
3529
3530 diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
3531 index 767ac4eab4fe9..7501ec8a46004 100644
3532 --- a/virt/kvm/arm/mmu.c
3533 +++ b/virt/kvm/arm/mmu.c
3534 @@ -332,7 +332,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
3535 * destroying the VM), otherwise another faulting VCPU may come in and mess
3536 * with things behind our backs.
3537 */
3538 -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
3539 +static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
3540 + bool may_block)
3541 {
3542 pgd_t *pgd;
3543 phys_addr_t addr = start, end = start + size;
3544 @@ -357,11 +358,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
3545 * If the range is too large, release the kvm->mmu_lock
3546 * to prevent starvation and lockup detector warnings.
3547 */
3548 - if (next != end)
3549 + if (may_block && next != end)
3550 cond_resched_lock(&kvm->mmu_lock);
3551 } while (pgd++, addr = next, addr != end);
3552 }
3553
3554 +static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
3555 +{
3556 + __unmap_stage2_range(kvm, start, size, true);
3557 +}
3558 +
3559 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
3560 phys_addr_t addr, phys_addr_t end)
3561 {
3562 @@ -2045,18 +2051,21 @@ static int handle_hva_to_gpa(struct kvm *kvm,
3563
3564 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
3565 {
3566 - unmap_stage2_range(kvm, gpa, size);
3567 + unsigned flags = *(unsigned *)data;
3568 + bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
3569 +
3570 + __unmap_stage2_range(kvm, gpa, size, may_block);
3571 return 0;
3572 }
3573
3574 int kvm_unmap_hva_range(struct kvm *kvm,
3575 - unsigned long start, unsigned long end)
3576 + unsigned long start, unsigned long end, unsigned flags)
3577 {
3578 if (!kvm->arch.pgd)
3579 return 0;
3580
3581 trace_kvm_unmap_hva_range(start, end);
3582 - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
3583 + handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
3584 return 0;
3585 }
3586
3587 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
3588 index d5d4cd581af32..278bdc53047e8 100644
3589 --- a/virt/kvm/kvm_main.c
3590 +++ b/virt/kvm/kvm_main.c
3591 @@ -425,7 +425,8 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
3592 * count is also read inside the mmu_lock critical section.
3593 */
3594 kvm->mmu_notifier_count++;
3595 - need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end);
3596 + need_tlb_flush = kvm_unmap_hva_range(kvm, range->start, range->end,
3597 + range->flags);
3598 need_tlb_flush |= kvm->tlbs_dirty;
3599 /* we've to flush the tlb before the pages can be freed */
3600 if (need_tlb_flush)