Magellan Linux

Contents of /trunk/kernel-alx/patches-5.4/0228-5.4.129-all-fixes.patch

Revision 3635
Mon Oct 24 12:34:12 2022 UTC by niro
File size: 94364 bytes
-sync kernel patches
diff --git a/Makefile b/Makefile
index 5db87d8031f1e..802520ad08cca 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 5
PATCHLEVEL = 4
-SUBLEVEL = 128
+SUBLEVEL = 129
EXTRAVERSION =
NAME = Kleptomaniac Octopus

diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 924285d0bccd9..43d6a6085d862 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -544,9 +544,11 @@ void notrace cpu_init(void)
* In Thumb-2, msr with an immediate value is not allowed.
*/
#ifdef CONFIG_THUMB2_KERNEL
-#define PLC "r"
+#define PLC_l "l"
+#define PLC_r "r"
#else
-#define PLC "I"
+#define PLC_l "I"
+#define PLC_r "I"
#endif

/*
@@ -568,15 +570,15 @@ void notrace cpu_init(void)
"msr cpsr_c, %9"
:
: "r" (stk),
- PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
"I" (offsetof(struct stack, irq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
"I" (offsetof(struct stack, abt[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | UND_MODE),
"I" (offsetof(struct stack, und[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
+ PLC_r (PSR_F_BIT | PSR_I_BIT | FIQ_MODE),
"I" (offsetof(struct stack, fiq[0])),
- PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
+ PLC_l (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
: "r14");
#endif
}
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index cd8f3cdabfd07..d227cf87c48f3 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King

-LDFLAGS_vmlinux :=--no-undefined -X -z norelro
+LDFLAGS_vmlinux :=--no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9

@@ -82,17 +82,21 @@ CHECKFLAGS += -D__AARCH64EB__
AS += -EB
# Prefer the baremetal ELF build target, but not all toolchains include
# it so fall back to the standard linux version if needed.
-KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb)
+KBUILD_LDFLAGS += -EB $(call ld-option, -maarch64elfb, -maarch64linuxb -z norelro)
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
# Same as above, prefer ELF but fall back to linux target if needed.
-KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux)
+KBUILD_LDFLAGS += -EL $(call ld-option, -maarch64elf, -maarch64linux -z norelro)
UTS_MACHINE := aarch64
endif

+ifeq ($(CONFIG_LD_IS_LLD), y)
+KBUILD_LDFLAGS += -z norelro
+endif
+
CHECKFLAGS += -D__aarch64__

ifeq ($(CONFIG_ARM64_MODULE_PLTS),y)
diff --git a/arch/mips/generic/board-boston.its.S b/arch/mips/generic/board-boston.its.S
index a7f51f97b9102..c45ad27594218 100644
--- a/arch/mips/generic/board-boston.its.S
+++ b/arch/mips/generic/board-boston.its.S
@@ -1,22 +1,22 @@
/ {
images {
- fdt@boston {
+ fdt-boston {
description = "img,boston Device Tree";
data = /incbin/("boot/dts/img/boston.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};

configurations {
- conf@boston {
+ conf-boston {
description = "Boston Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@boston";
+ kernel = "kernel";
+ fdt = "fdt-boston";
};
};
};
diff --git a/arch/mips/generic/board-ni169445.its.S b/arch/mips/generic/board-ni169445.its.S
index e4cb4f95a8cc1..0a2e8f7a8526f 100644
--- a/arch/mips/generic/board-ni169445.its.S
+++ b/arch/mips/generic/board-ni169445.its.S
@@ -1,22 +1,22 @@
/ {
images {
- fdt@ni169445 {
+ fdt-ni169445 {
description = "NI 169445 device tree";
data = /incbin/("boot/dts/ni/169445.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};

configurations {
- conf@ni169445 {
+ conf-ni169445 {
description = "NI 169445 Linux Kernel";
- kernel = "kernel@0";
- fdt = "fdt@ni169445";
+ kernel = "kernel";
+ fdt = "fdt-ni169445";
};
};
};
diff --git a/arch/mips/generic/board-ocelot.its.S b/arch/mips/generic/board-ocelot.its.S
index 3da23988149a6..8c7e3a1b68d3d 100644
--- a/arch/mips/generic/board-ocelot.its.S
+++ b/arch/mips/generic/board-ocelot.its.S
@@ -1,40 +1,40 @@
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/ {
images {
- fdt@ocelot_pcb123 {
+ fdt-ocelot_pcb123 {
description = "MSCC Ocelot PCB123 Device Tree";
data = /incbin/("boot/dts/mscc/ocelot_pcb123.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};

- fdt@ocelot_pcb120 {
+ fdt-ocelot_pcb120 {
description = "MSCC Ocelot PCB120 Device Tree";
data = /incbin/("boot/dts/mscc/ocelot_pcb120.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};

configurations {
- conf@ocelot_pcb123 {
+ conf-ocelot_pcb123 {
description = "Ocelot Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@ocelot_pcb123";
+ kernel = "kernel";
+ fdt = "fdt-ocelot_pcb123";
};

- conf@ocelot_pcb120 {
+ conf-ocelot_pcb120 {
description = "Ocelot Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@ocelot_pcb120";
+ kernel = "kernel";
+ fdt = "fdt-ocelot_pcb120";
};
};
};
diff --git a/arch/mips/generic/board-xilfpga.its.S b/arch/mips/generic/board-xilfpga.its.S
index a2e773d3f14f4..08c1e900eb4ed 100644
--- a/arch/mips/generic/board-xilfpga.its.S
+++ b/arch/mips/generic/board-xilfpga.its.S
@@ -1,22 +1,22 @@
/ {
images {
- fdt@xilfpga {
+ fdt-xilfpga {
description = "MIPSfpga (xilfpga) Device Tree";
data = /incbin/("boot/dts/xilfpga/nexys4ddr.dtb");
type = "flat_dt";
arch = "mips";
compression = "none";
- hash@0 {
+ hash {
algo = "sha1";
};
};
};

configurations {
- conf@xilfpga {
+ conf-xilfpga {
description = "MIPSfpga Linux kernel";
- kernel = "kernel@0";
- fdt = "fdt@xilfpga";
+ kernel = "kernel";
+ fdt = "fdt-xilfpga";
};
};
};
diff --git a/arch/mips/generic/vmlinux.its.S b/arch/mips/generic/vmlinux.its.S
index 1a08438fd8930..3e254676540f4 100644
--- a/arch/mips/generic/vmlinux.its.S
+++ b/arch/mips/generic/vmlinux.its.S
@@ -6,7 +6,7 @@
#address-cells = <ADDR_CELLS>;

images {
- kernel@0 {
+ kernel {
description = KERNEL_NAME;
data = /incbin/(VMLINUX_BINARY);
type = "kernel";
@@ -15,18 +15,18 @@
compression = VMLINUX_COMPRESSION;
load = /bits/ ADDR_BITS <VMLINUX_LOAD_ADDRESS>;
entry = /bits/ ADDR_BITS <VMLINUX_ENTRY_ADDRESS>;
- hash@0 {
+ hash {
algo = "sha1";
};
};
};

configurations {
- default = "conf@default";
+ default = "conf-default";

- conf@default {
+ conf-default {
description = "Generic Linux kernel";
- kernel = "kernel@0";
+ kernel = "kernel";
};
};
};
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 0c67a5a94de30..76959a7d88c82 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -779,4 +779,48 @@ DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1571, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x15b1, pci_amd_enable_64bit_bar);
DECLARE_PCI_FIXUP_RESUME(PCI_VENDOR_ID_AMD, 0x1601, pci_amd_enable_64bit_bar);

+#define RS690_LOWER_TOP_OF_DRAM2 0x30
+#define RS690_LOWER_TOP_OF_DRAM2_VALID 0x1
+#define RS690_UPPER_TOP_OF_DRAM2 0x31
+#define RS690_HTIU_NB_INDEX 0xA8
+#define RS690_HTIU_NB_INDEX_WR_ENABLE 0x100
+#define RS690_HTIU_NB_DATA 0xAC
+
+/*
+ * Some BIOS implementations support RAM above 4GB, but do not configure the
+ * PCI host to respond to bus master accesses for these addresses. These
+ * implementations set the TOP_OF_DRAM_SLOT1 register correctly, so PCI DMA
+ * works as expected for addresses below 4GB.
+ *
+ * Reference: "AMD RS690 ASIC Family Register Reference Guide" (pg. 2-57)
+ * https://www.amd.com/system/files/TechDocs/43372_rs690_rrg_3.00o.pdf
+ */
+static void rs690_fix_64bit_dma(struct pci_dev *pdev)
+{
+ u32 val = 0;
+ phys_addr_t top_of_dram = __pa(high_memory - 1) + 1;
+
+ if (top_of_dram <= (1ULL << 32))
+ return;
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2);
+ pci_read_config_dword(pdev, RS690_HTIU_NB_DATA, &val);
+
+ if (val)
+ return;
+
+ pci_info(pdev, "Adjusting top of DRAM to %pa for 64-bit DMA support\n", &top_of_dram);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_UPPER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA, top_of_dram >> 32);
+
+ pci_write_config_dword(pdev, RS690_HTIU_NB_INDEX,
+ RS690_LOWER_TOP_OF_DRAM2 | RS690_HTIU_NB_INDEX_WR_ENABLE);
+ pci_write_config_dword(pdev, RS690_HTIU_NB_DATA,
+ top_of_dram | RS690_LOWER_TOP_OF_DRAM2_VALID);
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7910, rs690_fix_64bit_dma);
+
#endif
diff --git a/certs/Kconfig b/certs/Kconfig
index c94e93d8bccf0..76e469b56a773 100644
--- a/certs/Kconfig
+++ b/certs/Kconfig
@@ -83,4 +83,13 @@ config SYSTEM_BLACKLIST_HASH_LIST
wrapper to incorporate the list into the kernel. Each <hash> should
be a string of hex digits.

+config SYSTEM_REVOCATION_LIST
+ bool "Provide system-wide ring of revocation certificates"
+ depends on SYSTEM_BLACKLIST_KEYRING
+ depends on PKCS7_MESSAGE_PARSER=y
+ help
+ If set, this allows revocation certificates to be stored in the
+ blacklist keyring and implements a hook whereby a PKCS#7 message can
+ be checked to see if it matches such a certificate.
+
endmenu
diff --git a/certs/Makefile b/certs/Makefile
index f4c25b67aad90..f4b90bad8690a 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -3,7 +3,7 @@
# Makefile for the linux kernel signature checking certificates.
#

-obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o
+obj-$(CONFIG_SYSTEM_TRUSTED_KEYRING) += system_keyring.o system_certificates.o common.o
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist.o
ifneq ($(CONFIG_SYSTEM_BLACKLIST_HASH_LIST),"")
obj-$(CONFIG_SYSTEM_BLACKLIST_KEYRING) += blacklist_hashes.o
diff --git a/certs/blacklist.c b/certs/blacklist.c
index 025a41de28fda..59b2f106b2940 100644
--- a/certs/blacklist.c
+++ b/certs/blacklist.c
@@ -135,6 +135,58 @@ int is_hash_blacklisted(const u8 *hash, size_t hash_len, const char *type)
}
EXPORT_SYMBOL_GPL(is_hash_blacklisted);

+int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+{
+ if (is_hash_blacklisted(hash, hash_len, "bin") == -EKEYREJECTED)
+ return -EPERM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(is_binary_blacklisted);
+
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+/**
+ * add_key_to_revocation_list - Add a revocation certificate to the blacklist
+ * @data: The data blob containing the certificate
+ * @size: The size of data blob
+ */
+int add_key_to_revocation_list(const char *data, size_t size)
+{
+ key_ref_t key;
+
+ key = key_create_or_update(make_key_ref(blacklist_keyring, true),
+ "asymmetric",
+ NULL,
+ data,
+ size,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW),
+ KEY_ALLOC_NOT_IN_QUOTA | KEY_ALLOC_BUILT_IN);
+
+ if (IS_ERR(key)) {
+ pr_err("Problem with revocation key (%ld)\n", PTR_ERR(key));
+ return PTR_ERR(key);
+ }
+
+ return 0;
+}
+
+/**
+ * is_key_on_revocation_list - Determine if the key for a PKCS#7 message is revoked
+ * @pkcs7: The PKCS#7 message to check
+ */
+int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
+{
+ int ret;
+
+ ret = pkcs7_validate_trust(pkcs7, blacklist_keyring);
+
+ if (ret == 0)
+ return -EKEYREJECTED;
+
+ return -ENOKEY;
+}
+#endif
+
/*
* Initialise the blacklist
*/
diff --git a/certs/blacklist.h b/certs/blacklist.h
index 1efd6fa0dc608..51b320cf85749 100644
--- a/certs/blacklist.h
+++ b/certs/blacklist.h
@@ -1,3 +1,5 @@
#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <crypto/pkcs7.h>

extern const char __initconst *const blacklist_hashes[];
diff --git a/certs/common.c b/certs/common.c
new file mode 100644
index 0000000000000..16a220887a53e
--- /dev/null
+++ b/certs/common.c
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/kernel.h>
+#include <linux/key.h>
+#include "common.h"
+
+int load_certificate_list(const u8 cert_list[],
+ const unsigned long list_size,
+ const struct key *keyring)
+{
+ key_ref_t key;
+ const u8 *p, *end;
+ size_t plen;
+
+ p = cert_list;
+ end = p + list_size;
+ while (p < end) {
+ /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
+ * than 256 bytes in size.
+ */
+ if (end - p < 4)
+ goto dodgy_cert;
+ if (p[0] != 0x30 &&
+ p[1] != 0x82)
+ goto dodgy_cert;
+ plen = (p[2] << 8) | p[3];
+ plen += 4;
+ if (plen > end - p)
+ goto dodgy_cert;
+
+ key = key_create_or_update(make_key_ref(keyring, 1),
+ "asymmetric",
+ NULL,
+ p,
+ plen,
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA |
+ KEY_ALLOC_BUILT_IN |
+ KEY_ALLOC_BYPASS_RESTRICTION);
+ if (IS_ERR(key)) {
+ pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
+ PTR_ERR(key));
+ } else {
+ pr_notice("Loaded X.509 cert '%s'\n",
+ key_ref_to_ptr(key)->description);
+ key_ref_put(key);
+ }
+ p += plen;
+ }
+
+ return 0;
+
+dodgy_cert:
+ pr_err("Problem parsing in-kernel X.509 certificate list\n");
+ return 0;
+}
diff --git a/certs/common.h b/certs/common.h
new file mode 100644
index 0000000000000..abdb5795936b7
--- /dev/null
+++ b/certs/common.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef _CERT_COMMON_H
+#define _CERT_COMMON_H
+
+int load_certificate_list(const u8 cert_list[], const unsigned long list_size,
+ const struct key *keyring);
+
+#endif
diff --git a/certs/system_keyring.c b/certs/system_keyring.c
index 798291177186c..a44a8915c94cf 100644
--- a/certs/system_keyring.c
+++ b/certs/system_keyring.c
@@ -15,6 +15,7 @@
#include <keys/asymmetric-type.h>
#include <keys/system_keyring.h>
#include <crypto/pkcs7.h>
+#include "common.h"

static struct key *builtin_trusted_keys;
#ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
@@ -136,54 +137,10 @@ device_initcall(system_trusted_keyring_init);
*/
static __init int load_system_certificate_list(void)
{
- key_ref_t key;
- const u8 *p, *end;
- size_t plen;
-
pr_notice("Loading compiled-in X.509 certificates\n");

- p = system_certificate_list;
- end = p + system_certificate_list_size;
- while (p < end) {
- /* Each cert begins with an ASN.1 SEQUENCE tag and must be more
- * than 256 bytes in size.
- */
- if (end - p < 4)
- goto dodgy_cert;
- if (p[0] != 0x30 &&
- p[1] != 0x82)
- goto dodgy_cert;
- plen = (p[2] << 8) | p[3];
- plen += 4;
- if (plen > end - p)
- goto dodgy_cert;
-
- key = key_create_or_update(make_key_ref(builtin_trusted_keys, 1),
- "asymmetric",
- NULL,
- p,
- plen,
- ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
- KEY_USR_VIEW | KEY_USR_READ),
- KEY_ALLOC_NOT_IN_QUOTA |
- KEY_ALLOC_BUILT_IN |
- KEY_ALLOC_BYPASS_RESTRICTION);
- if (IS_ERR(key)) {
- pr_err("Problem loading in-kernel X.509 certificate (%ld)\n",
- PTR_ERR(key));
- } else {
- pr_notice("Loaded X.509 cert '%s'\n",
- key_ref_to_ptr(key)->description);
- key_ref_put(key);
- }
- p += plen;
- }
-
- return 0;
-
-dodgy_cert:
- pr_err("Problem parsing in-kernel X.509 certificate list\n");
- return 0;
+ return load_certificate_list(system_certificate_list, system_certificate_list_size,
+ builtin_trusted_keys);
}
late_initcall(load_system_certificate_list);

@@ -241,6 +198,12 @@ int verify_pkcs7_message_sig(const void *data, size_t len,
pr_devel("PKCS#7 platform keyring is not available\n");
goto error;
}
+
+ ret = is_key_on_revocation_list(pkcs7);
+ if (ret != -ENOKEY) {
+ pr_devel("PKCS#7 platform key is on revocation list\n");
+ goto error;
+ }
}
ret = pkcs7_validate_trust(pkcs7, trusted_keys);
if (ret < 0) {
diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c
index f40051d6aecbc..9c0ea13ca7883 100644
--- a/drivers/dma/mediatek/mtk-uart-apdma.c
+++ b/drivers/dma/mediatek/mtk-uart-apdma.c
@@ -131,10 +131,7 @@ static unsigned int mtk_uart_apdma_read(struct mtk_chan *c, unsigned int reg)

static void mtk_uart_apdma_desc_free(struct virt_dma_desc *vd)
{
- struct dma_chan *chan = vd->tx.chan;
- struct mtk_chan *c = to_mtk_uart_apdma_chan(chan);
-
- kfree(c->desc);
+ kfree(container_of(vd, struct mtk_uart_apdma_desc, vd));
}

static void mtk_uart_apdma_start_tx(struct mtk_chan *c)
@@ -207,14 +204,9 @@ static void mtk_uart_apdma_start_rx(struct mtk_chan *c)

static void mtk_uart_apdma_tx_handler(struct mtk_chan *c)
{
- struct mtk_uart_apdma_desc *d = c->desc;
-
mtk_uart_apdma_write(c, VFF_INT_FLAG, VFF_TX_INT_CLR_B);
mtk_uart_apdma_write(c, VFF_INT_EN, VFF_INT_EN_CLR_B);
mtk_uart_apdma_write(c, VFF_EN, VFF_EN_CLR_B);
-
- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
}

static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)
@@ -245,9 +237,17 @@ static void mtk_uart_apdma_rx_handler(struct mtk_chan *c)

c->rx_status = d->avail_len - cnt;
mtk_uart_apdma_write(c, VFF_RPT, wg);
+}

- list_del(&d->vd.node);
- vchan_cookie_complete(&d->vd);
+static void mtk_uart_apdma_chan_complete_handler(struct mtk_chan *c)
+{
+ struct mtk_uart_apdma_desc *d = c->desc;
+
+ if (d) {
+ list_del(&d->vd.node);
+ vchan_cookie_complete(&d->vd);
+ c->desc = NULL;
+ }
}

static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
@@ -261,6 +261,7 @@ static irqreturn_t mtk_uart_apdma_irq_handler(int irq, void *dev_id)
mtk_uart_apdma_rx_handler(c);
else if (c->dir == DMA_MEM_TO_DEV)
mtk_uart_apdma_tx_handler(c);
+ mtk_uart_apdma_chan_complete_handler(c);
spin_unlock_irqrestore(&c->vc.lock, flags);

return IRQ_HANDLED;
@@ -348,7 +349,7 @@ static struct dma_async_tx_descriptor *mtk_uart_apdma_prep_slave_sg
return NULL;

/* Now allocate and setup the descriptor */
- d = kzalloc(sizeof(*d), GFP_ATOMIC);
+ d = kzalloc(sizeof(*d), GFP_NOWAIT);
if (!d)
return NULL;

@@ -366,7 +367,7 @@ static void mtk_uart_apdma_issue_pending(struct dma_chan *chan)
unsigned long flags;

spin_lock_irqsave(&c->vc.lock, flags);
- if (vchan_issue_pending(&c->vc)) {
+ if (vchan_issue_pending(&c->vc) && !c->desc) {
vd = vchan_next_desc(&c->vc);
c->desc = to_mtk_uart_apdma_desc(&vd->tx);

diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 3993ab65c62cd..89eb9ea258149 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -1855,7 +1855,7 @@ static int rcar_dmac_probe(struct platform_device *pdev)

/* Enable runtime PM and initialize the device. */
pm_runtime_enable(&pdev->dev);
- ret = pm_runtime_get_sync(&pdev->dev);
+ ret = pm_runtime_resume_and_get(&pdev->dev);
if (ret < 0) {
dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
return ret;
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
index d47749a35863f..84009c5e0f330 100644
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -467,7 +467,7 @@ static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
struct zynqmp_dma_desc_sw *desc;
int i, ret;

- ret = pm_runtime_get_sync(chan->dev);
+ ret = pm_runtime_resume_and_get(chan->dev);
if (ret < 0)
return ret;

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 9964ec0035ede..1d8739a4fbcad 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -3416,12 +3416,8 @@ static int gfx_v10_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
- /* If GC has entered CGPG, ringing doorbell > first page doesn't
- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
- * this issue.
- */
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (adev->doorbell.size - 4));
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}

WREG32_SOC15(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 354da41f52def..06cdc22b5501d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3593,12 +3593,8 @@ static int gfx_v9_0_kiq_init_register(struct amdgpu_ring *ring)
if (ring->use_doorbell) {
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_LOWER,
(adev->doorbell_index.kiq * 2) << 2);
- /* If GC has entered CGPG, ringing doorbell > first page doesn't
- * wakeup GC. Enlarge CP_MEC_DOORBELL_RANGE_UPPER to workaround
- * this issue.
- */
WREG32_SOC15(GC, 0, mmCP_MEC_DOORBELL_RANGE_UPPER,
- (adev->doorbell.size - 4));
+ (adev->doorbell_index.userqueue_end * 2) << 2);
}

WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL,
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
index bae6a3eccee0b..f9ee562f72d33 100644
--- a/drivers/gpu/drm/nouveau/nouveau_prime.c
+++ b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -112,7 +112,22 @@ int nouveau_gem_prime_pin(struct drm_gem_object *obj)
if (ret)
return -EINVAL;

- return 0;
+ ret = ttm_bo_reserve(&nvbo->bo, false, false, NULL);
+ if (ret)
+ goto error;
+
+ if (nvbo->bo.moving)
+ ret = dma_fence_wait(nvbo->bo.moving, true);
+
+ ttm_bo_unreserve(&nvbo->bo);
+ if (ret)
+ goto error;
+
+ return ret;
+
+error:
+ nouveau_bo_unpin(nvbo);
+ return ret;
}

void nouveau_gem_prime_unpin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index b906e8fbd5f3a..7bc33a80934c4 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -94,9 +94,19 @@ int radeon_gem_prime_pin(struct drm_gem_object *obj)

/* pin buffer into GTT */
ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
- if (likely(ret == 0))
- bo->prime_shared_count++;
-
+ if (unlikely(ret))
+ goto error;
+
+ if (bo->tbo.moving) {
+ ret = dma_fence_wait(bo->tbo.moving, false);
+ if (unlikely(ret)) {
+ radeon_bo_unpin(bo);
+ goto error;
+ }
+ }
+
+ bo->prime_shared_count++;
+error:
radeon_bo_unreserve(bo);
return ret;
}
diff --git a/drivers/i2c/busses/i2c-robotfuzz-osif.c b/drivers/i2c/busses/i2c-robotfuzz-osif.c
index a39f7d0927973..66dfa211e736b 100644
--- a/drivers/i2c/busses/i2c-robotfuzz-osif.c
+++ b/drivers/i2c/busses/i2c-robotfuzz-osif.c
@@ -83,7 +83,7 @@ static int osif_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
}
}

- ret = osif_usb_read(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
+ ret = osif_usb_write(adapter, OSIFI2C_STOP, 0, 0, NULL, 0);
if (ret) {
dev_err(&adapter->dev, "failure sending STOP\n");
return -EREMOTEIO;
@@ -153,7 +153,7 @@ static int osif_probe(struct usb_interface *interface,
* Set bus frequency. The frequency is:
* 120,000,000 / ( 16 + 2 * div * 4^prescale).
* Using dev = 52, prescale = 0 give 100KHz */
- ret = osif_usb_read(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
+ ret = osif_usb_write(&priv->adapter, OSIFI2C_SET_BIT_RATE, 52, 0,
NULL, 0);
if (ret) {
dev_err(&interface->dev, "failure sending bit rate");
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index 545c3f2f8a06c..a3e3b274f0ea3 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -166,6 +166,7 @@ struct meson_host {

unsigned int bounce_buf_size;
void *bounce_buf;
+ void __iomem *bounce_iomem_buf;
dma_addr_t bounce_dma_addr;
struct sd_emmc_desc *descs;
dma_addr_t descs_dma_addr;
@@ -737,6 +738,47 @@ static void meson_mmc_desc_chain_transfer(struct mmc_host *mmc, u32 cmd_cfg)
writel(start, host->regs + SD_EMMC_START);
}

+/* local sg copy to buffer version with _to/fromio usage for dram_access_quirk */
+static void meson_mmc_copy_buffer(struct meson_host *host, struct mmc_data *data,
+ size_t buflen, bool to_buffer)
+{
+ unsigned int sg_flags = SG_MITER_ATOMIC;
+ struct scatterlist *sgl = data->sg;
+ unsigned int nents = data->sg_len;
+ struct sg_mapping_iter miter;
+ unsigned int offset = 0;
+
+ if (to_buffer)
+ sg_flags |= SG_MITER_FROM_SG;
+ else
+ sg_flags |= SG_MITER_TO_SG;
+
+ sg_miter_start(&miter, sgl, nents, sg_flags);
+
+ while ((offset < buflen) && sg_miter_next(&miter)) {
+ unsigned int len;
+
+ len = min(miter.length, buflen - offset);
+
+ /* When dram_access_quirk, the bounce buffer is a iomem mapping */
+ if (host->dram_access_quirk) {
+ if (to_buffer)
+ memcpy_toio(host->bounce_iomem_buf + offset, miter.addr, len);
+ else
+ memcpy_fromio(miter.addr, host->bounce_iomem_buf + offset, len);
+ } else {
+ if (to_buffer)
+ memcpy(host->bounce_buf + offset, miter.addr, len);
+ else
+ memcpy(miter.addr, host->bounce_buf + offset, len);
+ }
+
+ offset += len;
+ }
+
+ sg_miter_stop(&miter);
+}
+
static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
{
struct meson_host *host = mmc_priv(mmc);
@@ -780,8 +822,7 @@ static void meson_mmc_start_cmd(struct mmc_host *mmc, struct mmc_command *cmd)
if (data->flags & MMC_DATA_WRITE) {
cmd_cfg |= CMD_CFG_DATA_WR;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_to_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, true);
dma_wmb();
}

@@ -950,8 +991,7 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
if (meson_mmc_bounce_buf_read(data)) {
xfer_bytes = data->blksz * data->blocks;
WARN_ON(xfer_bytes > host->bounce_buf_size);
- sg_copy_from_buffer(data->sg, data->sg_len,
- host->bounce_buf, xfer_bytes);
+ meson_mmc_copy_buffer(host, data, xfer_bytes, false);
}

next_cmd = meson_mmc_get_next_command(cmd);
@@ -1179,7 +1219,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
* instead of the DDR memory
*/
host->bounce_buf_size = SD_EMMC_SRAM_DATA_BUF_LEN;
- host->bounce_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
+ host->bounce_iomem_buf = host->regs + SD_EMMC_SRAM_DATA_BUF_OFF;
host->bounce_dma_addr = res->start + SD_EMMC_SRAM_DATA_BUF_OFF;
} else {
/* data bounce buffer */
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 0f2bee59a82b0..0bc7f6518fb32 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -351,6 +351,7 @@ static int ldisc_open(struct tty_struct *tty)
rtnl_lock();
result = register_netdevice(dev);
if (result) {
+ tty_kref_put(tty);
rtnl_unlock();
free_netdev(dev);
return -ENODEV;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 5c6a276f69ac4..426b8098c50ee 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -1293,9 +1293,11 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
p_hwfn->p_dcbx_info->set.ver_num |= DCBX_CONFIG_VERSION_STATIC;

p_hwfn->p_dcbx_info->set.enabled = dcbx_info->operational.enabled;
+ BUILD_BUG_ON(sizeof(dcbx_info->operational.params) !=
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
memcpy(&p_hwfn->p_dcbx_info->set.config.params,
&dcbx_info->operational.params,
- sizeof(struct qed_dcbx_admin_params));
+ sizeof(p_hwfn->p_dcbx_info->set.config.params));
p_hwfn->p_dcbx_info->set.config.valid = true;

memcpy(params, &p_hwfn->p_dcbx_info->set, sizeof(struct qed_dcbx_set));
diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c
index 8ff178fc2670c..661202e854121 100644
--- a/drivers/net/ethernet/realtek/r8169_main.c
+++ b/drivers/net/ethernet/realtek/r8169_main.c
@@ -1801,7 +1801,7 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch(stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
+ memcpy(data, rtl8169_gstrings, sizeof(rtl8169_gstrings));
break;
}
}
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index a042f4607b0d0..931a44fe7afe8 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -2322,7 +2322,7 @@ static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *sh_eth_gstrings_stats,
+ memcpy(data, sh_eth_gstrings_stats,
sizeof(sh_eth_gstrings_stats));
break;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9b55fbdc3a7c6..9a7af7dda70dc 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -770,12 +770,15 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);

while (stat & STS_CTRL_APP0_CMPLT) {
+ /* Make sure that the other fields are read after bd is
+ * released by dma
+ */
+ rmb();
dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
be32_to_cpu(cur_p->len), DMA_TO_DEVICE);
skb = (struct sk_buff *)ptr_from_txbd(cur_p);
if (skb)
dev_consume_skb_irq(skb);
- cur_p->app0 = 0;
cur_p->app1 = 0;
cur_p->app2 = 0;
cur_p->app3 = 0;
@@ -784,6 +787,12 @@ static void temac_start_xmit_done(struct net_device *ndev)
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

+ /* app0 must be visible last, as it is used to flag
+ * availability of the bd
+ */
+ smp_mb();
+ cur_p->app0 = 0;
+
lp->tx_bd_ci++;
if (lp->tx_bd_ci >= TX_BD_NUM)
lp->tx_bd_ci = 0;
@@ -810,6 +819,9 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
if (cur_p->app0)
return NETDEV_TX_BUSY;

+ /* Make sure to read next bd app0 after this one */
+ rmb();
+
tail++;
if (tail >= TX_BD_NUM)
tail = 0;
@@ -927,6 +939,11 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
wmb();
lp->dma_out(lp, TX_TAILDESC_PTR, tail_p); /* DMA start */

+ if (temac_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
+ netdev_info(ndev, "%s -> netif_stop_queue\n", __func__);
+ netif_stop_queue(ndev);
+ }
+
return NETDEV_TX_OK;
}

diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index 31a5595133628..87c0cdbf262ae 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -468,16 +468,12 @@ static int dp83867_phy_reset(struct phy_device *phydev)
{
int err;

- err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESTART);
if (err < 0)
return err;

usleep_range(10, 20);

- /* After reset FORCE_LINK_GOOD bit is set. Although the
- * default value should be unset. Disable FORCE_LINK_GOOD
- * for the phy to work properly.
- */
return phy_modify(phydev, MII_DP83867_PHYCTRL,
DP83867_PHYCR_FORCE_LINK_GOOD, 0);
}
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index f6d643ecaf39b..24d1246330375 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -5065,7 +5065,7 @@ static void rtl8152_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
switch (stringset) {
case ETH_SS_STATS:
- memcpy(data, *rtl8152_gstrings, sizeof(rtl8152_gstrings));
+ memcpy(data, rtl8152_gstrings, sizeof(rtl8152_gstrings));
break;
}
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index c48c68090d762..1033513d3d9de 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1458,8 +1458,13 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw)
static void mac80211_hwsim_stop(struct ieee80211_hw *hw)
{
struct mac80211_hwsim_data *data = hw->priv;
+
data->started = false;
hrtimer_cancel(&data->beacon_timer);
+
+ while (!skb_queue_empty(&data->pending))
+ ieee80211_free_txskb(hw, skb_dequeue(&data->pending));
+
wiphy_dbg(hw->wiphy, "%s\n", __func__);
}

diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 34a06e89e176a..3c3bc9f584983 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1666,11 +1666,21 @@ static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
int err;
int i, bars = 0;

- if (atomic_inc_return(&dev->enable_cnt) > 1) {
- pci_update_current_state(dev, dev->current_state);
- return 0; /* already enabled */
+ /*
+ * Power state could be unknown at this point, either due to a fresh
+ * boot or a device removal call. So get the current power state
+ * so that things like MSI message writing will behave as expected
+ * (e.g. if the device really is in D0 at enable time).
+ */
+ if (dev->pm_cap) {
+ u16 pmcsr;
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
}

+ if (atomic_inc_return(&dev->enable_cnt) > 1)
+ return 0; /* already enabled */
+
bridge = pci_upstream_bridge(dev);
if (bridge)
pci_enable_bridge(bridge);
diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c
index 2d5e0435af0a4..bac1d040bacab 100644
--- a/drivers/pinctrl/stm32/pinctrl-stm32.c
+++ b/drivers/pinctrl/stm32/pinctrl-stm32.c
@@ -1153,7 +1153,7 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,
struct resource res;
struct reset_control *rstc;
int npins = STM32_GPIO_PINS_PER_BANK;
- int bank_nr, err;
+ int bank_nr, err, i = 0;

rstc = of_reset_control_get_exclusive(np, NULL);
if (!IS_ERR(rstc))
@@ -1182,9 +1182,14 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl,

of_property_read_string(np, "st,bank-name", &bank->gpio_chip.label);

- if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, 0, &args)) {
+ if (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3, i, &args)) {
bank_nr = args.args[1] / STM32_GPIO_PINS_PER_BANK;
bank->gpio_chip.base = args.args[1];
+
+ npins = args.args[2];
+ while (!of_parse_phandle_with_fixed_args(np, "gpio-ranges", 3,
+ ++i, &args))
+ npins += args.args[2];
} else {
bank_nr = pctl->nbanks;
bank->gpio_chip.base = bank_nr * STM32_GPIO_PINS_PER_BANK;
diff --git a/drivers/spi/spi-nxp-fspi.c b/drivers/spi/spi-nxp-fspi.c
index efd9e908e2248..36a44a837031d 100644
--- a/drivers/spi/spi-nxp-fspi.c
+++ b/drivers/spi/spi-nxp-fspi.c
@@ -975,12 +975,6 @@ static int nxp_fspi_probe(struct platform_device *pdev)
goto err_put_ctrl;
}

- /* Clear potential interrupts */
- reg = fspi_readl(f, f->iobase + FSPI_INTR);
- if (reg)
- fspi_writel(f, reg, f->iobase + FSPI_INTR);
-
-
/* find the resources - controller memory mapped space */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fspi_mmap");
f->ahb_addr = devm_ioremap_resource(dev, res);
@@ -1012,6 +1006,11 @@ static int nxp_fspi_probe(struct platform_device *pdev)
goto err_put_ctrl;
}

+ /* Clear potential interrupts */
+ reg = fspi_readl(f, f->iobase + FSPI_INTR);
+ if (reg)
+ fspi_writel(f, reg, f->iobase + FSPI_INTR);
+
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0)
diff --git a/fs/nilfs2/sysfs.c b/fs/nilfs2/sysfs.c
index e60be7bb55b0b..c6c8a33c81d5e 100644
--- a/fs/nilfs2/sysfs.c
+++ b/fs/nilfs2/sysfs.c
@@ -1054,6 +1054,7 @@ void nilfs_sysfs_delete_device_group(struct the_nilfs *nilfs)
nilfs_sysfs_delete_superblock_group(nilfs);
nilfs_sysfs_delete_segctor_group(nilfs);
kobject_del(&nilfs->ns_dev_kobj);
+ kobject_put(&nilfs->ns_dev_kobj);
kfree(nilfs->ns_dev_subgroups);
}

diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index c1a96fdf598bc..875e002a41804 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -31,16 +31,37 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
#endif

+extern struct pkcs7_message *pkcs7;
#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
extern int mark_hash_blacklisted(const char *hash);
extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
const char *type);
+extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
#else
static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
const char *type)
{
return 0;
}
+
+static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_SYSTEM_REVOCATION_LIST
+extern int add_key_to_revocation_list(const char *data, size_t size);
+extern int is_key_on_revocation_list(struct pkcs7_message *pkcs7);
+#else
+static inline int add_key_to_revocation_list(const char *data, size_t size)
+{
+ return 0;
+}
+static inline int is_key_on_revocation_list(struct pkcs7_message *pkcs7)
+{
+ return -ENOKEY;
+}
#endif

#ifdef CONFIG_IMA_BLACKLIST_KEYRING
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index d8b86fd391134..d2dbe462efeef 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -259,6 +259,7 @@ struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
+extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
@@ -267,7 +268,7 @@ static inline bool is_huge_zero_page(struct page *page)

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return is_huge_zero_page(pmd_page(pmd));
+ return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
@@ -398,6 +399,11 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}

+static inline bool is_huge_zero_pmd(pmd_t pmd)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pud(pud_t pud)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index fc717aeb2b3de..a0513c444446d 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -469,17 +469,6 @@ static inline int hstate_index(struct hstate *h)
return h - hstates;
}

-pgoff_t __basepage_index(struct page *page);
-
-/* Return page->index in PAGE_SIZE units */
-static inline pgoff_t basepage_index(struct page *page)
-{
- if (!PageCompound(page))
- return page->index;
-
- return __basepage_index(page);
-}
-
extern int dissolve_free_huge_page(struct page *page);
extern int dissolve_free_huge_pages(unsigned long start_pfn,
unsigned long end_pfn);
@@ -695,11 +684,6 @@ static inline int hstate_index(struct hstate *h)
return 0;
}

-static inline pgoff_t basepage_index(struct page *page)
-{
- return page->index;
-}
-
static inline int dissolve_free_huge_page(struct page *page)
{
return 0;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5565d11f95429..a7d626b4cad1c 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1459,6 +1459,7 @@ struct zap_details {
struct address_space *check_mapping; /* Check page->mapping if set */
pgoff_t first_index; /* Lowest page->index to unmap */
pgoff_t last_index; /* Highest page->index to unmap */
+ struct page *single_page; /* Locked page to be unmapped */
};

struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
@@ -1505,6 +1506,7 @@ extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
+void unmap_mapping_page(struct page *page);
void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows);
void unmap_mapping_range(struct address_space *mapping,
@@ -1525,6 +1527,7 @@ static inline int fixup_user_fault(struct task_struct *tsk,
BUG();
return -EFAULT;
}
+static inline void unmap_mapping_page(struct page *page) { }
static inline void unmap_mapping_pages(struct address_space *mapping,
pgoff_t start, pgoff_t nr, bool even_cows) { }
static inline void unmap_mapping_range(struct address_space *mapping,
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2ad72d2c8cc52..5d0767cb424aa 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -37,6 +37,18 @@ void dump_mm(const struct mm_struct *mm);
BUG(); \
} \
} while (0)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) ({ \
+ static bool __section(".data.once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_page(page, "VM_WARN_ON_ONCE_PAGE(" __stringify(cond)")");\
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
+
#define VM_WARN_ON(cond) (void)WARN_ON(cond)
#define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond)
#define VM_WARN_ONCE(cond, format...) (void)WARN_ONCE(cond, format)
@@ -48,6 +60,7 @@ void dump_mm(const struct mm_struct *mm);
#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond)
#define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 37a4d9e32cd3f..8543b1aaa5299 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -397,7 +397,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
}

/*
- * Get index of the page with in radix-tree
+ * Get index of the page within radix-tree (but not for hugetlb pages).
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_index(struct page *page)
@@ -416,15 +416,16 @@ static inline pgoff_t page_to_index(struct page *page)
return pgoff;
}

+extern pgoff_t hugetlb_basepage_index(struct page *page);
+
/*
- * Get the offset in PAGE_SIZE.
- * (TODO: hugepage should have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
+ * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
*/
static inline pgoff_t page_to_pgoff(struct page *page)
{
- if (unlikely(PageHeadHuge(page)))
- return page->index << compound_order(page);
-
+ if (unlikely(PageHuge(page)))
+ return hugetlb_basepage_index(page);
return page_to_index(page);
}

diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index d7d6d4eb17949..91ccae9467164 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -98,7 +98,8 @@ enum ttu_flags {
* do a final flush if necessary */
TTU_RMAP_LOCKED = 0x80, /* do not grab rmap lock:
* caller holds it */
- TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
+ TTU_SPLIT_FREEZE = 0x100, /* freeze pte under splitting thp */
+ TTU_SYNC = 0x200, /* avoid racy checks with PVMW_SYNC */
};

#ifdef CONFIG_MMU
diff --git a/include/net/sock.h b/include/net/sock.h
index a0728f24ecc53..d3dd89b6e2cba 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1860,7 +1860,8 @@ static inline u32 net_tx_rndhash(void)

static inline void sk_set_txhash(struct sock *sk)
{
- sk->sk_txhash = net_tx_rndhash();
+ /* This pairs with READ_ONCE() in skb_set_hash_from_sk() */
+ WRITE_ONCE(sk->sk_txhash, net_tx_rndhash());
}

static inline void sk_rethink_txhash(struct sock *sk)
@@ -2125,9 +2126,12 @@ static inline void sock_poll_wait(struct file *filp, struct socket *sock,

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
- if (sk->sk_txhash) {
+ /* This pairs with WRITE_ONCE() in sk_set_txhash() */
+ u32 txhash = READ_ONCE(sk->sk_txhash);
+
+ if (txhash) {
skb->l4_hash = 1;
- skb->hash = sk->sk_txhash;
+ skb->hash = txhash;
}
}

diff --git a/init/Kconfig b/init/Kconfig
index 4f9fd78e2200b..f23e90d9935f5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -20,6 +20,9 @@ config GCC_VERSION
config CC_IS_CLANG
def_bool $(success,$(CC) --version | head -n 1 | grep -q clang)

+config LD_IS_LLD
+ def_bool $(success,$(LD) -v | head -n 1 | grep -q LLD)
+
config CLANG_VERSION
int
default $(shell,$(srctree)/scripts/clang-version.sh $(CC))
diff --git a/kernel/futex.c b/kernel/futex.c
index 375e7e98e301f..f82879ae6577c 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -737,7 +737,7 @@ again:

key->both.offset |= FUT_OFF_INODE; /* inode-based key */
key->shared.i_seq = get_inode_sequence_number(inode);
- key->shared.pgoff = basepage_index(tail);
+ key->shared.pgoff = page_to_pgoff(tail);
rcu_read_unlock();
}

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 1d4c98a19043f..2eb8d7550324b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -1020,8 +1020,38 @@ void kthread_flush_work(struct kthread_work *work)
EXPORT_SYMBOL_GPL(kthread_flush_work);

/*
- * This function removes the work from the worker queue. Also it makes sure
- * that it won't get queued later via the delayed work's timer.
+ * Make sure that the timer is neither set nor running and could
+ * not manipulate the work list_head any longer.
+ *
+ * The function is called under worker->lock. The lock is temporary
+ * released but the timer can't be set again in the meantime.
+ */
+static void kthread_cancel_delayed_work_timer(struct kthread_work *work,
+ unsigned long *flags)
+{
+ struct kthread_delayed_work *dwork =
+ container_of(work, struct kthread_delayed_work, work);
+ struct kthread_worker *worker = work->worker;
+
+ /*
+ * del_timer_sync() must be called to make sure that the timer
+ * callback is not running. The lock must be temporary released
+ * to avoid a deadlock with the callback. In the meantime,
+ * any queuing is blocked by setting the canceling counter.
+ */
+ work->canceling++;
+ raw_spin_unlock_irqrestore(&worker->lock, *flags);
+ del_timer_sync(&dwork->timer);
+ raw_spin_lock_irqsave(&worker->lock, *flags);
+ work->canceling--;
+}
+
+/*
+ * This function removes the work from the worker queue.
+ *
+ * It is called under worker->lock. The caller must make sure that
+ * the timer used by delayed work is not running, e.g. by calling
+ * kthread_cancel_delayed_work_timer().
*
* The work might still be in use when this function finishes. See the
* current_work proceed by the worker.
@@ -1029,28 +1059,8 @@ EXPORT_SYMBOL_GPL(kthread_flush_work);
* Return: %true if @work was pending and successfully canceled,
* %false if @work was not pending
*/
-static bool __kthread_cancel_work(struct kthread_work *work, bool is_dwork,
- unsigned long *flags)
+static bool __kthread_cancel_work(struct kthread_work *work)
{
- /* Try to cancel the timer if exists. */
- if (is_dwork) {
- struct kthread_delayed_work *dwork =
- container_of(work, struct kthread_delayed_work, work);
- struct kthread_worker *worker = work->worker;
-
- /*
- * del_timer_sync() must be called to make sure that the timer
- * callback is not running. The lock must be temporary released
- * to avoid a deadlock with the callback. In the meantime,
- * any queuing is blocked by setting the canceling counter.
- */
- work->canceling++;
- raw_spin_unlock_irqrestore(&worker->lock, *flags);
- del_timer_sync(&dwork->timer);
- raw_spin_lock_irqsave(&worker->lock, *flags);
- work->canceling--;
- }
-
/*
* Try to remove the work from a worker list. It might either
* be from worker->work_list or from worker->delayed_work_list.
@@ -1103,11 +1113,23 @@ bool kthread_mod_delayed_work(struct kthread_worker *worker,
/* Work must not be used with >1 worker, see kthread_queue_work() */
WARN_ON_ONCE(work->worker != worker);

- /* Do not fight with another command that is canceling this work. */
+ /*
+ * Temporary cancel the work but do not fight with another command
+ * that is canceling the work as well.
+ *
+ * It is a bit tricky because of possible races with another
+ * mod_delayed_work() and cancel_delayed_work() callers.
+ *
+ * The timer must be canceled first because worker->lock is released
+ * when doing so. But the work can be removed from the queue (list)
+ * only when it can be queued again so that the return value can
+ * be used for reference counting.
+ */
+ kthread_cancel_delayed_work_timer(work, &flags);
if (work->canceling)
goto out;
+ ret = __kthread_cancel_work(work);

- ret = __kthread_cancel_work(work, true, &flags);
fast_queue:
__kthread_queue_delayed_work(worker, dwork, delay);
out:
@@ -1129,7 +1151,10 @@ static bool __kthread_cancel_work_sync(struct kthread_work *work, bool is_dwork)
/* Work must not be used with >1 worker, see kthread_queue_work(). */
WARN_ON_ONCE(work->worker != worker);

- ret = __kthread_cancel_work(work, is_dwork, &flags);
+ if (is_dwork)
+ kthread_cancel_delayed_work_timer(work, &flags);
+
+ ret = __kthread_cancel_work(work);

if (worker->current_work != work)
goto out_fast;
diff --git a/kernel/module.c b/kernel/module.c
index 88a6a9e04f8dc..59d487b8d8dad 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -268,9 +268,18 @@ static void module_assert_mutex_or_preempt(void)
#endif
}

+#ifdef CONFIG_MODULE_SIG
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);

+void set_module_sig_enforced(void)
+{
+ sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
+
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
* on that instead of directly to CONFIG_MODULE_SIG_FORCE config.
@@ -281,11 +290,6 @@ bool is_module_sig_enforced(void)
}
EXPORT_SYMBOL(is_module_sig_enforced);

-void set_module_sig_enforced(void)
-{
- sig_enforce = true;
-}
-
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7bbf419bb86d6..87a07aa61be0d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -61,6 +61,7 @@ static struct shrinker deferred_split_shrinker;

static atomic_t huge_zero_refcount;
struct page *huge_zero_page __read_mostly;
+unsigned long huge_zero_pfn __read_mostly = ~0UL;

bool transparent_hugepage_enabled(struct vm_area_struct *vma)
{
@@ -97,6 +98,7 @@ retry:
__free_pages(zero_page, compound_order(zero_page));
goto retry;
}
+ WRITE_ONCE(huge_zero_pfn, page_to_pfn(zero_page));

/* We take additional reference here. It will be put back by shrinker */
atomic_set(&huge_zero_refcount, 2);
@@ -146,6 +148,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
struct page *zero_page = xchg(&huge_zero_page, NULL);
BUG_ON(zero_page == NULL);
+ WRITE_ONCE(huge_zero_pfn, ~0UL);
__free_pages(zero_page, compound_order(zero_page));
return HPAGE_PMD_NR;
}
@@ -2155,7 +2158,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
count_vm_event(THP_SPLIT_PMD);

if (!vma_is_anonymous(vma)) {
- _pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
+ old_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
/*
* We are going to unmap this huge page. So
* just go ahead and zap it
@@ -2164,16 +2167,25 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
zap_deposited_table(mm, pmd);
if (vma_is_dax(vma))
return;
- page = pmd_page(_pmd);
- if (!PageDirty(page) && pmd_dirty(_pmd))
- set_page_dirty(page);
- if (!PageReferenced(page) && pmd_young(_pmd))
- SetPageReferenced(page);
- page_remove_rmap(page, true);
- put_page(page);
+ if (unlikely(is_pmd_migration_entry(old_pmd))) {
+ swp_entry_t entry;
+
+ entry = pmd_to_swp_entry(old_pmd);
+ page = migration_entry_to_page(entry);
+ } else {
+ page = pmd_page(old_pmd);
+ if (!PageDirty(page) && pmd_dirty(old_pmd))
+ set_page_dirty(page);
+ if (!PageReferenced(page) && pmd_young(old_pmd))
+ SetPageReferenced(page);
+ page_remove_rmap(page, true);
+ put_page(page);
+ }
add_mm_counter(mm, mm_counter_file(page), -HPAGE_PMD_NR);
return;
- } else if (pmd_trans_huge(*pmd) && is_huge_zero_pmd(*pmd)) {
+ }
+
+ if (is_huge_zero_pmd(*pmd)) {
/*
* FIXME: Do we want to invalidate secondary mmu by calling
* mmu_notifier_invalidate_range() see comments below inside
@@ -2449,16 +2461,16 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
static void unmap_page(struct page *page)
{
enum ttu_flags ttu_flags = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS |
- TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD;
- bool unmap_success;
+ TTU_RMAP_LOCKED | TTU_SPLIT_HUGE_PMD | TTU_SYNC;

VM_BUG_ON_PAGE(!PageHead(page), page);

if (PageAnon(page))
ttu_flags |= TTU_SPLIT_FREEZE;

- unmap_success = try_to_unmap(page, ttu_flags);
- VM_BUG_ON_PAGE(!unmap_success, page);
+ try_to_unmap(page, ttu_flags);
+
+ VM_WARN_ON_ONCE_PAGE(page_mapped(page), page);
}

static void remap_page(struct page *page)
@@ -2737,7 +2749,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
struct deferred_split *ds_queue = get_deferred_split_queue(page);
struct anon_vma *anon_vma = NULL;
struct address_space *mapping = NULL;
- int count, mapcount, extra_pins, ret;
+ int extra_pins, ret;
bool mlocked;
unsigned long flags;
pgoff_t end;
@@ -2799,7 +2811,6 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

mlocked = PageMlocked(page);
unmap_page(head);
- VM_BUG_ON_PAGE(compound_mapcount(head), head);

/* Make sure the page is not on per-CPU pagevec as it takes pin */
if (mlocked)
@@ -2822,9 +2833,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)

/* Prevent deferred_split_scan() touching ->_refcount */
spin_lock(&ds_queue->split_queue_lock);
- count = page_count(head);
- mapcount = total_mapcount(head);
- if (!mapcount && page_ref_freeze(head, 1 + extra_pins)) {
+ if (page_ref_freeze(head, 1 + extra_pins)) {
if (!list_empty(page_deferred_list(head))) {
ds_queue->split_queue_len--;
list_del(page_deferred_list(head));
@@ -2845,16 +2854,9 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
} else
ret = 0;
} else {
- if (IS_ENABLED(CONFIG_DEBUG_VM) && mapcount) {
- pr_alert("total_mapcount: %u, page_count(): %u\n",
- mapcount, count);
- if (PageTail(page))
- dump_page(head, NULL);
1712 - dump_page(page, "total_mapcount(head) > 0");
1713 - BUG();
1714 - }
1715 spin_unlock(&ds_queue->split_queue_lock);
1716 -fail: if (mapping)
1717 +fail:
1718 + if (mapping)
1719 xa_unlock(&mapping->i_pages);
1720 spin_unlock_irqrestore(&pgdata->lru_lock, flags);
1721 remap_page(head);
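The mm/huge_memory.c changes publish the zero page's PFN in huge_zero_pfn so that a PMD which is no longer present (a migration entry) can still be recognised without dereferencing it; ~0UL serves as a sentinel that no real PFN can equal. A hedged userspace model of that handshake, with plain loads and stores standing in for the kernel's WRITE_ONCE/READ_ONCE:

#include <stdio.h>

/* Userspace analogue of the huge_zero_pfn publication above: ~0UL is
 * a value no real page frame number can take, so a reader comparing
 * against it can never see a false match while the zero page is
 * absent. The kernel wraps these accesses in WRITE_ONCE/READ_ONCE.
 */
static unsigned long huge_zero_pfn = ~0UL;	/* "no zero page" */

static int is_huge_zero_pfn(unsigned long pfn)
{
	return pfn == huge_zero_pfn;
}

int main(void)
{
	printf("pfn 0x1234 matches: %d\n", is_huge_zero_pfn(0x1234));
	huge_zero_pfn = 0x1234;			/* zero page allocated */
	printf("pfn 0x1234 matches: %d\n", is_huge_zero_pfn(0x1234));
	huge_zero_pfn = ~0UL;			/* shrinker freed it */
	printf("pfn 0x1234 matches: %d\n", is_huge_zero_pfn(0x1234));
	return 0;
}

As in the shrink_huge_zero_page_scan() hunk, the sentinel is restored before the page is freed, so a racing reader either matches the live PFN or matches nothing.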
1722 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1723 index fe15e7d8220ab..95a32749af4da 100644
1724 --- a/mm/hugetlb.c
1725 +++ b/mm/hugetlb.c
1726 @@ -1461,15 +1461,12 @@ int PageHeadHuge(struct page *page_head)
1727 return get_compound_page_dtor(page_head) == free_huge_page;
1728 }
1729
1730 -pgoff_t __basepage_index(struct page *page)
1731 +pgoff_t hugetlb_basepage_index(struct page *page)
1732 {
1733 struct page *page_head = compound_head(page);
1734 pgoff_t index = page_index(page_head);
1735 unsigned long compound_idx;
1736
1737 - if (!PageHuge(page_head))
1738 - return page_index(page);
1739 -
1740 if (compound_order(page_head) >= MAX_ORDER)
1741 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1742 else
1743 diff --git a/mm/internal.h b/mm/internal.h
1744 index 7dd7fbb577a9a..cf382549dd702 100644
1745 --- a/mm/internal.h
1746 +++ b/mm/internal.h
1747 @@ -339,27 +339,52 @@ static inline void mlock_migrate_page(struct page *newpage, struct page *page)
1748 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
1749
1750 /*
1751 - * At what user virtual address is page expected in @vma?
1752 + * At what user virtual address is page expected in vma?
1753 + * Returns -EFAULT if all of the page is outside the range of vma.
1754 + * If page is a compound head, the entire compound page is considered.
1755 */
1756 static inline unsigned long
1757 -__vma_address(struct page *page, struct vm_area_struct *vma)
1758 +vma_address(struct page *page, struct vm_area_struct *vma)
1759 {
1760 - pgoff_t pgoff = page_to_pgoff(page);
1761 - return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1762 + pgoff_t pgoff;
1763 + unsigned long address;
1764 +
1765 + VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
1766 + pgoff = page_to_pgoff(page);
1767 + if (pgoff >= vma->vm_pgoff) {
1768 + address = vma->vm_start +
1769 + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1770 + /* Check for address beyond vma (or wrapped through 0?) */
1771 + if (address < vma->vm_start || address >= vma->vm_end)
1772 + address = -EFAULT;
1773 + } else if (PageHead(page) &&
1774 + pgoff + compound_nr(page) - 1 >= vma->vm_pgoff) {
1775 + /* Test above avoids possibility of wrap to 0 on 32-bit */
1776 + address = vma->vm_start;
1777 + } else {
1778 + address = -EFAULT;
1779 + }
1780 + return address;
1781 }
1782
1783 +/*
1784 + * Then at what user virtual address will none of the page be found in vma?
1785 + * Assumes that vma_address() already returned a good starting address.
1786 + * If page is a compound head, the entire compound page is considered.
1787 + */
1788 static inline unsigned long
1789 -vma_address(struct page *page, struct vm_area_struct *vma)
1790 +vma_address_end(struct page *page, struct vm_area_struct *vma)
1791 {
1792 - unsigned long start, end;
1793 -
1794 - start = __vma_address(page, vma);
1795 - end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
1796 -
1797 - /* page should be within @vma mapping range */
1798 - VM_BUG_ON_VMA(end < vma->vm_start || start >= vma->vm_end, vma);
1799 -
1800 - return max(start, vma->vm_start);
1801 + pgoff_t pgoff;
1802 + unsigned long address;
1803 +
1804 + VM_BUG_ON_PAGE(PageKsm(page), page); /* KSM page->index unusable */
1805 + pgoff = page_to_pgoff(page) + compound_nr(page);
1806 + address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1807 + /* Check for address beyond vma (or wrapped through 0?) */
1808 + if (address < vma->vm_start || address > vma->vm_end)
1809 + address = vma->vm_end;
1810 + return address;
1811 }
1812
1813 static inline struct file *maybe_unlock_mmap_for_io(struct vm_fault *vmf,
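The rewritten vma_address()/vma_address_end() return -EFAULT stored in an unsigned long instead of BUGging when the page falls outside the vma, and they consider the whole compound page. A standalone model of the new vma_address() arithmetic, assuming invented names (model_vma_address, a stripped-down struct vma) and an nr_pages > 1 test standing in for the kernel's PageHead() check:

#include <stdio.h>

#define PAGE_SHIFT 12
#define EFAULT     14

struct vma { unsigned long vm_start, vm_end, vm_pgoff; };

/* Returns the user address where a page at file offset pgoff
 * (spanning nr_pages base pages) maps in vma, or -EFAULT kept in an
 * unsigned long, exactly as the kernel version does. */
static unsigned long model_vma_address(unsigned long pgoff,
				       unsigned long nr_pages,
				       const struct vma *vma)
{
	unsigned long address;

	if (pgoff >= vma->vm_pgoff) {
		address = vma->vm_start +
			  ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		/* reject addresses beyond vma, including wrap through 0 */
		if (address < vma->vm_start || address >= vma->vm_end)
			address = -EFAULT;
	} else if (nr_pages > 1 &&
		   pgoff + nr_pages - 1 >= vma->vm_pgoff) {
		/* compound head starts before vma but its tail overlaps */
		address = vma->vm_start;
	} else {
		address = -EFAULT;
	}
	return address;
}

int main(void)
{
	struct vma v = { 0x400000, 0x600000, 16 };

	printf("%#lx\n", model_vma_address(17, 1, &v));   /* 0x401000 */
	printf("%#lx\n", model_vma_address(15, 512, &v)); /* vm_start: THP head */
	printf("%#lx\n", model_vma_address(4, 1, &v));    /* -EFAULT */
	return 0;
}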
1814 diff --git a/mm/memory.c b/mm/memory.c
1815 index 13a575ce2ec8f..4bb7c6a364c81 100644
1816 --- a/mm/memory.c
1817 +++ b/mm/memory.c
1818 @@ -1165,7 +1165,18 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1819 else if (zap_huge_pmd(tlb, vma, pmd, addr))
1820 goto next;
1821 /* fall through */
1822 + } else if (details && details->single_page &&
1823 + PageTransCompound(details->single_page) &&
1824 + next - addr == HPAGE_PMD_SIZE && pmd_none(*pmd)) {
1825 + spinlock_t *ptl = pmd_lock(tlb->mm, pmd);
1826 + /*
1827 + * Take and drop THP pmd lock so that we cannot return
1828 + * prematurely, while zap_huge_pmd() has cleared *pmd,
1829 + * but not yet decremented compound_mapcount().
1830 + */
1831 + spin_unlock(ptl);
1832 }
1833 +
1834 /*
1835 * Here there can be other concurrent MADV_DONTNEED or
1836 * trans huge page faults running, and if the pmd is
1837 @@ -2769,6 +2780,36 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
1838 }
1839 }
1840
1841 +/**
1842 + * unmap_mapping_page() - Unmap single page from processes.
1843 + * @page: The locked page to be unmapped.
1844 + *
1845 + * Unmap this page from any userspace process which still has it mmaped.
1846 + * Typically, for efficiency, the range of nearby pages has already been
1847 + * unmapped by unmap_mapping_pages() or unmap_mapping_range(). But once
1848 + * truncation or invalidation holds the lock on a page, it may find that
1849 + * the page has been remapped again: and then uses unmap_mapping_page()
1850 + * to unmap it finally.
1851 + */
1852 +void unmap_mapping_page(struct page *page)
1853 +{
1854 + struct address_space *mapping = page->mapping;
1855 + struct zap_details details = { };
1856 +
1857 + VM_BUG_ON(!PageLocked(page));
1858 + VM_BUG_ON(PageTail(page));
1859 +
1860 + details.check_mapping = mapping;
1861 + details.first_index = page->index;
1862 + details.last_index = page->index + hpage_nr_pages(page) - 1;
1863 + details.single_page = page;
1864 +
1865 + i_mmap_lock_write(mapping);
1866 + if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
1867 + unmap_mapping_range_tree(&mapping->i_mmap, &details);
1868 + i_mmap_unlock_write(mapping);
1869 +}
1870 +
1871 /**
1872 * unmap_mapping_pages() - Unmap pages from processes.
1873 * @mapping: The address space containing pages to be unmapped.
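The empty pmd_lock()/spin_unlock() pair added to zap_pmd_range() uses the lock purely as a barrier: zap_huge_pmd() clears the PMD and decrements compound_mapcount() under that lock, so briefly acquiring it guarantees the whole critical section has completed. A pthread sketch of the same idea (build with -pthread), assuming a spinning atomic load models the lockless pmd_none() observation; all names here are invented:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ptl = PTHREAD_MUTEX_INITIALIZER;
static int pmd_cleared;		/* models "*pmd reads as none"   */
static int mapcount = 1;	/* models compound_mapcount()    */

static void *zapper(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&ptl);
	__atomic_store_n(&pmd_cleared, 1, __ATOMIC_RELAXED);
	mapcount = 0;	/* the accounting the racer must not miss */
	pthread_mutex_unlock(&ptl);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, zapper, NULL);

	/* Lockless observer sees the cleared pmd first ... */
	while (!__atomic_load_n(&pmd_cleared, __ATOMIC_RELAXED))
		;
	/* ... then takes and drops the lock, like the hunk above: the
	 * clear happened inside the zapper's critical section, so
	 * getting the lock now means that section fully finished. */
	pthread_mutex_lock(&ptl);
	pthread_mutex_unlock(&ptl);

	printf("mapcount after barrier: %d\n", mapcount); /* always 0 */

	pthread_join(t, NULL);
	return 0;
}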
1874 diff --git a/mm/migrate.c b/mm/migrate.c
1875 index 00bbe57c1ce22..5092ef2aa8a1f 100644
1876 --- a/mm/migrate.c
1877 +++ b/mm/migrate.c
1878 @@ -321,6 +321,7 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
1879 goto out;
1880
1881 page = migration_entry_to_page(entry);
1882 + page = compound_head(page);
1883
1884 /*
1885 * Once page cache replacement of page migration started, page_count
1886 diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
1887 index eff4b4520c8d5..029f5598251c2 100644
1888 --- a/mm/page_vma_mapped.c
1889 +++ b/mm/page_vma_mapped.c
1890 @@ -111,6 +111,13 @@ static bool check_pte(struct page_vma_mapped_walk *pvmw)
1891 return pfn_in_hpage(pvmw->page, pfn);
1892 }
1893
1894 +static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
1895 +{
1896 + pvmw->address = (pvmw->address + size) & ~(size - 1);
1897 + if (!pvmw->address)
1898 + pvmw->address = ULONG_MAX;
1899 +}
1900 +
1901 /**
1902 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
1903 * @pvmw->address
1904 @@ -139,6 +146,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
1905 {
1906 struct mm_struct *mm = pvmw->vma->vm_mm;
1907 struct page *page = pvmw->page;
1908 + unsigned long end;
1909 pgd_t *pgd;
1910 p4d_t *p4d;
1911 pud_t *pud;
1912 @@ -148,10 +156,11 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
1913 if (pvmw->pmd && !pvmw->pte)
1914 return not_found(pvmw);
1915
1916 - if (pvmw->pte)
1917 - goto next_pte;
1918 + if (unlikely(PageHuge(page))) {
1919 + /* The only possible mapping was handled on last iteration */
1920 + if (pvmw->pte)
1921 + return not_found(pvmw);
1922
1923 - if (unlikely(PageHuge(pvmw->page))) {
1924 /* when pud is not present, pte will be NULL */
1925 pvmw->pte = huge_pte_offset(mm, pvmw->address, page_size(page));
1926 if (!pvmw->pte)
1927 @@ -163,78 +172,108 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
1928 return not_found(pvmw);
1929 return true;
1930 }
1931 -restart:
1932 - pgd = pgd_offset(mm, pvmw->address);
1933 - if (!pgd_present(*pgd))
1934 - return false;
1935 - p4d = p4d_offset(pgd, pvmw->address);
1936 - if (!p4d_present(*p4d))
1937 - return false;
1938 - pud = pud_offset(p4d, pvmw->address);
1939 - if (!pud_present(*pud))
1940 - return false;
1941 - pvmw->pmd = pmd_offset(pud, pvmw->address);
1942 +
1943 /*
1944 - * Make sure the pmd value isn't cached in a register by the
1945 - * compiler and used as a stale value after we've observed a
1946 - * subsequent update.
1947 + * Seek to next pte only makes sense for THP.
1948 + * But more important than that optimization, is to filter out
1949 + * any PageKsm page: whose page->index misleads vma_address()
1950 + * and vma_address_end() to disaster.
1951 */
1952 - pmde = READ_ONCE(*pvmw->pmd);
1953 - if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
1954 - pvmw->ptl = pmd_lock(mm, pvmw->pmd);
1955 - if (likely(pmd_trans_huge(*pvmw->pmd))) {
1956 - if (pvmw->flags & PVMW_MIGRATION)
1957 - return not_found(pvmw);
1958 - if (pmd_page(*pvmw->pmd) != page)
1959 - return not_found(pvmw);
1960 - return true;
1961 - } else if (!pmd_present(*pvmw->pmd)) {
1962 - if (thp_migration_supported()) {
1963 - if (!(pvmw->flags & PVMW_MIGRATION))
1964 + end = PageTransCompound(page) ?
1965 + vma_address_end(page, pvmw->vma) :
1966 + pvmw->address + PAGE_SIZE;
1967 + if (pvmw->pte)
1968 + goto next_pte;
1969 +restart:
1970 + do {
1971 + pgd = pgd_offset(mm, pvmw->address);
1972 + if (!pgd_present(*pgd)) {
1973 + step_forward(pvmw, PGDIR_SIZE);
1974 + continue;
1975 + }
1976 + p4d = p4d_offset(pgd, pvmw->address);
1977 + if (!p4d_present(*p4d)) {
1978 + step_forward(pvmw, P4D_SIZE);
1979 + continue;
1980 + }
1981 + pud = pud_offset(p4d, pvmw->address);
1982 + if (!pud_present(*pud)) {
1983 + step_forward(pvmw, PUD_SIZE);
1984 + continue;
1985 + }
1986 +
1987 + pvmw->pmd = pmd_offset(pud, pvmw->address);
1988 + /*
1989 + * Make sure the pmd value isn't cached in a register by the
1990 + * compiler and used as a stale value after we've observed a
1991 + * subsequent update.
1992 + */
1993 + pmde = READ_ONCE(*pvmw->pmd);
1994 +
1995 + if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
1996 + pvmw->ptl = pmd_lock(mm, pvmw->pmd);
1997 + pmde = *pvmw->pmd;
1998 + if (likely(pmd_trans_huge(pmde))) {
1999 + if (pvmw->flags & PVMW_MIGRATION)
2000 return not_found(pvmw);
2001 - if (is_migration_entry(pmd_to_swp_entry(*pvmw->pmd))) {
2002 - swp_entry_t entry = pmd_to_swp_entry(*pvmw->pmd);
2003 + if (pmd_page(pmde) != page)
2004 + return not_found(pvmw);
2005 + return true;
2006 + }
2007 + if (!pmd_present(pmde)) {
2008 + swp_entry_t entry;
2009
2010 - if (migration_entry_to_page(entry) != page)
2011 - return not_found(pvmw);
2012 - return true;
2013 - }
2014 + if (!thp_migration_supported() ||
2015 + !(pvmw->flags & PVMW_MIGRATION))
2016 + return not_found(pvmw);
2017 + entry = pmd_to_swp_entry(pmde);
2018 + if (!is_migration_entry(entry) ||
2019 + migration_entry_to_page(entry) != page)
2020 + return not_found(pvmw);
2021 + return true;
2022 }
2023 - return not_found(pvmw);
2024 - } else {
2025 /* THP pmd was split under us: handle on pte level */
2026 spin_unlock(pvmw->ptl);
2027 pvmw->ptl = NULL;
2028 + } else if (!pmd_present(pmde)) {
2029 + /*
2030 + * If PVMW_SYNC, take and drop THP pmd lock so that we
2031 + * cannot return prematurely, while zap_huge_pmd() has
2032 + * cleared *pmd but not decremented compound_mapcount().
2033 + */
2034 + if ((pvmw->flags & PVMW_SYNC) &&
2035 + PageTransCompound(page)) {
2036 + spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);
2037 +
2038 + spin_unlock(ptl);
2039 + }
2040 + step_forward(pvmw, PMD_SIZE);
2041 + continue;
2042 }
2043 - } else if (!pmd_present(pmde)) {
2044 - return false;
2045 - }
2046 - if (!map_pte(pvmw))
2047 - goto next_pte;
2048 - while (1) {
2049 + if (!map_pte(pvmw))
2050 + goto next_pte;
2051 +this_pte:
2052 if (check_pte(pvmw))
2053 return true;
2054 next_pte:
2055 - /* Seek to next pte only makes sense for THP */
2056 - if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
2057 - return not_found(pvmw);
2058 do {
2059 pvmw->address += PAGE_SIZE;
2060 - if (pvmw->address >= pvmw->vma->vm_end ||
2061 - pvmw->address >=
2062 - __vma_address(pvmw->page, pvmw->vma) +
2063 - hpage_nr_pages(pvmw->page) * PAGE_SIZE)
2064 + if (pvmw->address >= end)
2065 return not_found(pvmw);
2066 /* Did we cross page table boundary? */
2067 - if (pvmw->address % PMD_SIZE == 0) {
2068 - pte_unmap(pvmw->pte);
2069 + if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
2070 if (pvmw->ptl) {
2071 spin_unlock(pvmw->ptl);
2072 pvmw->ptl = NULL;
2073 }
2074 + pte_unmap(pvmw->pte);
2075 + pvmw->pte = NULL;
2076 goto restart;
2077 - } else {
2078 - pvmw->pte++;
2079 + }
2080 + pvmw->pte++;
2081 + if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
2082 + pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
2083 + spin_lock(pvmw->ptl);
2084 }
2085 } while (pte_none(*pvmw->pte));
2086
2087 @@ -242,7 +281,10 @@ next_pte:
2088 pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
2089 spin_lock(pvmw->ptl);
2090 }
2091 - }
2092 + goto this_pte;
2093 + } while (pvmw->address < end);
2094 +
2095 + return false;
2096 }
2097
2098 /**
2099 @@ -261,14 +303,10 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
2100 .vma = vma,
2101 .flags = PVMW_SYNC,
2102 };
2103 - unsigned long start, end;
2104 -
2105 - start = __vma_address(page, vma);
2106 - end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
2107
2108 - if (unlikely(end < vma->vm_start || start >= vma->vm_end))
2109 + pvmw.address = vma_address(page, vma);
2110 + if (pvmw.address == -EFAULT)
2111 return 0;
2112 - pvmw.address = max(start, vma->vm_start);
2113 if (!page_vma_mapped_walk(&pvmw))
2114 return 0;
2115 page_vma_mapped_walk_done(&pvmw);
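step_forward() above advances the walk past an absent page-table level with a single mask: (addr + size) & ~(size - 1) rounds up to the next multiple of a power-of-two size, and a wrap to 0 is pinned to ULONG_MAX so the "address < end" loop still terminates. The same arithmetic, reimplemented over a bare pointer rather than the pvmw struct:

#include <limits.h>
#include <stdio.h>

/* Round *address up to the next size-aligned boundary; size must be
 * a power of two (PMD/PUD/P4D/PGDIR size in the kernel walk). */
static void step_forward(unsigned long *address, unsigned long size)
{
	*address = (*address + size) & ~(size - 1);
	if (!*address)
		*address = ULONG_MAX;	/* wrapped: stop the walk */
}

int main(void)
{
	unsigned long pmd_size = 2UL << 20;	/* 2 MiB, typical x86-64 */
	unsigned long addr = 0x7f0000201000;

	step_forward(&addr, pmd_size);
	printf("%#lx\n", addr);			/* 0x7f0000400000 */

	addr = -pmd_size;			/* last PMD before wrap */
	step_forward(&addr, pmd_size);
	printf("%#lx\n", addr);			/* ULONG_MAX */
	return 0;
}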
2116 diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
2117 index 532c29276fcee..49e8a4fbc2051 100644
2118 --- a/mm/pgtable-generic.c
2119 +++ b/mm/pgtable-generic.c
2120 @@ -126,8 +126,8 @@ pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
2121 {
2122 pmd_t pmd;
2123 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
2124 - VM_BUG_ON((pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
2125 - !pmd_devmap(*pmdp)) || !pmd_present(*pmdp));
2126 + VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp) &&
2127 + !pmd_devmap(*pmdp));
2128 pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
2129 flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
2130 return pmd;
2131 diff --git a/mm/rmap.c b/mm/rmap.c
2132 index 0c7b2a9400d4a..45f2106852e84 100644
2133 --- a/mm/rmap.c
2134 +++ b/mm/rmap.c
2135 @@ -687,7 +687,6 @@ static bool should_defer_flush(struct mm_struct *mm, enum ttu_flags flags)
2136 */
2137 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
2138 {
2139 - unsigned long address;
2140 if (PageAnon(page)) {
2141 struct anon_vma *page__anon_vma = page_anon_vma(page);
2142 /*
2143 @@ -697,15 +696,13 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
2144 if (!vma->anon_vma || !page__anon_vma ||
2145 vma->anon_vma->root != page__anon_vma->root)
2146 return -EFAULT;
2147 - } else if (page->mapping) {
2148 - if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping)
2149 - return -EFAULT;
2150 - } else
2151 + } else if (!vma->vm_file) {
2152 return -EFAULT;
2153 - address = __vma_address(page, vma);
2154 - if (unlikely(address < vma->vm_start || address >= vma->vm_end))
2155 + } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) {
2156 return -EFAULT;
2157 - return address;
2158 + }
2159 +
2160 + return vma_address(page, vma);
2161 }
2162
2163 pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
2164 @@ -899,7 +896,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
2165 */
2166 mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
2167 0, vma, vma->vm_mm, address,
2168 - min(vma->vm_end, address + page_size(page)));
2169 + vma_address_end(page, vma));
2170 mmu_notifier_invalidate_range_start(&range);
2171
2172 while (page_vma_mapped_walk(&pvmw)) {
2173 @@ -1353,6 +1350,15 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
2174 struct mmu_notifier_range range;
2175 enum ttu_flags flags = (enum ttu_flags)arg;
2176
2177 + /*
2178 + * When racing against e.g. zap_pte_range() on another cpu,
2179 + * in between its ptep_get_and_clear_full() and page_remove_rmap(),
2180 + * try_to_unmap() may return false when it is about to become true,
2181 + * if page table locking is skipped: use TTU_SYNC to wait for that.
2182 + */
2183 + if (flags & TTU_SYNC)
2184 + pvmw.flags = PVMW_SYNC;
2185 +
2186 /* munlock has nothing to gain from examining un-locked vmas */
2187 if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
2188 return true;
2189 @@ -1374,9 +1380,10 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
2190 * Note that the page can not be free in this function as call of
2191 * try_to_unmap() must hold a reference on the page.
2192 */
2193 + range.end = PageKsm(page) ?
2194 + address + PAGE_SIZE : vma_address_end(page, vma);
2195 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
2196 - address,
2197 - min(vma->vm_end, address + page_size(page)));
2198 + address, range.end);
2199 if (PageHuge(page)) {
2200 /*
2201 * If sharing is possible, start and end will be adjusted
2202 @@ -1690,9 +1697,9 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
2203 return is_vma_temporary_stack(vma);
2204 }
2205
2206 -static int page_mapcount_is_zero(struct page *page)
2207 +static int page_not_mapped(struct page *page)
2208 {
2209 - return !total_mapcount(page);
2210 + return !page_mapped(page);
2211 }
2212
2213 /**
2214 @@ -1710,7 +1717,7 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
2215 struct rmap_walk_control rwc = {
2216 .rmap_one = try_to_unmap_one,
2217 .arg = (void *)flags,
2218 - .done = page_mapcount_is_zero,
2219 + .done = page_not_mapped,
2220 .anon_lock = page_lock_anon_vma_read,
2221 };
2222
2223 @@ -1731,14 +1738,15 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
2224 else
2225 rmap_walk(page, &rwc);
2226
2227 - return !page_mapcount(page) ? true : false;
2228 + /*
2229 + * When racing against e.g. zap_pte_range() on another cpu,
2230 + * in between its ptep_get_and_clear_full() and page_remove_rmap(),
2231 + * try_to_unmap() may return false when it is about to become true,
2232 + * if page table locking is skipped: use TTU_SYNC to wait for that.
2233 + */
2234 + return !page_mapcount(page);
2235 }
2236
2237 -static int page_not_mapped(struct page *page)
2238 -{
2239 - return !page_mapped(page);
2240 -};
2241 -
2242 /**
2243 * try_to_munlock - try to munlock a page
2244 * @page: the page to be munlocked
2245 @@ -1833,6 +1841,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
2246 struct vm_area_struct *vma = avc->vma;
2247 unsigned long address = vma_address(page, vma);
2248
2249 + VM_BUG_ON_VMA(address == -EFAULT, vma);
2250 cond_resched();
2251
2252 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2253 @@ -1887,6 +1896,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
2254 pgoff_start, pgoff_end) {
2255 unsigned long address = vma_address(page, vma);
2256
2257 + VM_BUG_ON_VMA(address == -EFAULT, vma);
2258 cond_resched();
2259
2260 if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
2261 diff --git a/mm/truncate.c b/mm/truncate.c
2262 index dd9ebc1da3566..4d5add7d8ab6d 100644
2263 --- a/mm/truncate.c
2264 +++ b/mm/truncate.c
2265 @@ -173,13 +173,10 @@ void do_invalidatepage(struct page *page, unsigned int offset,
2266 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
2267 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
2268 */
2269 -static void
2270 -truncate_cleanup_page(struct address_space *mapping, struct page *page)
2271 +static void truncate_cleanup_page(struct page *page)
2272 {
2273 - if (page_mapped(page)) {
2274 - pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
2275 - unmap_mapping_pages(mapping, page->index, nr, false);
2276 - }
2277 + if (page_mapped(page))
2278 + unmap_mapping_page(page);
2279
2280 if (page_has_private(page))
2281 do_invalidatepage(page, 0, PAGE_SIZE);
2282 @@ -224,7 +221,7 @@ int truncate_inode_page(struct address_space *mapping, struct page *page)
2283 if (page->mapping != mapping)
2284 return -EIO;
2285
2286 - truncate_cleanup_page(mapping, page);
2287 + truncate_cleanup_page(page);
2288 delete_from_page_cache(page);
2289 return 0;
2290 }
2291 @@ -362,7 +359,7 @@ void truncate_inode_pages_range(struct address_space *mapping,
2292 pagevec_add(&locked_pvec, page);
2293 }
2294 for (i = 0; i < pagevec_count(&locked_pvec); i++)
2295 - truncate_cleanup_page(mapping, locked_pvec.pages[i]);
2296 + truncate_cleanup_page(locked_pvec.pages[i]);
2297 delete_from_page_cache_batch(mapping, &locked_pvec);
2298 for (i = 0; i < pagevec_count(&locked_pvec); i++)
2299 unlock_page(locked_pvec.pages[i]);
2300 @@ -715,6 +712,16 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
2301 continue;
2302 }
2303
2304 + if (!did_range_unmap && page_mapped(page)) {
2305 + /*
2306 + * If page is mapped, before taking its lock,
2307 + * zap the rest of the file in one hit.
2308 + */
2309 + unmap_mapping_pages(mapping, index,
2310 + (1 + end - index), false);
2311 + did_range_unmap = 1;
2312 + }
2313 +
2314 lock_page(page);
2315 WARN_ON(page_to_index(page) != index);
2316 if (page->mapping != mapping) {
2317 @@ -722,23 +729,11 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
2318 continue;
2319 }
2320 wait_on_page_writeback(page);
2321 - if (page_mapped(page)) {
2322 - if (!did_range_unmap) {
2323 - /*
2324 - * Zap the rest of the file in one hit.
2325 - */
2326 - unmap_mapping_pages(mapping, index,
2327 - (1 + end - index), false);
2328 - did_range_unmap = 1;
2329 - } else {
2330 - /*
2331 - * Just zap this page
2332 - */
2333 - unmap_mapping_pages(mapping, index,
2334 - 1, false);
2335 - }
2336 - }
2337 +
2338 + if (page_mapped(page))
2339 + unmap_mapping_page(page);
2340 BUG_ON(page_mapped(page));
2341 +
2342 ret2 = do_launder_page(mapping, page);
2343 if (ret2 == 0) {
2344 if (!invalidate_complete_page2(mapping, page))
2345 diff --git a/net/core/ethtool.c b/net/core/ethtool.c
2346 index 76506975d59a5..cbd1885f24592 100644
2347 --- a/net/core/ethtool.c
2348 +++ b/net/core/ethtool.c
2349 @@ -1508,7 +1508,7 @@ static int ethtool_get_any_eeprom(struct net_device *dev, void __user *useraddr,
2350 if (eeprom.offset + eeprom.len > total_len)
2351 return -EINVAL;
2352
2353 - data = kmalloc(PAGE_SIZE, GFP_USER);
2354 + data = kzalloc(PAGE_SIZE, GFP_USER);
2355 if (!data)
2356 return -ENOMEM;
2357
2358 @@ -1573,7 +1573,7 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
2359 if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
2360 return -EINVAL;
2361
2362 - data = kmalloc(PAGE_SIZE, GFP_USER);
2363 + data = kzalloc(PAGE_SIZE, GFP_USER);
2364 if (!data)
2365 return -ENOMEM;
2366
2367 @@ -1764,7 +1764,7 @@ static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
2368 return -EFAULT;
2369
2370 test.len = test_len;
2371 - data = kmalloc_array(test_len, sizeof(u64), GFP_USER);
2372 + data = kcalloc(test_len, sizeof(u64), GFP_USER);
2373 if (!data)
2374 return -ENOMEM;
2375
2376 @@ -2295,7 +2295,7 @@ static int ethtool_get_tunable(struct net_device *dev, void __user *useraddr)
2377 ret = ethtool_tunable_valid(&tuna);
2378 if (ret)
2379 return ret;
2380 - data = kmalloc(tuna.len, GFP_USER);
2381 + data = kzalloc(tuna.len, GFP_USER);
2382 if (!data)
2383 return -ENOMEM;
2384 ret = ops->get_tunable(dev, &tuna, data);
2385 @@ -2481,7 +2481,7 @@ static int get_phy_tunable(struct net_device *dev, void __user *useraddr)
2386 ret = ethtool_phy_tunable_valid(&tuna);
2387 if (ret)
2388 return ret;
2389 - data = kmalloc(tuna.len, GFP_USER);
2390 + data = kzalloc(tuna.len, GFP_USER);
2391 if (!data)
2392 return -ENOMEM;
2393 mutex_lock(&phydev->lock);
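All five ethtool hunks swap kmalloc-family calls for their zeroing counterparts because these buffers are later copied to userspace: if a driver fills less than it claims, the stale tail of a plain allocation leaks kernel heap contents. A userspace analogue, with malloc/calloc standing in for kmalloc/kzalloc:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t len = 64, filled = 16;

	char *leaky = malloc(len);	/* like kmalloc: contents stale */
	char *safe  = calloc(1, len);	/* like kzalloc: contents zero  */
	if (!leaky || !safe)
		return 1;

	memset(leaky, 'A', filled);	/* "driver" fills only a prefix */
	memset(safe,  'A', filled);

	/* Everything past 'filled' in leaky is indeterminate old heap
	 * data; in safe it is guaranteed zero. That tail is what would
	 * have been copied out to userspace. */
	printf("safe tail byte: %d\n", safe[len - 1]);

	free(leaky);
	free(safe);
	return 0;
}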
2394 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
2395 index a27d034c85ccb..603a3495afa62 100644
2396 --- a/net/ipv4/devinet.c
2397 +++ b/net/ipv4/devinet.c
2398 @@ -1989,7 +1989,7 @@ static int inet_set_link_af(struct net_device *dev, const struct nlattr *nla)
2399 return -EAFNOSUPPORT;
2400
2401 if (nla_parse_nested_deprecated(tb, IFLA_INET_MAX, nla, NULL, NULL) < 0)
2402 - BUG();
2403 + return -EINVAL;
2404
2405 if (tb[IFLA_INET_CONF]) {
2406 nla_for_each_nested(a, tb[IFLA_INET_CONF], rem)
2407 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
2408 index df6fbefe44d4b..1c3d5d3702a10 100644
2409 --- a/net/ipv4/ping.c
2410 +++ b/net/ipv4/ping.c
2411 @@ -963,6 +963,7 @@ bool ping_rcv(struct sk_buff *skb)
2412 struct sock *sk;
2413 struct net *net = dev_net(skb->dev);
2414 struct icmphdr *icmph = icmp_hdr(skb);
2415 + bool rc = false;
2416
2417 /* We assume the packet has already been checked by icmp_rcv */
2418
2419 @@ -977,14 +978,15 @@ bool ping_rcv(struct sk_buff *skb)
2420 struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
2421
2422 pr_debug("rcv on socket %p\n", sk);
2423 - if (skb2)
2424 - ping_queue_rcv_skb(sk, skb2);
2425 + if (skb2 && !ping_queue_rcv_skb(sk, skb2))
2426 + rc = true;
2427 sock_put(sk);
2428 - return true;
2429 }
2430 - pr_debug("no socket, dropping\n");
2431
2432 - return false;
2433 + if (!rc)
2434 + pr_debug("no socket, dropping\n");
2435 +
2436 + return rc;
2437 }
2438 EXPORT_SYMBOL_GPL(ping_rcv);
2439
2440 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
2441 index 52feab2baeee5..366c3792b8604 100644
2442 --- a/net/ipv6/addrconf.c
2443 +++ b/net/ipv6/addrconf.c
2444 @@ -5761,7 +5761,7 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
2445 return -EAFNOSUPPORT;
2446
2447 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
2448 - BUG();
2449 + return -EINVAL;
2450
2451 if (tb[IFLA_INET6_TOKEN]) {
2452 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
2453 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
2454 index a7933279a80b7..e574fbf6745a4 100644
2455 --- a/net/mac80211/ieee80211_i.h
2456 +++ b/net/mac80211/ieee80211_i.h
2457 @@ -1420,7 +1420,7 @@ ieee80211_get_sband(struct ieee80211_sub_if_data *sdata)
2458 rcu_read_lock();
2459 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
2460
2461 - if (WARN_ON_ONCE(!chanctx_conf)) {
2462 + if (!chanctx_conf) {
2463 rcu_read_unlock();
2464 return NULL;
2465 }
2466 diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
2467 index 3d7a5c5e586a6..670d84e54db73 100644
2468 --- a/net/mac80211/rx.c
2469 +++ b/net/mac80211/rx.c
2470 @@ -2200,17 +2200,15 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2471 sc = le16_to_cpu(hdr->seq_ctrl);
2472 frag = sc & IEEE80211_SCTL_FRAG;
2473
2474 - if (is_multicast_ether_addr(hdr->addr1)) {
2475 - I802_DEBUG_INC(rx->local->dot11MulticastReceivedFrameCount);
2476 - goto out_no_led;
2477 - }
2478 -
2479 if (rx->sta)
2480 cache = &rx->sta->frags;
2481
2482 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
2483 goto out;
2484
2485 + if (is_multicast_ether_addr(hdr->addr1))
2486 + return RX_DROP_MONITOR;
2487 +
2488 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
2489
2490 if (skb_linearize(rx->skb))
2491 @@ -2336,7 +2334,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
2492
2493 out:
2494 ieee80211_led_rx(rx->local);
2495 - out_no_led:
2496 if (rx->sta)
2497 rx->sta->rx_stats.packets++;
2498 return RX_CONTINUE;
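The reordered checks in ieee80211_rx_h_defragment() let normal unfragmented frames through first, then reject fragmented multicast outright (previously it was accepted and counted), since multicast frames are never legitimately fragmented. A compact restatement of the new decision order, with classify() an invented name and only the control flow kept:

#include <stdbool.h>
#include <stdio.h>

static const char *classify(bool morefrags, unsigned int fragnum,
			    bool multicast)
{
	if (!morefrags && fragnum == 0)
		return "deliver";	/* common case: not fragmented */
	if (multicast)
		return "drop";		/* fragmented multicast: bogus */
	return "defragment";		/* unicast fragments proceed */
}

int main(void)
{
	printf("%s\n", classify(false, 0, true));	/* deliver */
	printf("%s\n", classify(true, 0, true));	/* drop */
	printf("%s\n", classify(true, 0, false));	/* defragment */
	return 0;
}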
2499 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
2500 index fbc2d4dfddf0e..0ffbf3d17911a 100644
2501 --- a/net/packet/af_packet.c
2502 +++ b/net/packet/af_packet.c
2503 @@ -2656,7 +2656,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2504 }
2505 if (likely(saddr == NULL)) {
2506 dev = packet_cached_dev_get(po);
2507 - proto = po->num;
2508 + proto = READ_ONCE(po->num);
2509 } else {
2510 err = -EINVAL;
2511 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2512 @@ -2869,7 +2869,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2513
2514 if (likely(saddr == NULL)) {
2515 dev = packet_cached_dev_get(po);
2516 - proto = po->num;
2517 + proto = READ_ONCE(po->num);
2518 } else {
2519 err = -EINVAL;
2520 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2521 @@ -3141,7 +3141,7 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2522 /* prevents packet_notifier() from calling
2523 * register_prot_hook()
2524 */
2525 - po->num = 0;
2526 + WRITE_ONCE(po->num, 0);
2527 __unregister_prot_hook(sk, true);
2528 rcu_read_lock();
2529 dev_curr = po->prot_hook.dev;
2530 @@ -3151,17 +3151,17 @@ static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
2531 }
2532
2533 BUG_ON(po->running);
2534 - po->num = proto;
2535 + WRITE_ONCE(po->num, proto);
2536 po->prot_hook.type = proto;
2537
2538 if (unlikely(unlisted)) {
2539 dev_put(dev);
2540 po->prot_hook.dev = NULL;
2541 - po->ifindex = -1;
2542 + WRITE_ONCE(po->ifindex, -1);
2543 packet_cached_dev_reset(po);
2544 } else {
2545 po->prot_hook.dev = dev;
2546 - po->ifindex = dev ? dev->ifindex : 0;
2547 + WRITE_ONCE(po->ifindex, dev ? dev->ifindex : 0);
2548 packet_cached_dev_assign(po, dev);
2549 }
2550 }
2551 @@ -3475,7 +3475,7 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
2552 uaddr->sa_family = AF_PACKET;
2553 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
2554 rcu_read_lock();
2555 - dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
2556 + dev = dev_get_by_index_rcu(sock_net(sk), READ_ONCE(pkt_sk(sk)->ifindex));
2557 if (dev)
2558 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
2559 rcu_read_unlock();
2560 @@ -3490,16 +3490,18 @@ static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
2561 struct sock *sk = sock->sk;
2562 struct packet_sock *po = pkt_sk(sk);
2563 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
2564 + int ifindex;
2565
2566 if (peer)
2567 return -EOPNOTSUPP;
2568
2569 + ifindex = READ_ONCE(po->ifindex);
2570 sll->sll_family = AF_PACKET;
2571 - sll->sll_ifindex = po->ifindex;
2572 - sll->sll_protocol = po->num;
2573 + sll->sll_ifindex = ifindex;
2574 + sll->sll_protocol = READ_ONCE(po->num);
2575 sll->sll_pkttype = 0;
2576 rcu_read_lock();
2577 - dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
2578 + dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
2579 if (dev) {
2580 sll->sll_hatype = dev->type;
2581 sll->sll_halen = dev->addr_len;
2582 @@ -4099,7 +4101,7 @@ static int packet_notifier(struct notifier_block *this,
2583 }
2584 if (msg == NETDEV_UNREGISTER) {
2585 packet_cached_dev_reset(po);
2586 - po->ifindex = -1;
2587 + WRITE_ONCE(po->ifindex, -1);
2588 if (po->prot_hook.dev)
2589 dev_put(po->prot_hook.dev);
2590 po->prot_hook.dev = NULL;
2591 @@ -4405,7 +4407,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2592 was_running = po->running;
2593 num = po->num;
2594 if (was_running) {
2595 - po->num = 0;
2596 + WRITE_ONCE(po->num, 0);
2597 __unregister_prot_hook(sk, false);
2598 }
2599 spin_unlock(&po->bind_lock);
2600 @@ -4440,7 +4442,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
2601
2602 spin_lock(&po->bind_lock);
2603 if (was_running) {
2604 - po->num = num;
2605 + WRITE_ONCE(po->num, num);
2606 register_prot_hook(sk);
2607 }
2608 spin_unlock(&po->bind_lock);
2609 @@ -4613,8 +4615,8 @@ static int packet_seq_show(struct seq_file *seq, void *v)
2610 s,
2611 refcount_read(&s->sk_refcnt),
2612 s->sk_type,
2613 - ntohs(po->num),
2614 - po->ifindex,
2615 + ntohs(READ_ONCE(po->num)),
2616 + READ_ONCE(po->ifindex),
2617 po->running,
2618 atomic_read(&s->sk_rmem_alloc),
2619 from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
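The af_packet hunks pair WRITE_ONCE() on po->num and po->ifindex with READ_ONCE() in the lockless readers so the compiler cannot tear, fuse, or re-load those accesses while packet_do_bind() rewrites them. The closest portable analogue is a relaxed C11 atomic; the sketch below models the kernel macros rather than using them, and packet_sock_model, bind_update and getname_read are invented names:

#include <stdatomic.h>
#include <stdio.h>

struct packet_sock_model {
	_Atomic unsigned short num;	/* protocol, read without the lock */
	_Atomic int ifindex;
};

static void bind_update(struct packet_sock_model *po,
			unsigned short proto, int ifindex)
{
	/* writer side (in the kernel, under po->bind_lock) */
	atomic_store_explicit(&po->num, proto, memory_order_relaxed);
	atomic_store_explicit(&po->ifindex, ifindex, memory_order_relaxed);
}

static void getname_read(struct packet_sock_model *po)
{
	/* lockless reader: exactly one load each, never re-read */
	unsigned short num =
		atomic_load_explicit(&po->num, memory_order_relaxed);
	int ifindex =
		atomic_load_explicit(&po->ifindex, memory_order_relaxed);

	printf("proto %#x ifindex %d\n", num, ifindex);
}

int main(void)
{
	struct packet_sock_model po = { 0 };

	bind_update(&po, 0x0800, 2);
	getname_read(&po);
	return 0;
}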
2620 diff --git a/net/wireless/util.c b/net/wireless/util.c
2621 index 4eae6ad328514..f0247eab5bc94 100644
2622 --- a/net/wireless/util.c
2623 +++ b/net/wireless/util.c
2624 @@ -1006,6 +1006,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
2625 case NL80211_IFTYPE_MESH_POINT:
2626 /* mesh should be handled? */
2627 break;
2628 + case NL80211_IFTYPE_OCB:
2629 + cfg80211_leave_ocb(rdev, dev);
2630 + break;
2631 default:
2632 break;
2633 }
2634 diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
2635 index f9b19524da112..1e9baa5c4fc6e 100644
2636 --- a/scripts/recordmcount.h
2637 +++ b/scripts/recordmcount.h
2638 @@ -192,15 +192,20 @@ static unsigned int get_symindex(Elf_Sym const *sym, Elf32_Word const *symtab,
2639 Elf32_Word const *symtab_shndx)
2640 {
2641 unsigned long offset;
2642 + unsigned short shndx = w2(sym->st_shndx);
2643 int index;
2644
2645 - if (sym->st_shndx != SHN_XINDEX)
2646 - return w2(sym->st_shndx);
2647 + if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
2648 + return shndx;
2649
2650 - offset = (unsigned long)sym - (unsigned long)symtab;
2651 - index = offset / sizeof(*sym);
2652 + if (shndx == SHN_XINDEX) {
2653 + offset = (unsigned long)sym - (unsigned long)symtab;
2654 + index = offset / sizeof(*sym);
2655
2656 - return w(symtab_shndx[index]);
2657 + return w(symtab_shndx[index]);
2658 + }
2659 +
2660 + return 0;
2661 }
2662
2663 static unsigned int get_shnum(Elf_Ehdr const *ehdr, Elf_Shdr const *shdr0)
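The get_symindex() fix distinguishes three cases of an ELF symbol's st_shndx: an ordinary section index is returned as-is, SHN_XINDEX redirects to the SHT_SYMTAB_SHNDX extension table, and every other reserved value (SHN_UNDEF, SHN_ABS, SHN_COMMON, ...) now maps to 0 instead of being returned as a bogus section number. A standalone restatement using the real constants from <elf.h>, with the extension table reduced to a plain array and sym_section() an invented name:

#include <elf.h>
#include <stdio.h>

static unsigned int sym_section(unsigned short shndx,
				unsigned int symnum,
				const Elf32_Word *shndx_table)
{
	if (shndx > SHN_UNDEF && shndx < SHN_LORESERVE)
		return shndx;			/* ordinary section index */
	if (shndx == SHN_XINDEX)
		return shndx_table[symnum];	/* index too big for 16 bits */
	return 0;				/* reserved: no real section */
}

int main(void)
{
	Elf32_Word table[] = { 0, 70000 };

	printf("%u\n", sym_section(5, 0, table));		/* 5     */
	printf("%u\n", sym_section(SHN_XINDEX, 1, table));	/* 70000 */
	printf("%u\n", sym_section(SHN_ABS, 0, table));		/* 0     */
	return 0;
}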
2664 diff --git a/security/integrity/Makefile b/security/integrity/Makefile
2665 index 35e6ca7737346..351c9662994b5 100644
2666 --- a/security/integrity/Makefile
2667 +++ b/security/integrity/Makefile
2668 @@ -11,7 +11,8 @@ integrity-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
2669 integrity-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
2670 integrity-$(CONFIG_INTEGRITY_PLATFORM_KEYRING) += platform_certs/platform_keyring.o
2671 integrity-$(CONFIG_LOAD_UEFI_KEYS) += platform_certs/efi_parser.o \
2672 - platform_certs/load_uefi.o
2673 + platform_certs/load_uefi.o \
2674 + platform_certs/keyring_handler.o
2675 integrity-$(CONFIG_LOAD_IPL_KEYS) += platform_certs/load_ipl_s390.o
2676
2677 obj-$(CONFIG_IMA) += ima/
2678 diff --git a/security/integrity/platform_certs/keyring_handler.c b/security/integrity/platform_certs/keyring_handler.c
2679 new file mode 100644
2680 index 0000000000000..5604bd57c9907
2681 --- /dev/null
2682 +++ b/security/integrity/platform_certs/keyring_handler.c
2683 @@ -0,0 +1,91 @@
2684 +// SPDX-License-Identifier: GPL-2.0
2685 +
2686 +#include <linux/kernel.h>
2687 +#include <linux/sched.h>
2688 +#include <linux/cred.h>
2689 +#include <linux/err.h>
2690 +#include <linux/efi.h>
2691 +#include <linux/slab.h>
2692 +#include <keys/asymmetric-type.h>
2693 +#include <keys/system_keyring.h>
2694 +#include "../integrity.h"
2695 +
2696 +static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
2697 +static efi_guid_t efi_cert_x509_sha256_guid __initdata =
2698 + EFI_CERT_X509_SHA256_GUID;
2699 +static efi_guid_t efi_cert_sha256_guid __initdata = EFI_CERT_SHA256_GUID;
2700 +
2701 +/*
2702 + * Blacklist a hash.
2703 + */
2704 +static __init void uefi_blacklist_hash(const char *source, const void *data,
2705 + size_t len, const char *type,
2706 + size_t type_len)
2707 +{
2708 + char *hash, *p;
2709 +
2710 + hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
2711 + if (!hash)
2712 + return;
2713 + p = memcpy(hash, type, type_len);
2714 + p += type_len;
2715 + bin2hex(p, data, len);
2716 + p += len * 2;
2717 + *p = 0;
2718 +
2719 + mark_hash_blacklisted(hash);
2720 + kfree(hash);
2721 +}
2722 +
2723 +/*
2724 + * Blacklist an X509 TBS hash.
2725 + */
2726 +static __init void uefi_blacklist_x509_tbs(const char *source,
2727 + const void *data, size_t len)
2728 +{
2729 + uefi_blacklist_hash(source, data, len, "tbs:", 4);
2730 +}
2731 +
2732 +/*
2733 + * Blacklist the hash of an executable.
2734 + */
2735 +static __init void uefi_blacklist_binary(const char *source,
2736 + const void *data, size_t len)
2737 +{
2738 + uefi_blacklist_hash(source, data, len, "bin:", 4);
2739 +}
2740 +
2741 +/*
2742 + * Add an X509 cert to the revocation list.
2743 + */
2744 +static __init void uefi_revocation_list_x509(const char *source,
2745 + const void *data, size_t len)
2746 +{
2747 + add_key_to_revocation_list(data, len);
2748 +}
2749 +
2750 +/*
2751 + * Return the appropriate handler for particular signature list types found in
2752 + * the UEFI db and MokListRT tables.
2753 + */
2754 +__init efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type)
2755 +{
2756 + if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
2757 + return add_to_platform_keyring;
2758 + return 0;
2759 +}
2760 +
2761 +/*
2762 + * Return the appropriate handler for particular signature list types found in
2763 + * the UEFI dbx and MokListXRT tables.
2764 + */
2765 +__init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type)
2766 +{
2767 + if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
2768 + return uefi_blacklist_x509_tbs;
2769 + if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
2770 + return uefi_blacklist_binary;
2771 + if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
2772 + return uefi_revocation_list_x509;
2773 + return 0;
2774 +}
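uefi_blacklist_hash() above sizes its buffer as type_len + len * 2 + 1: the type prefix, two hex digits per byte of digest, and a terminating NUL. A userspace re-creation of that string building, with the kernel helper bin2hex() open-coded and hash_string() an invented name:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *hash_string(const char *type, size_t type_len,
			 const unsigned char *data, size_t len)
{
	static const char hexdig[] = "0123456789abcdef";
	char *hash, *p;
	size_t i;

	hash = malloc(type_len + len * 2 + 1);
	if (!hash)
		return NULL;
	p = memcpy(hash, type, type_len);
	p += type_len;
	for (i = 0; i < len; i++) {	/* bin2hex() equivalent */
		*p++ = hexdig[data[i] >> 4];
		*p++ = hexdig[data[i] & 0x0f];
	}
	*p = 0;
	return hash;
}

int main(void)
{
	unsigned char digest[] = { 0xde, 0xad, 0xbe, 0xef };
	char *s = hash_string("tbs:", 4, digest, sizeof(digest));

	if (s) {
		printf("%s\n", s);	/* tbs:deadbeef */
		free(s);
	}
	return 0;
}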
2775 diff --git a/security/integrity/platform_certs/keyring_handler.h b/security/integrity/platform_certs/keyring_handler.h
2776 new file mode 100644
2777 index 0000000000000..2462bfa08fe34
2778 --- /dev/null
2779 +++ b/security/integrity/platform_certs/keyring_handler.h
2780 @@ -0,0 +1,32 @@
2781 +/* SPDX-License-Identifier: GPL-2.0 */
2782 +
2783 +#ifndef PLATFORM_CERTS_INTERNAL_H
2784 +#define PLATFORM_CERTS_INTERNAL_H
2785 +
2786 +#include <linux/efi.h>
2787 +
2788 +void blacklist_hash(const char *source, const void *data,
2789 + size_t len, const char *type,
2790 + size_t type_len);
2791 +
2792 +/*
2793 + * Blacklist an X509 TBS hash.
2794 + */
2795 +void blacklist_x509_tbs(const char *source, const void *data, size_t len);
2796 +
2797 +/*
2798 + * Blacklist the hash of an executable.
2799 + */
2800 +void blacklist_binary(const char *source, const void *data, size_t len);
2801 +
2802 +/*
2803 + * Return the handler for particular signature list types found in the db.
2804 + */
2805 +efi_element_handler_t get_handler_for_db(const efi_guid_t *sig_type);
2806 +
2807 +/*
2808 + * Return the handler for particular signature list types found in the dbx.
2809 + */
2810 +efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
2811 +
2812 +#endif
2813 diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
2814 index 020fc7a11ef0e..aa874d84e413e 100644
2815 --- a/security/integrity/platform_certs/load_uefi.c
2816 +++ b/security/integrity/platform_certs/load_uefi.c
2817 @@ -9,6 +9,7 @@
2818 #include <keys/asymmetric-type.h>
2819 #include <keys/system_keyring.h>
2820 #include "../integrity.h"
2821 +#include "keyring_handler.h"
2822
2823 static efi_guid_t efi_cert_x509_guid __initdata = EFI_CERT_X509_GUID;
2824 static efi_guid_t efi_cert_x509_sha256_guid __initdata =
2825 @@ -69,72 +70,6 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
2826 return db;
2827 }
2828
2829 -/*
2830 - * Blacklist a hash.
2831 - */
2832 -static __init void uefi_blacklist_hash(const char *source, const void *data,
2833 - size_t len, const char *type,
2834 - size_t type_len)
2835 -{
2836 - char *hash, *p;
2837 -
2838 - hash = kmalloc(type_len + len * 2 + 1, GFP_KERNEL);
2839 - if (!hash)
2840 - return;
2841 - p = memcpy(hash, type, type_len);
2842 - p += type_len;
2843 - bin2hex(p, data, len);
2844 - p += len * 2;
2845 - *p = 0;
2846 -
2847 - mark_hash_blacklisted(hash);
2848 - kfree(hash);
2849 -}
2850 -
2851 -/*
2852 - * Blacklist an X509 TBS hash.
2853 - */
2854 -static __init void uefi_blacklist_x509_tbs(const char *source,
2855 - const void *data, size_t len)
2856 -{
2857 - uefi_blacklist_hash(source, data, len, "tbs:", 4);
2858 -}
2859 -
2860 -/*
2861 - * Blacklist the hash of an executable.
2862 - */
2863 -static __init void uefi_blacklist_binary(const char *source,
2864 - const void *data, size_t len)
2865 -{
2866 - uefi_blacklist_hash(source, data, len, "bin:", 4);
2867 -}
2868 -
2869 -/*
2870 - * Return the appropriate handler for particular signature list types found in
2871 - * the UEFI db and MokListRT tables.
2872 - */
2873 -static __init efi_element_handler_t get_handler_for_db(const efi_guid_t *
2874 - sig_type)
2875 -{
2876 - if (efi_guidcmp(*sig_type, efi_cert_x509_guid) == 0)
2877 - return add_to_platform_keyring;
2878 - return 0;
2879 -}
2880 -
2881 -/*
2882 - * Return the appropriate handler for particular signature list types found in
2883 - * the UEFI dbx and MokListXRT tables.
2884 - */
2885 -static __init efi_element_handler_t get_handler_for_dbx(const efi_guid_t *
2886 - sig_type)
2887 -{
2888 - if (efi_guidcmp(*sig_type, efi_cert_x509_sha256_guid) == 0)
2889 - return uefi_blacklist_x509_tbs;
2890 - if (efi_guidcmp(*sig_type, efi_cert_sha256_guid) == 0)
2891 - return uefi_blacklist_binary;
2892 - return 0;
2893 -}
2894 -
2895 /*
2896 * Load the certs contained in the UEFI databases into the platform trusted
2897 * keyring and the UEFI blacklisted X.509 cert SHA256 hashes into the blacklist
2898 diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c
2899 index 41cf45416060f..38de88e5ffbb2 100644
2900 --- a/tools/testing/selftests/kvm/lib/kvm_util.c
2901 +++ b/tools/testing/selftests/kvm/lib/kvm_util.c
2902 @@ -54,7 +54,7 @@ int kvm_check_cap(long cap)
2903 exit(KSFT_SKIP);
2904
2905 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
2906 - TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
2907 + TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
2908 " rc: %i errno: %i", ret, errno);
2909
2910 close(kvm_fd);
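The virt/kvm/kvm_main.c diff below replaces an unconditional kvm_get_pfn() with kvm_try_get_pfn(), whose core is get_page_unless_zero(): take a reference only while the count is still nonzero, so a page that was never refcounted (e.g. the tail of a non-compound higher-order allocation behind a PFNMAP) is not pushed from 0 to 1 and later underflowed by put_page(). A C11 compare-exchange sketch of that primitive, with get_unless_zero() an invented name:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool get_unless_zero(atomic_int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;	/* reference taken */
		/* old now holds the fresh value; retry */
	}
	return false;			/* count already zero: hands off */
}

int main(void)
{
	atomic_int live = 2, dead = 0;

	printf("live page: %d (count %d)\n",
	       get_unless_zero(&live), atomic_load(&live));
	printf("dead page: %d (count %d)\n",
	       get_unless_zero(&dead), atomic_load(&dead));
	return 0;
}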
2911 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
2912 index f83fa0aeeb451..b2287e7d3ba4a 100644
2913 --- a/virt/kvm/kvm_main.c
2914 +++ b/virt/kvm/kvm_main.c
2915 @@ -1593,6 +1593,13 @@ static bool vma_is_valid(struct vm_area_struct *vma, bool write_fault)
2916 return true;
2917 }
2918
2919 +static int kvm_try_get_pfn(kvm_pfn_t pfn)
2920 +{
2921 + if (kvm_is_reserved_pfn(pfn))
2922 + return 1;
2923 + return get_page_unless_zero(pfn_to_page(pfn));
2924 +}
2925 +
2926 static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2927 unsigned long addr, bool *async,
2928 bool write_fault, bool *writable,
2929 @@ -1642,13 +1649,21 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma,
2930 * Whoever called remap_pfn_range is also going to call e.g.
2931 * unmap_mapping_range before the underlying pages are freed,
2932 * causing a call to our MMU notifier.
2933 + *
2934 + * Certain IO or PFNMAP mappings can be backed with valid
2935 + * struct pages, but be allocated without refcounting e.g.,
2936 + * tail pages of non-compound higher order allocations, which
2937 + * would then underflow the refcount when the caller does the
2938 + * required put_page. Don't allow those pages here.
2939 */
2940 - kvm_get_pfn(pfn);
2941 + if (!kvm_try_get_pfn(pfn))
2942 + r = -EFAULT;
2943
2944 out:
2945 pte_unmap_unlock(ptep, ptl);
2946 *p_pfn = pfn;
2947 - return 0;
2948 +
2949 + return r;
2950 }
2951
2952 /*